hexsha (stringlengths 40-40) | size (int64 6-14.9M) | ext (stringclasses, 1 value) | lang (stringclasses, 1 value) | max_stars_repo_path (stringlengths 6-260) | max_stars_repo_name (stringlengths 6-119) | max_stars_repo_head_hexsha (stringlengths 40-41) | max_stars_repo_licenses (sequence) | max_stars_count (int64 1-191k ⌀) | max_stars_repo_stars_event_min_datetime (stringlengths 24-24 ⌀) | max_stars_repo_stars_event_max_datetime (stringlengths 24-24 ⌀) | max_issues_repo_path (stringlengths 6-260) | max_issues_repo_name (stringlengths 6-119) | max_issues_repo_head_hexsha (stringlengths 40-41) | max_issues_repo_licenses (sequence) | max_issues_count (int64 1-67k ⌀) | max_issues_repo_issues_event_min_datetime (stringlengths 24-24 ⌀) | max_issues_repo_issues_event_max_datetime (stringlengths 24-24 ⌀) | max_forks_repo_path (stringlengths 6-260) | max_forks_repo_name (stringlengths 6-119) | max_forks_repo_head_hexsha (stringlengths 40-41) | max_forks_repo_licenses (sequence) | max_forks_count (int64 1-105k ⌀) | max_forks_repo_forks_event_min_datetime (stringlengths 24-24 ⌀) | max_forks_repo_forks_event_max_datetime (stringlengths 24-24 ⌀) | avg_line_length (float64 2-1.04M) | max_line_length (int64 2-11.2M) | alphanum_fraction (float64 0-1) | cells (sequence) | cell_types (sequence) | cell_type_groups (sequence) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
e7f94fdf88d919b502cc39bdcb9cd3053c9e170a | 279,537 | ipynb | Jupyter Notebook | examples/simulation_demo.ipynb | SelfExplainML/GAMMLI | 74b2e0ab9a7576ff676eef0cc4c55484fbfe6e72 | [
"MIT"
] | 3 | 2020-12-16T11:37:54.000Z | 2021-04-15T12:38:29.000Z | examples/simulation_demo.ipynb | gyf9712/GAMMLI | 94ada0be0866d607a714c546070e8cc78616895b | [
"MIT"
] | 1 | 2021-08-02T09:43:50.000Z | 2022-03-09T09:59:09.000Z | examples/simulation_demo.ipynb | gyf9712/GAMMLI | 94ada0be0866d607a714c546070e8cc78616895b | [
"MIT"
] | 3 | 2021-02-27T07:05:07.000Z | 2022-02-25T00:57:45.000Z | 240.358555 | 113,836 | 0.875541 | [
[
[
"## Regression",
"_____no_output_____"
]
],
[
[
"import pandas as pd\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\nfrom collections import OrderedDict\nimport time\nfrom sklearn.metrics import mean_squared_error,roc_auc_score,mean_absolute_error,log_loss\nimport sys\nfrom gammli import GAMMLI\nfrom gammli.dataReader import data_initialize\nfrom gammli.utils import local_visualize\nfrom gammli.utils import global_visualize_density\nfrom gammli.utils import feature_importance_visualize\nfrom gammli.utils import plot_trajectory\nfrom gammli.utils import plot_regularization\nimport tensorflow as tf\n\nrandom_state = 0\ndata= pd.read_csv('data/simulation/sim_0.9_new.csv')\ntask_type = \"Regression\"\n\nmeta_info = OrderedDict()\n\nmeta_info['x1']={'type': 'continues','source':'user'}\nmeta_info['x2']={'type': 'continues','source':'user'}\nmeta_info['x3']={'type': 'continues','source':'user'}\nmeta_info['x4']={'type': 'continues','source':'user'}\nmeta_info['x5']={'type': 'continues','source':'user'}\nmeta_info['z1']={'type': 'continues','source':'item'}\nmeta_info['z2']={'type': 'continues','source':'item'}\nmeta_info['z3']={'type': 'continues','source':'item'}\nmeta_info['z4']={'type': 'continues','source':'item'}\nmeta_info['z5']={'type': 'continues','source':'item'}\nmeta_info['user_id']={\"type\":\"id\",'source':'user'}\nmeta_info['item_id']={\"type\":\"id\",'source':'item'}\nmeta_info['target']={\"type\":\"target\",'source':''}",
"_____no_output_____"
],
[
"random_state = 0\ntrain , test = train_test_split(data,test_size=0.2 ,random_state=0)\n\ntr_x, tr_Xi, tr_y, tr_idx, te_x, te_Xi, te_y, val_x, val_Xi, val_y, val_idx, meta_info, model_info,sy,sy_t = data_initialize(train,test,meta_info,task_type ,'warm', random_state, True)\nmodel = GAMMLI(wc='warm',model_info=model_info, meta_info=meta_info, subnet_arch=[20, 10],interact_arch=[20, 10],activation_func=tf.tanh, batch_size=min(500, int(0.2*tr_x.shape[0])), lr_bp=0.001, auto_tune=False,\n interaction_epochs=1000,main_effect_epochs=1000,tuning_epochs=200,loss_threshold_main=0.01,loss_threshold_inter=0.1,\n verbose=True, early_stop_thres=20,interact_num=10,n_power_iterations=5,n_oversamples=10, u_group_num=10, i_group_num=10, reg_clarity=10, lambda_=5,\n mf_training_iters=200,change_mode=False,convergence_threshold=0.0001,max_rank=3,interaction_restrict='intra', si_approach ='als')\nmodel.fit(tr_x, val_x, tr_y, val_y, tr_Xi, val_Xi, tr_idx, val_idx)",
"test cold start user: 0\ntest cold start item: 0\nvalidation cold start user: 0\nvalidation cold start item: 0\n####################GAMI-Net training start.####################\n##########Stage 1: main effect training start.##########\nMain effects training epoch: 1, train loss: 0.13920, val loss: 0.13845\nMain effects training epoch: 2, train loss: 0.13449, val loss: 0.13398\nMain effects training epoch: 3, train loss: 0.13403, val loss: 0.13371\nMain effects training epoch: 4, train loss: 0.13358, val loss: 0.13306\nMain effects training epoch: 5, train loss: 0.13331, val loss: 0.13291\nMain effects training epoch: 6, train loss: 0.13317, val loss: 0.13281\nMain effects training epoch: 7, train loss: 0.13301, val loss: 0.13266\nMain effects training epoch: 8, train loss: 0.13355, val loss: 0.13296\nMain effects training epoch: 9, train loss: 0.13284, val loss: 0.13236\nMain effects training epoch: 10, train loss: 0.13282, val loss: 0.13232\nMain effects training epoch: 11, train loss: 0.13307, val loss: 0.13286\nMain effects training epoch: 12, train loss: 0.13194, val loss: 0.13148\nMain effects training epoch: 13, train loss: 0.13173, val loss: 0.13129\nMain effects training epoch: 14, train loss: 0.13165, val loss: 0.13108\nMain effects training epoch: 15, train loss: 0.13154, val loss: 0.13105\nMain effects training epoch: 16, train loss: 0.13174, val loss: 0.13131\nMain effects training epoch: 17, train loss: 0.13161, val loss: 0.13107\nMain effects training epoch: 18, train loss: 0.13121, val loss: 0.13071\nMain effects training epoch: 19, train loss: 0.13019, val loss: 0.12960\nMain effects training epoch: 20, train loss: 0.12958, val loss: 0.12889\nMain effects training epoch: 21, train loss: 0.12963, val loss: 0.12888\nMain effects training epoch: 22, train loss: 0.12959, val loss: 0.12888\nMain effects training epoch: 23, train loss: 0.12950, val loss: 0.12901\nMain effects training epoch: 24, train loss: 0.12995, val loss: 0.12917\nMain effects training epoch: 25, train loss: 0.12989, val loss: 0.12921\nMain effects training epoch: 26, train loss: 0.12954, val loss: 0.12884\nMain effects training epoch: 27, train loss: 0.13053, val loss: 0.12954\nMain effects training epoch: 28, train loss: 0.12948, val loss: 0.12889\nMain effects training epoch: 29, train loss: 0.12958, val loss: 0.12925\nMain effects training epoch: 30, train loss: 0.13083, val loss: 0.13000\nMain effects training epoch: 31, train loss: 0.12925, val loss: 0.12867\nMain effects training epoch: 32, train loss: 0.12977, val loss: 0.12952\nMain effects training epoch: 33, train loss: 0.12875, val loss: 0.12819\nMain effects training epoch: 34, train loss: 0.12849, val loss: 0.12791\nMain effects training epoch: 35, train loss: 0.13101, val loss: 0.13084\nMain effects training epoch: 36, train loss: 0.12975, val loss: 0.12889\nMain effects training epoch: 37, train loss: 0.13002, val loss: 0.12982\nMain effects training epoch: 38, train loss: 0.12654, val loss: 0.12608\nMain effects training epoch: 39, train loss: 0.12616, val loss: 0.12567\nMain effects training epoch: 40, train loss: 0.12589, val loss: 0.12548\nMain effects training epoch: 41, train loss: 0.12686, val loss: 0.12658\nMain effects training epoch: 42, train loss: 0.12645, val loss: 0.12582\nMain effects training epoch: 43, train loss: 0.12568, val loss: 0.12525\nMain effects training epoch: 44, train loss: 0.12552, val loss: 0.12502\nMain effects training epoch: 45, train loss: 0.12545, val loss: 0.12498\nMain effects training epoch: 46, train 
loss: 0.12573, val loss: 0.12531\nMain effects training epoch: 47, train loss: 0.12608, val loss: 0.12545\nMain effects training epoch: 48, train loss: 0.12563, val loss: 0.12527\nMain effects training epoch: 49, train loss: 0.12538, val loss: 0.12479\nMain effects training epoch: 50, train loss: 0.12558, val loss: 0.12506\nMain effects training epoch: 51, train loss: 0.12531, val loss: 0.12487\nMain effects training epoch: 52, train loss: 0.12527, val loss: 0.12475\nMain effects training epoch: 53, train loss: 0.12568, val loss: 0.12521\nMain effects training epoch: 54, train loss: 0.12803, val loss: 0.12739\nMain effects training epoch: 55, train loss: 0.12549, val loss: 0.12515\nMain effects training epoch: 56, train loss: 0.12529, val loss: 0.12470\nMain effects training epoch: 57, train loss: 0.12526, val loss: 0.12477\nMain effects training epoch: 58, train loss: 0.12571, val loss: 0.12511\nMain effects training epoch: 59, train loss: 0.12516, val loss: 0.12471\nMain effects training epoch: 60, train loss: 0.12540, val loss: 0.12483\nMain effects training epoch: 61, train loss: 0.12536, val loss: 0.12496\nMain effects training epoch: 62, train loss: 0.12507, val loss: 0.12449\nMain effects training epoch: 63, train loss: 0.12593, val loss: 0.12566\nMain effects training epoch: 64, train loss: 0.12504, val loss: 0.12447\nMain effects training epoch: 65, train loss: 0.12639, val loss: 0.12608\nMain effects training epoch: 66, train loss: 0.12566, val loss: 0.12523\nMain effects training epoch: 67, train loss: 0.12587, val loss: 0.12518\nMain effects training epoch: 68, train loss: 0.12541, val loss: 0.12485\nMain effects training epoch: 69, train loss: 0.12504, val loss: 0.12458\nMain effects training epoch: 70, train loss: 0.12505, val loss: 0.12442\nMain effects training epoch: 71, train loss: 0.12529, val loss: 0.12467\nMain effects training epoch: 72, train loss: 0.12639, val loss: 0.12566\nMain effects training epoch: 73, train loss: 0.12483, val loss: 0.12414\nMain effects training epoch: 74, train loss: 0.12498, val loss: 0.12437\nMain effects training epoch: 75, train loss: 0.12458, val loss: 0.12395\nMain effects training epoch: 76, train loss: 0.12412, val loss: 0.12349\nMain effects training epoch: 77, train loss: 0.12390, val loss: 0.12331\nMain effects training epoch: 78, train loss: 0.12351, val loss: 0.12286\nMain effects training epoch: 79, train loss: 0.12463, val loss: 0.12393\nMain effects training epoch: 80, train loss: 0.12334, val loss: 0.12277\nMain effects training epoch: 81, train loss: 0.12322, val loss: 0.12270\nMain effects training epoch: 82, train loss: 0.12370, val loss: 0.12316\nMain effects training epoch: 83, train loss: 0.12315, val loss: 0.12268\nMain effects training epoch: 84, train loss: 0.12323, val loss: 0.12283\nMain effects training epoch: 85, train loss: 0.12316, val loss: 0.12273\nMain effects training epoch: 86, train loss: 0.12347, val loss: 0.12318\nMain effects training epoch: 87, train loss: 0.12373, val loss: 0.12308\nMain effects training epoch: 88, train loss: 0.12351, val loss: 0.12319\nMain effects training epoch: 89, train loss: 0.12325, val loss: 0.12274\nMain effects training epoch: 90, train loss: 0.12306, val loss: 0.12258\nMain effects training epoch: 91, train loss: 0.12315, val loss: 0.12272\nMain effects training epoch: 92, train loss: 0.12307, val loss: 0.12263\nMain effects training epoch: 93, train loss: 0.12315, val loss: 0.12258\nMain effects training epoch: 94, train loss: 0.12331, val loss: 0.12285\nMain effects 
training epoch: 95, train loss: 0.12304, val loss: 0.12259\nMain effects training epoch: 96, train loss: 0.12309, val loss: 0.12261\nMain effects training epoch: 97, train loss: 0.12317, val loss: 0.12258\nMain effects training epoch: 98, train loss: 0.12297, val loss: 0.12255\nMain effects training epoch: 99, train loss: 0.12327, val loss: 0.12292\nMain effects training epoch: 100, train loss: 0.12309, val loss: 0.12261\nMain effects training epoch: 101, train loss: 0.12322, val loss: 0.12290\nMain effects training epoch: 102, train loss: 0.12311, val loss: 0.12266\nMain effects training epoch: 103, train loss: 0.12296, val loss: 0.12248\nMain effects training epoch: 104, train loss: 0.12346, val loss: 0.12305\nMain effects training epoch: 105, train loss: 0.12307, val loss: 0.12261\nMain effects training epoch: 106, train loss: 0.12293, val loss: 0.12237\nMain effects training epoch: 107, train loss: 0.12291, val loss: 0.12243\nMain effects training epoch: 108, train loss: 0.12323, val loss: 0.12288\nMain effects training epoch: 109, train loss: 0.12302, val loss: 0.12251\nMain effects training epoch: 110, train loss: 0.12352, val loss: 0.12317\nMain effects training epoch: 111, train loss: 0.12410, val loss: 0.12393\n"
],
[
"simu_dir = 'result'\ndata_dict_logs = model.final_gam_model.summary_logs(save_dict=False)\ndata_dict_logs.update({\"err_train_mf\":model.final_mf_model.mf_mae,\n \"err_val_mf\":model.final_mf_model.mf_valmae})\nplot_trajectory(data_dict_logs, folder=simu_dir, name=\"s1_traj_plot\", log_scale=True, save_png=False, save_eps=True)\nplot_regularization(data_dict_logs, folder=simu_dir, name=\"s1_regu_plot\", log_scale=True, save_png=False, save_eps=False)",
"_____no_output_____"
],
[
"global_visualize_density(data_dict, save_png=True, folder=simu_dir, name='s1_global')",
"_____no_output_____"
]
]
] | [
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code"
]
] |
e7f950e17342caa7b34482cf65e4394b0257f660 | 535,463 | ipynb | Jupyter Notebook | assignment3/RNN_Captioning.ipynb | Purewhite2019/CS231n-2020-Assignment | c90a2e5e8fc0c38ce293cf627778a50ccf280351 | [
"MIT"
] | null | null | null | assignment3/RNN_Captioning.ipynb | Purewhite2019/CS231n-2020-Assignment | c90a2e5e8fc0c38ce293cf627778a50ccf280351 | [
"MIT"
] | null | null | null | assignment3/RNN_Captioning.ipynb | Purewhite2019/CS231n-2020-Assignment | c90a2e5e8fc0c38ce293cf627778a50ccf280351 | [
"MIT"
] | null | null | null | 631.442217 | 185,216 | 0.944809 | [
[
[
"# Image Captioning with RNNs\nIn this exercise you will implement a vanilla recurrent neural networks and use them it to train a model that can generate novel captions for images.",
"_____no_output_____"
],
[
"## Install h5py\nThe COCO dataset we will be using is stored in HDF5 format. To load HDF5 files, we will need to install the `h5py` Python package. From the command line, run: <br/>\n`pip install h5py` <br/>\nIf you receive a permissions error, you may need to run the command as root: <br/>\n```sudo pip install h5py```\n\nYou can also run commands directly from the Jupyter notebook by prefixing the command with the \"!\" character:",
"_____no_output_____"
]
],
[
[
"!pip install h5py",
"Requirement already satisfied: h5py in /home/purewhite/anaconda3/envs/cs231n/lib/python3.7/site-packages (3.2.1)\r\nRequirement already satisfied: cached-property in /home/purewhite/anaconda3/envs/cs231n/lib/python3.7/site-packages (from h5py) (1.5.2)\r\nRequirement already satisfied: numpy>=1.14.5 in /home/purewhite/anaconda3/envs/cs231n/lib/python3.7/site-packages (from h5py) (1.16.2)\r\n"
],
[
"# As usual, a bit of setup\nimport time, os, json\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom cs231n.gradient_check import eval_numerical_gradient, eval_numerical_gradient_array\nfrom cs231n.rnn_layers import *\nfrom cs231n.captioning_solver import CaptioningSolver\nfrom cs231n.classifiers.rnn import CaptioningRNN\nfrom cs231n.coco_utils import load_coco_data, sample_coco_minibatch, decode_captions\nfrom cs231n.image_utils import image_from_url\n\n%matplotlib inline\nplt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots\nplt.rcParams['image.interpolation'] = 'nearest'\nplt.rcParams['image.cmap'] = 'gray'\n\n# for auto-reloading external modules\n# see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython\n%load_ext autoreload\n%autoreload 2\n\ndef rel_error(x, y):\n \"\"\" returns relative error \"\"\"\n return np.max(np.abs(x - y) / (np.maximum(1e-8, np.abs(x) + np.abs(y))))",
"_____no_output_____"
]
],
[
[
"# Microsoft COCO\nFor this exercise we will use the 2014 release of the [Microsoft COCO dataset](http://mscoco.org/) which has become the standard testbed for image captioning. The dataset consists of 80,000 training images and 40,000 validation images, each annotated with 5 captions written by workers on Amazon Mechanical Turk.\n\n**You should have already downloaded the data by changing to the `cs231n/datasets` directory and running the script `get_assignment3_data.sh`. If you haven't yet done so, run that script now. Warning: the COCO data download is ~1GB.**\n\nWe have preprocessed the data and extracted features for you already. For all images we have extracted features from the fc7 layer of the VGG-16 network pretrained on ImageNet; these features are stored in the files `train2014_vgg16_fc7.h5` and `val2014_vgg16_fc7.h5` respectively. To cut down on processing time and memory requirements, we have reduced the dimensionality of the features from 4096 to 512; these features can be found in the files `train2014_vgg16_fc7_pca.h5` and `val2014_vgg16_fc7_pca.h5`.\n\nThe raw images take up a lot of space (nearly 20GB) so we have not included them in the download. However all images are taken from Flickr, and URLs of the training and validation images are stored in the files `train2014_urls.txt` and `val2014_urls.txt` respectively. This allows you to download images on the fly for visualization. Since images are downloaded on-the-fly, **you must be connected to the internet to view images**.\n\nDealing with strings is inefficient, so we will work with an encoded version of the captions. Each word is assigned an integer ID, allowing us to represent a caption by a sequence of integers. The mapping between integer IDs and words is in the file `coco2014_vocab.json`, and you can use the function `decode_captions` from the file `cs231n/coco_utils.py` to convert numpy arrays of integer IDs back into strings.\n\nThere are a couple special tokens that we add to the vocabulary. We prepend a special `<START>` token and append an `<END>` token to the beginning and end of each caption respectively. Rare words are replaced with a special `<UNK>` token (for \"unknown\"). In addition, since we want to train with minibatches containing captions of different lengths, we pad short captions with a special `<NULL>` token after the `<END>` token and don't compute loss or gradient for `<NULL>` tokens. Since they are a bit of a pain, we have taken care of all implementation details around special tokens for you.\n\nYou can load all of the MS-COCO data (captions, features, URLs, and vocabulary) using the `load_coco_data` function from the file `cs231n/coco_utils.py`. Run the following cell to do so:",
"_____no_output_____"
]
],
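To make the caption encoding concrete, here is a minimal standalone sketch of the round trip between integer IDs and words. The toy vocabulary below is invented for illustration; the real mapping comes from `coco2014_vocab.json`, and `decode_captions` performs an equivalent lookup.

```python
import numpy as np

# Toy vocabulary, invented for this example; the real one is in coco2014_vocab.json.
idx_to_word = ['<NULL>', '<START>', '<END>', '<UNK>', 'a', 'cat']

# "<START> a cat <END>" padded with <NULL> to a fixed caption length
encoded = np.array([1, 4, 5, 2, 0, 0])

# Decoding is just an index lookup that skips the <NULL> padding
words = [idx_to_word[i] for i in encoded if idx_to_word[i] != '<NULL>']
print(' '.join(words))  # -> <START> a cat <END>
```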
[
[
"# Load COCO data from disk; this returns a dictionary\n# We'll work with dimensionality-reduced features for this notebook, but feel\n# free to experiment with the original features by changing the flag below.\ndata = load_coco_data(pca_features=True)\n\n# Print out all the keys and values from the data dictionary\nfor k, v in data.items():\n if type(v) == np.ndarray:\n print(k, type(v), v.shape, v.dtype)\n else:\n print(k, type(v), len(v))",
"base dir /home/purewhite/workspace/CS231n-2020-Assignment/assignment3/cs231n/datasets/coco_captioning\ntrain_captions <class 'numpy.ndarray'> (400135, 17) int32\ntrain_image_idxs <class 'numpy.ndarray'> (400135,) int32\nval_captions <class 'numpy.ndarray'> (195954, 17) int32\nval_image_idxs <class 'numpy.ndarray'> (195954,) int32\ntrain_features <class 'numpy.ndarray'> (82783, 512) float32\nval_features <class 'numpy.ndarray'> (40504, 512) float32\nidx_to_word <class 'list'> 1004\nword_to_idx <class 'dict'> 1004\ntrain_urls <class 'numpy.ndarray'> (82783,) <U63\nval_urls <class 'numpy.ndarray'> (40504,) <U63\n"
]
],
[
[
"## Look at the data\nIt is always a good idea to look at examples from the dataset before working with it.\n\nYou can use the `sample_coco_minibatch` function from the file `cs231n/coco_utils.py` to sample minibatches of data from the data structure returned from `load_coco_data`. Run the following to sample a small minibatch of training data and show the images and their captions. Running it multiple times and looking at the results helps you to get a sense of the dataset.\n\nNote that we decode the captions using the `decode_captions` function and that we download the images on-the-fly using their Flickr URL, so **you must be connected to the internet to view images**.",
"_____no_output_____"
]
],
[
[
"# Sample a minibatch and show the images and captions\nbatch_size = 3\n\ncaptions, features, urls = sample_coco_minibatch(data, batch_size=batch_size)\nfor i, (caption, url) in enumerate(zip(captions, urls)):\n plt.imshow(image_from_url(url))\n plt.axis('off')\n caption_str = decode_captions(caption, data['idx_to_word'])\n plt.title(caption_str)\n plt.show()",
"_____no_output_____"
]
],
[
[
"# Recurrent Neural Networks\nAs discussed in lecture, we will use recurrent neural network (RNN) language models for image captioning. The file `cs231n/rnn_layers.py` contains implementations of different layer types that are needed for recurrent neural networks, and the file `cs231n/classifiers/rnn.py` uses these layers to implement an image captioning model.\n\nWe will first implement different types of RNN layers in `cs231n/rnn_layers.py`.",
"_____no_output_____"
],
[
"# Vanilla RNN: step forward\nOpen the file `cs231n/rnn_layers.py`. This file implements the forward and backward passes for different types of layers that are commonly used in recurrent neural networks.\n\nFirst implement the function `rnn_step_forward` which implements the forward pass for a single timestep of a vanilla recurrent neural network. After doing so run the following to check your implementation. You should see errors on the order of e-8 or less.",
"_____no_output_____"
]
],
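For reference, one straightforward implementation of the step is sketched below: the standard vanilla-RNN update `next_h = tanh(x Wx + prev_h Wh + b)`. This is a sketch rather than the notebook's reference solution, but any correct `rnn_step_forward` computes the same function.

```python
import numpy as np

def rnn_step_forward(x, prev_h, Wx, Wh, b):
    """One vanilla-RNN timestep for a minibatch: x is (N, D), prev_h is (N, H)."""
    a = x.dot(Wx) + prev_h.dot(Wh) + b    # pre-activation, shape (N, H)
    next_h = np.tanh(a)
    cache = (x, prev_h, Wx, Wh, next_h)   # saved for the backward pass
    return next_h, cache
```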
[
[
"N, D, H = 3, 10, 4\n\nx = np.linspace(-0.4, 0.7, num=N*D).reshape(N, D)\nprev_h = np.linspace(-0.2, 0.5, num=N*H).reshape(N, H)\nWx = np.linspace(-0.1, 0.9, num=D*H).reshape(D, H)\nWh = np.linspace(-0.3, 0.7, num=H*H).reshape(H, H)\nb = np.linspace(-0.2, 0.4, num=H)\n\nnext_h, _ = rnn_step_forward(x, prev_h, Wx, Wh, b)\nexpected_next_h = np.asarray([\n [-0.58172089, -0.50182032, -0.41232771, -0.31410098],\n [ 0.66854692, 0.79562378, 0.87755553, 0.92795967],\n [ 0.97934501, 0.99144213, 0.99646691, 0.99854353]])\n\nprint('next_h error: ', rel_error(expected_next_h, next_h))",
"_____no_output_____"
]
],
[
[
"# Vanilla RNN: step backward\nIn the file `cs231n/rnn_layers.py` implement the `rnn_step_backward` function. After doing so run the following to numerically gradient check your implementation. You should see errors on the order of `e-8` or less.",
"_____no_output_____"
]
],
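A matching sketch of the backward step, assuming the cache layout used in the forward sketch above; it backpropagates through the tanh nonlinearity using `tanh'(a) = 1 - tanh(a)^2`.

```python
def rnn_step_backward(dnext_h, cache):
    """Gradients of one timestep given the upstream gradient dnext_h (N, H)."""
    x, prev_h, Wx, Wh, next_h = cache
    da = dnext_h * (1.0 - next_h ** 2)    # backprop through tanh
    dx = da.dot(Wx.T)                     # (N, D)
    dprev_h = da.dot(Wh.T)                # (N, H)
    dWx = x.T.dot(da)                     # (D, H)
    dWh = prev_h.T.dot(da)                # (H, H)
    db = da.sum(axis=0)                   # (H,)
    return dx, dprev_h, dWx, dWh, db
```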
[
[
"from cs231n.rnn_layers import rnn_step_forward, rnn_step_backward\nnp.random.seed(231)\nN, D, H = 4, 5, 6\nx = np.random.randn(N, D)\nh = np.random.randn(N, H)\nWx = np.random.randn(D, H)\nWh = np.random.randn(H, H)\nb = np.random.randn(H)\n\nout, cache = rnn_step_forward(x, h, Wx, Wh, b)\n\ndnext_h = np.random.randn(*out.shape)\n\nfx = lambda x: rnn_step_forward(x, h, Wx, Wh, b)[0]\nfh = lambda prev_h: rnn_step_forward(x, h, Wx, Wh, b)[0]\nfWx = lambda Wx: rnn_step_forward(x, h, Wx, Wh, b)[0]\nfWh = lambda Wh: rnn_step_forward(x, h, Wx, Wh, b)[0]\nfb = lambda b: rnn_step_forward(x, h, Wx, Wh, b)[0]\n\ndx_num = eval_numerical_gradient_array(fx, x, dnext_h)\ndprev_h_num = eval_numerical_gradient_array(fh, h, dnext_h)\ndWx_num = eval_numerical_gradient_array(fWx, Wx, dnext_h)\ndWh_num = eval_numerical_gradient_array(fWh, Wh, dnext_h)\ndb_num = eval_numerical_gradient_array(fb, b, dnext_h)\n\ndx, dprev_h, dWx, dWh, db = rnn_step_backward(dnext_h, cache)\n\nprint('dx error: ', rel_error(dx_num, dx))\nprint('dprev_h error: ', rel_error(dprev_h_num, dprev_h))\nprint('dWx error: ', rel_error(dWx_num, dWx))\nprint('dWh error: ', rel_error(dWh_num, dWh))\nprint('db error: ', rel_error(db_num, db))",
"_____no_output_____"
]
],
[
[
"# Vanilla RNN: forward\nNow that you have implemented the forward and backward passes for a single timestep of a vanilla RNN, you will combine these pieces to implement a RNN that processes an entire sequence of data.\n\nIn the file `cs231n/rnn_layers.py`, implement the function `rnn_forward`. This should be implemented using the `rnn_step_forward` function that you defined above. After doing so run the following to check your implementation. You should see errors on the order of `e-7` or less.",
"_____no_output_____"
]
],
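A sketch of the full-sequence forward pass, built on the single-step function as the text suggests; shapes follow the notebook's conventions (x is (N, T, D), the returned h is (N, T, H)).

```python
import numpy as np

def rnn_forward(x, h0, Wx, Wh, b):
    """Run rnn_step_forward over all T timesteps, threading the hidden state."""
    N, T, D = x.shape
    H = h0.shape[1]
    h = np.zeros((N, T, H))
    cache = []
    prev_h = h0
    for t in range(T):
        prev_h, step_cache = rnn_step_forward(x[:, t, :], prev_h, Wx, Wh, b)
        h[:, t, :] = prev_h
        cache.append(step_cache)
    return h, cache
```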
[
[
"N, T, D, H = 2, 3, 4, 5\n\nx = np.linspace(-0.1, 0.3, num=N*T*D).reshape(N, T, D)\nh0 = np.linspace(-0.3, 0.1, num=N*H).reshape(N, H)\nWx = np.linspace(-0.2, 0.4, num=D*H).reshape(D, H)\nWh = np.linspace(-0.4, 0.1, num=H*H).reshape(H, H)\nb = np.linspace(-0.7, 0.1, num=H)\n\nh, _ = rnn_forward(x, h0, Wx, Wh, b)\nexpected_h = np.asarray([\n [\n [-0.42070749, -0.27279261, -0.11074945, 0.05740409, 0.22236251],\n [-0.39525808, -0.22554661, -0.0409454, 0.14649412, 0.32397316],\n [-0.42305111, -0.24223728, -0.04287027, 0.15997045, 0.35014525],\n ],\n [\n [-0.55857474, -0.39065825, -0.19198182, 0.02378408, 0.23735671],\n [-0.27150199, -0.07088804, 0.13562939, 0.33099728, 0.50158768],\n [-0.51014825, -0.30524429, -0.06755202, 0.17806392, 0.40333043]]])\nprint('h error: ', rel_error(expected_h, h))",
"_____no_output_____"
]
],
[
[
"# Vanilla RNN: backward\nIn the file `cs231n/rnn_layers.py`, implement the backward pass for a vanilla RNN in the function `rnn_backward`. This should run back-propagation over the entire sequence, making calls to the `rnn_step_backward` function that you defined earlier. You should see errors on the order of e-6 or less.",
"_____no_output_____"
]
],
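A sketch of the sequence backward pass built on the step function above. The key point is that the upstream gradient `dh` arrives at every timestep (each hidden state feeds the output layer), so the gradient entering step t is `dh[:, t]` plus the gradient carried back from step t+1.

```python
import numpy as np

def rnn_backward(dh, cache):
    """Backprop through rnn_forward; dh has shape (N, T, H)."""
    N, T, H = dh.shape
    D = cache[0][0].shape[1]              # x was saved first in each step cache
    dx = np.zeros((N, T, D))
    dWx, dWh, db = np.zeros((D, H)), np.zeros((H, H)), np.zeros(H)
    dprev_h = np.zeros((N, H))
    for t in reversed(range(T)):
        grad_in = dh[:, t, :] + dprev_h   # output gradient + carried-back gradient
        dx[:, t, :], dprev_h, dWx_t, dWh_t, db_t = rnn_step_backward(grad_in, cache[t])
        dWx += dWx_t
        dWh += dWh_t
        db += db_t
    dh0 = dprev_h                         # gradient flowing out of the first step
    return dx, dh0, dWx, dWh, db
```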
[
[
"np.random.seed(231)\n\nN, D, T, H = 2, 3, 10, 5\n\nx = np.random.randn(N, T, D)\nh0 = np.random.randn(N, H)\nWx = np.random.randn(D, H)\nWh = np.random.randn(H, H)\nb = np.random.randn(H)\n\nout, cache = rnn_forward(x, h0, Wx, Wh, b)\n\ndout = np.random.randn(*out.shape)\n\ndx, dh0, dWx, dWh, db = rnn_backward(dout, cache)\n\nfx = lambda x: rnn_forward(x, h0, Wx, Wh, b)[0]\nfh0 = lambda h0: rnn_forward(x, h0, Wx, Wh, b)[0]\nfWx = lambda Wx: rnn_forward(x, h0, Wx, Wh, b)[0]\nfWh = lambda Wh: rnn_forward(x, h0, Wx, Wh, b)[0]\nfb = lambda b: rnn_forward(x, h0, Wx, Wh, b)[0]\n\ndx_num = eval_numerical_gradient_array(fx, x, dout)\ndh0_num = eval_numerical_gradient_array(fh0, h0, dout)\ndWx_num = eval_numerical_gradient_array(fWx, Wx, dout)\ndWh_num = eval_numerical_gradient_array(fWh, Wh, dout)\ndb_num = eval_numerical_gradient_array(fb, b, dout)\n\nprint('dx error: ', rel_error(dx_num, dx))\nprint('dh0 error: ', rel_error(dh0_num, dh0))\nprint('dWx error: ', rel_error(dWx_num, dWx))\nprint('dWh error: ', rel_error(dWh_num, dWh))\nprint('db error: ', rel_error(db_num, db))",
"_____no_output_____"
]
],
[
[
"# Word embedding: forward\nIn deep learning systems, we commonly represent words using vectors. Each word of the vocabulary will be associated with a vector, and these vectors will be learned jointly with the rest of the system.\n\nIn the file `cs231n/rnn_layers.py`, implement the function `word_embedding_forward` to convert words (represented by integers) into vectors. Run the following to check your implementation. You should see an error on the order of `e-8` or less.",
"_____no_output_____"
]
],
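The embedding lookup reduces to NumPy fancy indexing; a minimal sketch:

```python
import numpy as np

def word_embedding_forward(x, W):
    """x holds integer word ids, shape (N, T); W is the (V, D) embedding matrix."""
    out = W[x]                 # fancy indexing -> shape (N, T, D)
    cache = (x, W.shape)
    return out, cache
```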
[
[
"N, T, V, D = 2, 4, 5, 3\n\nx = np.asarray([[0, 3, 1, 2], [2, 1, 0, 3]])\nW = np.linspace(0, 1, num=V*D).reshape(V, D)\n\nout, _ = word_embedding_forward(x, W)\nexpected_out = np.asarray([\n [[ 0., 0.07142857, 0.14285714],\n [ 0.64285714, 0.71428571, 0.78571429],\n [ 0.21428571, 0.28571429, 0.35714286],\n [ 0.42857143, 0.5, 0.57142857]],\n [[ 0.42857143, 0.5, 0.57142857],\n [ 0.21428571, 0.28571429, 0.35714286],\n [ 0., 0.07142857, 0.14285714],\n [ 0.64285714, 0.71428571, 0.78571429]]])\n\nprint('out error: ', rel_error(expected_out, out))",
"_____no_output_____"
]
],
[
[
"# Word embedding: backward\nImplement the backward pass for the word embedding function in the function `word_embedding_backward`. After doing so run the following to numerically gradient check your implementation. You should see an error on the order of `e-11` or less.",
"_____no_output_____"
]
],
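The backward pass scatter-adds each timestep's gradient into the row of `dW` for that word id. `np.add.at` is the idiomatic tool here because it accumulates correctly when the same id occurs more than once; a plain `dW[x] += dout` would silently drop repeats.

```python
import numpy as np

def word_embedding_backward(dout, cache):
    """Accumulate dout (N, T, D) into dW (V, D) at the rows indexed by x."""
    x, W_shape = cache
    dW = np.zeros(W_shape)
    np.add.at(dW, x, dout)     # unbuffered scatter-add handles repeated ids
    return dW
```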
[
[
"np.random.seed(231)\n\nN, T, V, D = 50, 3, 5, 6\nx = np.random.randint(V, size=(N, T))\nW = np.random.randn(V, D)\n\nout, cache = word_embedding_forward(x, W)\ndout = np.random.randn(*out.shape)\ndW = word_embedding_backward(dout, cache)\n\nf = lambda W: word_embedding_forward(x, W)[0]\ndW_num = eval_numerical_gradient_array(f, W, dout)\n\nprint('dW error: ', rel_error(dW, dW_num))",
"_____no_output_____"
]
],
[
[
"# Temporal Affine layer\nAt every timestep we use an affine function to transform the RNN hidden vector at that timestep into scores for each word in the vocabulary. Because this is very similar to the affine layer that you implemented in assignment 2, we have provided this function for you in the `temporal_affine_forward` and `temporal_affine_backward` functions in the file `cs231n/rnn_layers.py`. Run the following to perform numeric gradient checking on the implementation. You should see errors on the order of e-9 or less.",
"_____no_output_____"
]
],
[
[
"np.random.seed(231)\n\n# Gradient check for temporal affine layer\nN, T, D, M = 2, 3, 4, 5\nx = np.random.randn(N, T, D)\nw = np.random.randn(D, M)\nb = np.random.randn(M)\n\nout, cache = temporal_affine_forward(x, w, b)\n\ndout = np.random.randn(*out.shape)\n\nfx = lambda x: temporal_affine_forward(x, w, b)[0]\nfw = lambda w: temporal_affine_forward(x, w, b)[0]\nfb = lambda b: temporal_affine_forward(x, w, b)[0]\n\ndx_num = eval_numerical_gradient_array(fx, x, dout)\ndw_num = eval_numerical_gradient_array(fw, w, dout)\ndb_num = eval_numerical_gradient_array(fb, b, dout)\n\ndx, dw, db = temporal_affine_backward(dout, cache)\n\nprint('dx error: ', rel_error(dx_num, dx))\nprint('dw error: ', rel_error(dw_num, dw))\nprint('db error: ', rel_error(db_num, db))",
"_____no_output_____"
]
],
[
[
"# Temporal Softmax loss\nIn an RNN language model, at every timestep we produce a score for each word in the vocabulary. We know the ground-truth word at each timestep, so we use a softmax loss function to compute loss and gradient at each timestep. We sum the losses over time and average them over the minibatch.\n\nHowever there is one wrinkle: since we operate over minibatches and different captions may have different lengths, we append `<NULL>` tokens to the end of each caption so they all have the same length. We don't want these `<NULL>` tokens to count toward the loss or gradient, so in addition to scores and ground-truth labels our loss function also accepts a `mask` array that tells it which elements of the scores count towards the loss.\n\nSince this is very similar to the softmax loss function you implemented in assignment 1, we have implemented this loss function for you; look at the `temporal_softmax_loss` function in the file `cs231n/rnn_layers.py`.\n\nRun the following cell to sanity check the loss and perform numeric gradient checking on the function. You should see an error for dx on the order of e-7 or less.",
"_____no_output_____"
]
],
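Although the function is provided, the masking logic is worth seeing explicitly. The sketch below (mathematically equivalent to the masked softmax loss described above, with names chosen here for clarity) zeroes out the contribution of `<NULL>` positions in both the loss and the gradient.

```python
import numpy as np

def masked_temporal_softmax_loss(x, y, mask):
    """x: scores (N, T, V); y: ground-truth ids (N, T); mask: bool (N, T)."""
    N, T, V = x.shape
    x_flat = x.reshape(N * T, V)
    y_flat = y.reshape(N * T)
    mask_flat = mask.reshape(N * T)

    # Numerically stable softmax
    probs = np.exp(x_flat - x_flat.max(axis=1, keepdims=True))
    probs /= probs.sum(axis=1, keepdims=True)

    # Sum over time, average over the minibatch; masked positions contribute 0
    loss = -np.sum(mask_flat * np.log(probs[np.arange(N * T), y_flat])) / N

    dx_flat = probs.copy()
    dx_flat[np.arange(N * T), y_flat] -= 1
    dx_flat *= mask_flat[:, None] / N
    return loss, dx_flat.reshape(N, T, V)
```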
[
[
"# Sanity check for temporal softmax loss\nfrom cs231n.rnn_layers import temporal_softmax_loss\n\nN, T, V = 100, 1, 10\n\ndef check_loss(N, T, V, p):\n x = 0.001 * np.random.randn(N, T, V)\n y = np.random.randint(V, size=(N, T))\n mask = np.random.rand(N, T) <= p\n print(temporal_softmax_loss(x, y, mask)[0])\n \ncheck_loss(100, 1, 10, 1.0) # Should be about 2.3\ncheck_loss(100, 10, 10, 1.0) # Should be about 23\ncheck_loss(5000, 10, 10, 0.1) # Should be within 2.2-2.4\n\n# Gradient check for temporal softmax loss\nN, T, V = 7, 8, 9\n\nx = np.random.randn(N, T, V)\ny = np.random.randint(V, size=(N, T))\nmask = (np.random.rand(N, T) > 0.5)\n\nloss, dx = temporal_softmax_loss(x, y, mask, verbose=False)\n\ndx_num = eval_numerical_gradient(lambda x: temporal_softmax_loss(x, y, mask)[0], x, verbose=False)\n\nprint('dx error: ', rel_error(dx, dx_num))",
"_____no_output_____"
]
],
[
[
"# RNN for image captioning\nNow that you have implemented the necessary layers, you can combine them to build an image captioning model. Open the file `cs231n/classifiers/rnn.py` and look at the `CaptioningRNN` class.\n\nImplement the forward and backward pass of the model in the `loss` function. For now you only need to implement the case where `cell_type='rnn'` for vanialla RNNs; you will implement the LSTM case later. After doing so, run the following to check your forward pass using a small test case; you should see error on the order of `e-10` or less.",
"_____no_output_____"
]
],
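As an orientation aid, here is a hedged outline of the forward half of `loss` for the RNN case. The parameter names (`W_proj`, `b_proj`, `W_embed`, `Wx`, `Wh`, `b`, `W_vocab`, `b_vocab`) follow the class's parameter dictionary, and `affine_forward` is assumed to be the helper from the earlier assignment; the backward half chains the corresponding `*_backward` functions in reverse.

```python
# Sketch only, not a drop-in solution.
captions_in = captions[:, :-1]                 # words fed into the RNN
captions_out = captions[:, 1:]                 # words the RNN should predict
mask = (captions_out != self._null)            # ignore <NULL> positions

h0, proj_cache = affine_forward(features, W_proj, b_proj)         # (N, H)
emb, emb_cache = word_embedding_forward(captions_in, W_embed)     # (N, T, W)
h, rnn_cache = rnn_forward(emb, h0, Wx, Wh, b)                    # (N, T, H)
scores, out_cache = temporal_affine_forward(h, W_vocab, b_vocab)  # (N, T, V)
loss, dscores = temporal_softmax_loss(scores, captions_out, mask)
# The backward pass applies the corresponding *_backward functions in reverse.
```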
[
[
"N, D, W, H = 10, 20, 30, 40\nword_to_idx = {'<NULL>': 0, 'cat': 2, 'dog': 3}\nV = len(word_to_idx)\nT = 13\n\nmodel = CaptioningRNN(word_to_idx,\n input_dim=D,\n wordvec_dim=W,\n hidden_dim=H,\n cell_type='rnn',\n dtype=np.float64)\n\n# Set all model parameters to fixed values\nfor k, v in model.params.items():\n model.params[k] = np.linspace(-1.4, 1.3, num=v.size).reshape(*v.shape)\n\nfeatures = np.linspace(-1.5, 0.3, num=(N * D)).reshape(N, D)\ncaptions = (np.arange(N * T) % V).reshape(N, T)\n\nloss, grads = model.loss(features, captions)\nexpected_loss = 9.83235591003\n\nprint('loss: ', loss)\nprint('expected loss: ', expected_loss)\nprint('difference: ', abs(loss - expected_loss))",
"_____no_output_____"
]
],
[
[
"Run the following cell to perform numeric gradient checking on the `CaptioningRNN` class; you should see errors around the order of `e-6` or less.",
"_____no_output_____"
]
],
[
[
"np.random.seed(231)\n\nbatch_size = 2\ntimesteps = 3\ninput_dim = 4\nwordvec_dim = 5\nhidden_dim = 6\nword_to_idx = {'<NULL>': 0, 'cat': 2, 'dog': 3}\nvocab_size = len(word_to_idx)\n\ncaptions = np.random.randint(vocab_size, size=(batch_size, timesteps))\nfeatures = np.random.randn(batch_size, input_dim)\n\nmodel = CaptioningRNN(word_to_idx,\n input_dim=input_dim,\n wordvec_dim=wordvec_dim,\n hidden_dim=hidden_dim,\n cell_type='rnn',\n dtype=np.float64,\n )\n\nloss, grads = model.loss(features, captions)\n\nfor param_name in sorted(grads):\n f = lambda _: model.loss(features, captions)[0]\n param_grad_num = eval_numerical_gradient(f, model.params[param_name], verbose=False, h=1e-6)\n e = rel_error(param_grad_num, grads[param_name])\n print('%s relative error: %e' % (param_name, e))",
"_____no_output_____"
]
],
[
[
"# Overfit small data\nSimilar to the `Solver` class that we used to train image classification models on the previous assignment, on this assignment we use a `CaptioningSolver` class to train image captioning models. Open the file `cs231n/captioning_solver.py` and read through the `CaptioningSolver` class; it should look very familiar.\n\nOnce you have familiarized yourself with the API, run the following to make sure your model overfits a small sample of 100 training examples. You should see a final loss of less than 0.1.",
"_____no_output_____"
]
],
[
[
"np.random.seed(231)\n\nsmall_data = load_coco_data(max_train=50)\n\nsmall_rnn_model = CaptioningRNN(\n cell_type='rnn',\n word_to_idx=data['word_to_idx'],\n input_dim=data['train_features'].shape[1],\n hidden_dim=512,\n wordvec_dim=256,\n )\n\nsmall_rnn_solver = CaptioningSolver(small_rnn_model, small_data,\n update_rule='adam',\n num_epochs=50,\n batch_size=25,\n optim_config={\n 'learning_rate': 5e-3,\n },\n lr_decay=0.95,\n verbose=True, print_every=10,\n )\n\nsmall_rnn_solver.train()\n\n# Plot the training losses\nplt.plot(small_rnn_solver.loss_history)\nplt.xlabel('Iteration')\nplt.ylabel('Loss')\nplt.title('Training loss history')\nplt.show()",
"_____no_output_____"
]
],
[
[
"Print final training loss. You should see a final loss of less than 0.1.",
"_____no_output_____"
]
],
[
[
"print('Final loss: ', small_rnn_solver.loss_history[-1])",
"_____no_output_____"
]
],
[
[
"# Test-time sampling\nUnlike classification models, image captioning models behave very differently at training time and at test time. At training time, we have access to the ground-truth caption, so we feed ground-truth words as input to the RNN at each timestep. At test time, we sample from the distribution over the vocabulary at each timestep, and feed the sample as input to the RNN at the next timestep.\n\nIn the file `cs231n/classifiers/rnn.py`, implement the `sample` method for test-time sampling. After doing so, run the following to sample from your overfitted model on both training and validation data. The samples on training data should be very good; the samples on validation data probably won't make sense.",
"_____no_output_____"
]
],
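A hedged sketch of greedy decoding for the `'rnn'` cell type follows; taking the argmax at each step is the usual reading of "sample" here. Variable names mirror the class's parameters, and `max_length` is assumed to be the caption length cap.

```python
import numpy as np

# Sketch only; assumes the parameters and step function named in the notebook.
N = features.shape[0]
captions = self._null * np.ones((N, max_length), dtype=np.int32)

h = features.dot(W_proj) + b_proj                  # initial hidden state (N, H)
word = self._start * np.ones(N, dtype=np.int32)    # feed <START> at t = 0
for t in range(max_length):
    emb = W_embed[word]                            # (N, W) word vectors
    h, _ = rnn_step_forward(emb, h, Wx, Wh, b)
    scores = h.dot(W_vocab) + b_vocab              # (N, V) vocabulary scores
    word = scores.argmax(axis=1)                   # most likely next word
    captions[:, t] = word
```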
[
[
"for split in ['train', 'val']:\n minibatch = sample_coco_minibatch(small_data, split=split, batch_size=2)\n gt_captions, features, urls = minibatch\n gt_captions = decode_captions(gt_captions, data['idx_to_word'])\n\n sample_captions = small_rnn_model.sample(features)\n sample_captions = decode_captions(sample_captions, data['idx_to_word'])\n\n for gt_caption, sample_caption, url in zip(gt_captions, sample_captions, urls):\n plt.imshow(image_from_url(url))\n plt.title('%s\\n%s\\nGT:%s' % (split, sample_caption, gt_caption))\n plt.axis('off')\n plt.show()",
"_____no_output_____"
]
],
[
[
"# INLINE QUESTION 1\n\nIn our current image captioning setup, our RNN language model produces a word at every timestep as its output. However, an alternate way to pose the problem is to train the network to operate over _characters_ (e.g. 'a', 'b', etc.) as opposed to words, so that at it every timestep, it receives the previous character as input and tries to predict the next character in the sequence. For example, the network might generate a caption like\n\n'A', ' ', 'c', 'a', 't', ' ', 'o', 'n', ' ', 'a', ' ', 'b', 'e', 'd'\n\nCan you describe one advantage of an image-captioning model that uses a character-level RNN? Can you also describe one disadvantage? HINT: there are several valid answers, but it might be useful to compare the parameter space of word-level and character-level models.\n\n**Your Answer:** \n\n",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
e7f952d1cd40cf789e8b4fa4c7b048a3bceece6a | 12,326 | ipynb | Jupyter Notebook | docs/build/html/_downloads/717b2ab272afe0e7360766f751fcd5b0/plot_turbo.ipynb | YinLiu-91/acdecom | f3ed7900f25177e3f1f3dd7368c5441185a15421 | [
"MIT"
] | 3 | 2020-09-19T11:08:18.000Z | 2021-01-20T03:52:22.000Z | docs/build/html/_downloads/717b2ab272afe0e7360766f751fcd5b0/plot_turbo.ipynb | YinLiu-91/acdecom | f3ed7900f25177e3f1f3dd7368c5441185a15421 | [
"MIT"
] | null | null | null | docs/build/html/_downloads/717b2ab272afe0e7360766f751fcd5b0/plot_turbo.ipynb | YinLiu-91/acdecom | f3ed7900f25177e3f1f3dd7368c5441185a15421 | [
"MIT"
] | 5 | 2020-07-27T10:33:24.000Z | 2021-04-16T11:29:35.000Z | 40.81457 | 916 | 0.579101 | [
[
[
"%matplotlib inline",
"_____no_output_____"
]
],
[
[
"\nThe noise scattering at a compressor inlet and outlet\n==================================================\n\n In this example we extract the scattering of noise at a compressor inlet and outlet. In addition to measuring the\n pressure with flush-mounted microphones, we will use the temperature, and flow velocity that was acquired during the\n measurement. The data comes from a study performed at the\n `Competence Center of Gas Exchange (CCGEx) <https://www.ccgex.kth.se/>`_\\.\n",
"_____no_output_____"
],
[
"![](../../image/compressor.JPG)\n\n :width: 800\n\n",
"_____no_output_____"
],
[
"1. Initialization\n-----------------\nFirst, we import the packages needed for this example.\n\n",
"_____no_output_____"
]
],
[
[
"import numpy\nimport matplotlib.pyplot as plt\nimport acdecom",
"_____no_output_____"
]
],
[
[
"The compressor intake and outlet have a circular cross section of the radius 0.026 m and 0.028 m.\nThe highest frequency of interest is 3200 Hz.\n\n",
"_____no_output_____"
]
],
[
[
"section = \"circular\"\nradius_intake = 0.026 # m\nradius_outlet = 0.028 # m\nf_max = 3200 # Hz",
"_____no_output_____"
]
],
[
[
"During the test, test ducts were mounted to the intake and outlet. Those ducts were equipped with three microphones\neach. The first microphone had a distance to the intake of 0.73 m and 1.17 m to the outlet.\n\n",
"_____no_output_____"
]
],
[
[
"distance_intake = 0.073 # m\ndistance_outlet = 1.17 # m",
"_____no_output_____"
]
],
[
[
"To analyze the measurement data, we create objects for the intake and the outlet test pipes.\n\n",
"_____no_output_____"
]
],
[
[
"td_intake = acdecom.WaveGuide(dimensions=(radius_intake,), cross_section=section, f_max=f_max, damping=\"kirchoff\",\n distance=distance_intake, flip_flow=True)\ntd_outlet = acdecom.WaveGuide(dimensions=(radius_outlet,), cross_section=section, f_max=f_max, damping=\"kirchoff\",\n distance=distance_outlet)",
"_____no_output_____"
]
],
[
[
"<div class=\"alert alert-info\"><h4>Note</h4><p>The standard flow direction is in $P_+$ direction. Therefore, on the intake side, the Mach-number must be\n either set negative or the argument *flipFlow* must be set to *True*.</p></div>\n\n2. Sensor Positions\n-------------------\nWe define lists with microphone positions at the intake and outlet and assign them to the *WaveGuides*.\n\n",
"_____no_output_____"
]
],
[
[
"z_intake = [0, 0.043, 0.324] # m\nr_intake = [radius_intake, radius_intake, radius_intake] # m\nphi_intake = [0, 180, 0] # deg\n\nz_outlet = [0, 0.054, 0.284] # m\nr_outlet = [radius_outlet, radius_outlet, radius_outlet] # m\nphi_outlet = [0, 180, 0] # deg\n\ntd_intake.set_microphone_positions(z_intake, r_intake, phi_intake, cylindrical_coordinates=True)\ntd_outlet.set_microphone_positions(z_outlet, r_outlet, phi_outlet, cylindrical_coordinates=True)",
"_____no_output_____"
]
],
[
[
"3. Decomposition\n----------------\nNext, we read the measurement data. The measurement must be pre-processed in a format that is understood by the\n*WaveGuide* object. This is generally a numpy.ndArray, wherein the columns contain the measurement data, such\nas the measured frequency, the pressure values for that frequency, the bulk Mach-number, and the temperature.\nThe rows can be different frequencies or different sound excitations (cases). In this example the measurement was\npost-processed into the `turbo.txt <https://github.com/ssackMWL/acdecom/blob/master/examples/data/turbo.txt>`_\nfile and can be loaded with the `numpy.loadtxt <https://numpy.org/doc/stable/reference/generated/numpy.loadtxt.html>`_\nfunction.\n\n<div class=\"alert alert-info\"><h4>Note</h4><p>The pressure used for the decomposition must be pre-processed, for example to account for microphone calibration.</p></div>\n\n",
"_____no_output_____"
]
],
[
[
"pressure = numpy.loadtxt(\"data/turbo.txt\",dtype=complex, delimiter=\",\", skiprows=1)",
"_____no_output_____"
]
],
[
[
"We review the file's header to understand how the data is stored in our input file.\n\n",
"_____no_output_____"
]
],
[
[
"with open(\"data/turbo.txt\") as pressure_file:\n print(pressure_file.readline().split(\",\"))",
"_____no_output_____"
]
],
[
[
"The Mach-numbers at the intake and outlet are stored in columns 0 and 1, the temperatures in columns 2 and 3,\nand the frequency in column 4. The intake microphones (1, 2, and 3) are in columns 5, 6, and 7. The outlet\nmicrophones (3, 5, and 6) are in columns 8, 9, and 10. The case number is in the last column.\n\n",
"_____no_output_____"
]
],
[
[
"Machnumber_intake = 0\nMachnumber_outlet= 1\ntemperature_intake = 2\ntemperature_outlet = 3\nf = 4\nmics_intake = [5, 6, 7]\nmics_outlet = [8, 9, 10]\ncase = -1",
"_____no_output_____"
]
],
[
[
"Next, we decompose the sound-fields into the propagating modes. We decompose the sound-fields on the intake\nand outlet side of the duct, using the two *WaveGuide* objects defined earlier.\n\n",
"_____no_output_____"
]
],
[
[
"decomp_intake, headers_intake = td_intake.decompose(pressure, f, mics_intake, temperature_col=temperature_intake,\n case_col=case, Mach_col=Machnumber_intake)\n\ndecomp_outlet, headers_outlet = td_outlet.decompose(pressure, f, mics_outlet, temperature_col=temperature_outlet,\n case_col=case, Mach_col=Machnumber_outlet)",
"_____no_output_____"
]
],
[
[
".. note ::\n The decomposition may show warnings for ill-conditioned modal matrices. This typically happens for frequencies close\n to the cut-on of a mode. However, it can also indicate that the microphone array is unable to separate the\n modes. The condition number of the wave decomposition is stored in the data returned by\n :meth:`.WaveGuide.decompose` and should be checked in case a warning is triggered.\n\n4. Further Post-processing\n--------------------------\n\nWe can print the *headersDS* to see the names of the columns of the arrays that store the decomposed sound fields.\n\n\n",
"_____no_output_____"
]
],
[
[
"print(headers_intake)",
"_____no_output_____"
]
],
[
[
"We use that information to extract the modal data.\n\n",
"_____no_output_____"
]
],
[
[
"minusmodes = [1] # from headers_intake\nplusmodes = [0]",
"_____no_output_____"
]
],
[
[
"Furthermore, we acquire the unique decomposed frequency points.\n\n",
"_____no_output_____"
]
],
[
[
"frequs = numpy.abs(numpy.unique(decomp_intake[:, headers_intake.index(\"f\")]))\nnof = frequs.shape[0]",
"_____no_output_____"
]
],
[
[
"For each of the frequencies, we can compute the scattering matrix by solving a linear system of equations\n$S = p_+ p_-^{-1}$\\, where $S$ is the scattering matrix and $p_{\\pm}$ are matrices containing the\nacoustic modes placed in rows and the different test cases placed in columns.\n\n<div class=\"alert alert-info\"><h4>Note</h4><p>Details for the computation of the Scattering Matrix and the procedure to measure the different test-cases can be\n found in `this study <https://www.ingentaconnect.com/content/dav/aaua/2016/00000102/00000005/art00008>`_\\.</p></div>\n\n\n",
"_____no_output_____"
]
],
[
[
"S = numpy.zeros((2,2,nof),dtype = complex)\n\nfor fIndx, f in enumerate(frequs):\n frequ_rows = numpy.where(decomp_intake[:, headers_intake.index(\"f\")] == f)\n ppm_intake = decomp_intake[frequ_rows]\n ppm_outlet = decomp_outlet[frequ_rows]\n pp = numpy.concatenate((ppm_intake[:,plusmodes].T, ppm_outlet[:,plusmodes].T))\n pm = numpy.concatenate((ppm_intake[:,minusmodes].T, ppm_outlet[:,minusmodes].T))\n S[:,:,fIndx] = numpy.dot(pp,numpy.linalg.pinv(pm))",
"_____no_output_____"
]
],
[
[
"5. Plot\n-------\nFinally, we can plot the transmission and reflection coefficients at the intake and outlet.\n\n",
"_____no_output_____"
]
],
[
[
"plt.plot(frequs, numpy.abs(S[0, 0, :]), ls=\"-\", color=\"#67A3C1\", label=\"Reflection Intake\")\nplt.plot(frequs, numpy.abs(S[0, 1, :]), ls=\"--\", color=\"#67A3C1\", label=\"Transmission Intake\")\nplt.plot(frequs, numpy.abs(S[1, 1, :]), ls=\"-\", color=\"#D38D7B\", label=\"Reflection Outlet\")\nplt.plot(frequs, numpy.abs(S[1 ,0, :]), ls=\"--\", color=\"#D38D7B\", label=\"Transmission Outlet\")\nplt.xlabel(\"Frequency [Hz]\")\nplt.ylabel(\"Scattering Magnitude\")\nplt.xlim([300,3200])\nplt.ylim([0,1.1])\nplt.legend()\nplt.show()",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
e7f96571bafb13bc3fb6691bed32646479923626 | 52,897 | ipynb | Jupyter Notebook | 20210714/analysis_for_data.ipynb | Brook1711/openda1 | 1d67912083ecf60b04daa6d9cf377339d179b1aa | [
"Apache-2.0"
] | null | null | null | 20210714/analysis_for_data.ipynb | Brook1711/openda1 | 1d67912083ecf60b04daa6d9cf377339d179b1aa | [
"Apache-2.0"
] | null | null | null | 20210714/analysis_for_data.ipynb | Brook1711/openda1 | 1d67912083ecf60b04daa6d9cf377339d179b1aa | [
"Apache-2.0"
] | 1 | 2021-07-18T16:01:56.000Z | 2021-07-18T16:01:56.000Z | 35.549059 | 148 | 0.381534 | [
[
[
"import pandas as pd\nimport json \nimport numpy as np\nimport ast\nfrom datetime import datetime\nimport plotly.graph_objs as go\nfrom plotly.offline import plot\nimport plotly.offline as offline\nfrom pandas.core.indexes import interval\nimport re",
"_____no_output_____"
],
[
"df = pd.read_excel('./data/data.xlsx') \nfor row in range(len(df)):\n df._set_value(row, 'user', df.loc[row,'user'][-5:])\ndf",
"_____no_output_____"
],
[
"len(df)",
"_____no_output_____"
],
[
"# g_contest = df.groupby('contest_id')\npaper_list=[list(df.groupby('contest_id'))[i][0][9:] for i in range(len(df.groupby('contest_id')))]\npaper_list\n# user_list=[re.findall(r\"\\d+\",list(df.groupby('user'))[i][0]) for i in range(len(df.groupby('contest_id')))]\nuser_list = ['10075','10076']",
"_____no_output_____"
],
[
"re.findall(r\"\\d+\", 'mianyang/人文素养/10075')[0]",
"_____no_output_____"
],
[
"'mianyang/人文素养/10075'[-5:]",
"_____no_output_____"
],
[
"res_dic={}\nfor paper in paper_list:\n res_dic[paper]={}\n for user in user_list:\n res_dic[paper][user]={'index':[],'data':[]}\nres_dic",
"_____no_output_____"
],
[
"def remove_str_per_row(data_per_row):\n frame_list = ast.literal_eval(data_per_row)\n frame_dic_list = []\n for index in range(len(frame_list)):\n temp = json.loads(frame_list[index])\n if 'frame' in temp.keys():\n if 'data' in temp.keys():\n frame_dic_list.append(list(temp['frame']['data'].values())) \n else:\n frame_dic_list.append(list(temp['frame'].values())) \n else:\n frame_dic_list.append(temp) \n return frame_dic_list\n\ndef remove_str(df):\n return 0\n# ndf_ans_8_list = []\n# ndf_rm_frame = []\n# for i in range(len(df)):\n# dic_temp = self.remove_str_per_row(self.df.loc[i,'task_answers'])\n# ndf_ans_8_list.append(dic_temp)\n# new_dic_list = []\n# for dic in dic_temp:\n# dic = dic['frame']\n# new_dic = dic\n# new_dic_list.append(new_dic)\n# ndf_rm_frame.append(new_dic_list)",
"_____no_output_____"
],
[
"test_str= df.loc[0,'task_answers']\na = remove_str_per_row(test_str)\na[1]",
"_____no_output_____"
],
[
"a[2]['basic']",
"_____no_output_____"
],
[
"a[2]['sketch']",
"_____no_output_____"
],
[
"for row in range(len(df)):\n user_index = df.loc[row, 'user']\n paper_index = df.loc[row, 'contest_id'][9:]\n cnt = 0\n for task_ans in remove_str_per_row(df.loc[row, 'task_answers']):\n cnt += 1\n res_dic[paper_index][user_index]['index'].append('task_' + str(cnt))\n res_dic[paper_index][user_index]['data'].append(task_ans)",
"_____no_output_____"
],
[
"res_dic['人文素养'].keys()\nres_dic['人文素养']['10076']",
"_____no_output_____"
],
[
"for paper_index in res_dic.keys():\n Writer = pd.ExcelWriter(\"./output/\"+paper_index+\".xlsx\") \n print(paper_index)\n for user_index in res_dic[paper_index].keys():\n print(user_index)\n out_pd = pd.DataFrame(index = res_dic[paper_index][user_index]['index'], data = {'data': res_dic[paper_index][user_index]['data']})\n out_pd.to_excel(Writer,user_index)\n Writer.save()\n\n",
"人文素养\n10075\n10076\n学生试测问卷\n10075\n10076\n智能计算素养\n10075\n10076\n问题解决素养\n10075\n10076\n高中生问卷一\n10075\n10076\n高中生问卷三\n10075\n10076\n"
],
[
"a = {'a':'b'}\nstr(a.values())",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e7f97e3d992f4a0aaa0fc81f120e65893f9ed1f6 | 310,584 | ipynb | Jupyter Notebook | Dimensionality Reduction/PCA/PCA_MaxAbsScaler.ipynb | mohityogesh44/ds-seed | e124f0078faf97568951e19e4302451ad0c7cf6c | [
"Apache-2.0"
] | null | null | null | Dimensionality Reduction/PCA/PCA_MaxAbsScaler.ipynb | mohityogesh44/ds-seed | e124f0078faf97568951e19e4302451ad0c7cf6c | [
"Apache-2.0"
] | null | null | null | Dimensionality Reduction/PCA/PCA_MaxAbsScaler.ipynb | mohityogesh44/ds-seed | e124f0078faf97568951e19e4302451ad0c7cf6c | [
"Apache-2.0"
] | null | null | null | 250.673123 | 241,808 | 0.891115 | [
[
[
"# PCA with MaxAbsScaler",
"_____no_output_____"
],
[
"This code template is for simple Principal Component Analysis(PCA) along feature scaling via MaxAbsScaler in python for dimensionality reduction technique. It is used to decompose a multivariate dataset into a set of successive orthogonal components that explain a maximum amount of the variance.",
"_____no_output_____"
],
[
"### Required Packages",
"_____no_output_____"
]
],
[
[
"import warnings \nimport itertools\nimport numpy as np \nimport pandas as pd \nimport seaborn as se \nimport matplotlib.pyplot as plt\nfrom mpl_toolkits import mplot3d\nfrom sklearn.decomposition import PCA\nfrom sklearn.preprocessing import LabelEncoder, MaxAbsScaler\nwarnings.filterwarnings('ignore')",
"_____no_output_____"
]
],
[
[
"### Initialization\n\nFilepath of CSV file",
"_____no_output_____"
]
],
[
[
"#filepath\nfile_path= ''",
"_____no_output_____"
]
],
[
[
"List of features which are required for model training .",
"_____no_output_____"
]
],
[
[
"#x_values\nfeatures= []",
"_____no_output_____"
]
],
[
[
"Target feature for prediction.",
"_____no_output_____"
]
],
[
[
"#y_value\ntarget= ''",
"_____no_output_____"
]
],
[
[
"### Data Fetching\n\nPandas is an open-source, BSD-licensed library providing high-performance, easy-to-use data manipulation and data analysis tools.\n\nWe will use panda's library to read the CSV file using its storage path.And we use the head function to display the initial row or entry.",
"_____no_output_____"
]
],
[
[
"df=pd.read_csv(file_path)\ndf.head()",
"_____no_output_____"
]
],
[
[
"### Feature Selections\n\nIt is the process of reducing the number of input variables when developing a predictive model. Used to reduce the number of input variables to both reduce the computational cost of modelling and, in some cases, to improve the performance of the model.\n\nWe will assign all the required input features to X and target/outcome to Y.",
"_____no_output_____"
]
],
[
[
"X = df[features]\nY = df[target]",
"_____no_output_____"
]
],
[
[
"### Data Preprocessing\n\nSince the majority of the machine learning models in the Sklearn library doesn't handle string category data and Null value, we have to explicitly remove or replace null values. The below snippet have functions, which removes the null value if any exists. And convert the string classes data in the datasets by encoding them to integer classes.\n",
"_____no_output_____"
]
],
[
[
"def NullClearner(df):\n if(isinstance(df, pd.Series) and (df.dtype in [\"float64\",\"int64\"])):\n df.fillna(df.mean(),inplace=True)\n return df\n elif(isinstance(df, pd.Series)):\n df.fillna(df.mode()[0],inplace=True)\n return df\n else:return df\ndef EncodeX(df):\n return pd.get_dummies(df)",
"_____no_output_____"
],
[
"x=X.columns.to_list()\nfor i in x:\n X[i]=NullClearner(X[i]) \nX=EncodeX(X)\nY=NullClearner(Y)\nX.head()",
"_____no_output_____"
]
],
[
[
"#### Correlation Map\n\nIn order to check the correlation between the features, we will plot a correlation matrix. It is effective in summarizing a large amount of data where the goal is to see patterns.",
"_____no_output_____"
]
],
[
[
"f,ax = plt.subplots(figsize=(18, 18))\nmatrix = np.triu(X.corr())\nse.heatmap(X.corr(), annot=True, linewidths=.5, fmt= '.1f',ax=ax, mask=matrix)\nplt.show()",
"_____no_output_____"
]
],
[
[
"### Data Rescaling\n\nUsed sklearn.preprocessing.MaxAbsScaler\n\nScale each feature by its maximum absolute value.\nThis estimator scales and translates each feature individually such that the maximal absolute value of each feature in the training set will be 1.0. It does not shift/center the data, and thus does not destroy any sparsity.\n\nRead more at [scikit-learn.org](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.MaxAbsScaler.html)",
"_____no_output_____"
]
],
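[
[
"Before applying the scaler to this dataset, here is a tiny illustrative sketch (toy values, not from this dataset) of the property described above: after scaling, the maximum absolute value of each column is 1.0 and zero entries stay zero.\n\n```python\nimport numpy as np\nfrom sklearn.preprocessing import MaxAbsScaler\n\ntoy = np.array([[2.0, -1.0, 0.0],\n [4.0, 0.5, 0.0],\n [-1.0, -2.0, 3.0]])\nscaled = MaxAbsScaler().fit_transform(toy)\nprint(scaled) # each column divided by its max absolute value\nprint(np.abs(scaled).max(axis=0)) # -> [1. 1. 1.]\n```",
"_____no_output_____"
]
],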
[
[
"X_Scaled=MaxAbsScaler().fit_transform(X)\nX=pd.DataFrame(X_Scaled,columns=X.columns)\nX.head()",
"_____no_output_____"
]
],
[
[
"### Choosing the number of components\n\nA vital part of using PCA in practice is the ability to estimate how many components are needed to describe the data. This can be determined by looking at the cumulative explained variance ratio as a function of the number of components.\n\nThis curve quantifies how much of the total, dimensional variance is contained within the first N components. ",
"_____no_output_____"
]
],
[
[
"pcaComponents = PCA().fit(X_Scaled)\nplt.plot(np.cumsum(pcaComponents.explained_variance_ratio_))\nplt.xlabel('number of components')\nplt.ylabel('cumulative explained variance');",
"_____no_output_____"
]
],
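[
[
"One common heuristic (a sketch added here, not part of the original template) is to keep the smallest number of components that reach a chosen variance threshold, e.g. 95%:\n\n```python\ncumulative = np.cumsum(pcaComponents.explained_variance_ratio_)\nn_components_95 = int(np.argmax(cumulative >= 0.95)) + 1\nprint('components needed for 95% of the variance:', n_components_95)\n```",
"_____no_output_____"
]
],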
[
[
"#### Scree plot\nThe scree plot helps you to determine the optimal number of components. The eigenvalue of each component in the initial solution is plotted. Generally, you want to extract the components on the steep slope. The components on the shallow slope contribute little to the solution. ",
"_____no_output_____"
]
],
[
[
"PC_values = np.arange(pcaComponents.n_components_) + 1\nplt.plot(PC_values, pcaComponents.explained_variance_ratio_, 'ro-', linewidth=2)\nplt.title('Scree Plot')\nplt.xlabel('Principal Component')\nplt.ylabel('Proportion of Variance Explained')\nplt.show()",
"_____no_output_____"
]
],
[
[
"# Model\n\nPCA is used to decompose a multivariate dataset in a set of successive orthogonal components that explain a maximum amount of the variance. In scikit-learn, PCA is implemented as a transformer object that learns components in its fit method, and can be used on new data to project it on these components.\n\n#### Tunning parameters reference : \n[API](https://scikit-learn.org/stable/modules/generated/sklearn.decomposition.PCA.html)",
"_____no_output_____"
]
],
[
[
"pca = PCA(n_components=8)\npcaX = pd.DataFrame(data = pca.fit_transform(X_Scaled))",
"_____no_output_____"
]
],
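[
[
"Since PCA is a fitted transformer, the same `pca` object can also project new observations onto the learned components. A minimal sketch, where `X_new` is a hypothetical dataframe with the same columns as `X`:\n\n```python\nscaler = MaxAbsScaler().fit(X) # keep the training scaler so new data is scaled identically\nnew_components = pca.transform(scaler.transform(X_new)) # shape: (len(X_new), 8)\n```",
"_____no_output_____"
]
],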
[
[
"#### Output Dataframe",
"_____no_output_____"
]
],
[
[
"finalDf = pd.concat([pcaX, Y], axis = 1)\nfinalDf.head()",
"_____no_output_____"
]
],
[
[
"#### Creator: Snehaan Bhawal , Github: [Profile](https://github.com/Sbhawal)",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
e7f986aa7cd31850039eae3c0e03324e80197696 | 120,750 | ipynb | Jupyter Notebook | 03_first_predict_for_pl.ipynb | Grzechu11/covid-19_timeseries-PL | 241c758247af126c6eccfeb3c4a49584ffe6e614 | [
"MIT"
] | null | null | null | 03_first_predict_for_pl.ipynb | Grzechu11/covid-19_timeseries-PL | 241c758247af126c6eccfeb3c4a49584ffe6e614 | [
"MIT"
] | null | null | null | 03_first_predict_for_pl.ipynb | Grzechu11/covid-19_timeseries-PL | 241c758247af126c6eccfeb3c4a49584ffe6e614 | [
"MIT"
] | null | null | null | 345.988539 | 30,643 | 0.738692 | [
[
[
"import os\nimport pandas as pd\nimport numpy as np\nimport datetime\n\nfrom IPython import get_ipython\n\nfrom fbprophet import Prophet\n\nfrom sklearn.metrics import mean_absolute_error as mae\n\nimport matplotlib.pyplot as plt\nget_ipython().run_line_magic('matplotlib', 'inline')\n\nimport seaborn as sns\nsns.set()\n\nimport plotly\nimport plotly.graph_objs as go\nfrom plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot\nimport plotly.io as pio\ninit_notebook_mode(connected=True)",
"_____no_output_____"
],
[
"path = './data/time_series_20200323.csv'\ndf = pd.read_csv(path)\ndf['date'] = pd.to_datetime(df['date'])\ndf.head(10)",
"_____no_output_____"
],
[
"df_poland = df[df['country']=='Poland']\n\nprint(df_poland.size)\ndf_poland = df_poland[df_poland['date'] > pd.to_datetime('2020-03-01')]\nprint(df_poland.size)",
"427\n147\n"
],
[
"cut_days = 6\nmax_date = df_poland['date'].max()\nend_date = max_date + datetime.timedelta(days=-cut_days)\n\ntrain = df_poland[df_poland['date'] < end_date]\ntest = df_poland[df_poland['date'] >= end_date]\n\nprint(df_poland.size)\nprint(train.size)\nprint(test.size)",
"147\n98\n49\n"
],
[
"fig = go.Figure()\n\nfig.add_trace(go.Scatter(x=train.date, y=train.confirmed,\n mode='lines+markers',\n name='train'))\n\nfig.add_trace(go.Scatter(x=test.date, y=test.confirmed,\n mode='lines+markers',\n name='test'))\n \nplot(fig,filename='plots/pl_confirmed_to_train')",
"C:\\ProgramData\\Anaconda3\\lib\\site-packages\\plotly\\offline\\offline.py:526: UserWarning:\n\nYour filename `plots/pl_confirmed_to_train` didn't end with .html. Adding .html to the end of your file.\n\n"
],
[
"fb_df = train[['date', 'confirmed']].copy()\nfb_df.columns = ['ds', 'y']\n\nfb_df.head()",
"_____no_output_____"
],
[
"m = Prophet(weekly_seasonality=False)\nm.fit(fb_df)",
"INFO:fbprophet:Disabling yearly seasonality. Run prophet with yearly_seasonality=True to override this.\nINFO:fbprophet:Disabling daily seasonality. Run prophet with daily_seasonality=True to override this.\nINFO:fbprophet:n_changepoints greater than number of observations. Using 10.\n"
],
[
"future = m.make_future_dataframe(periods=len(test), freq='W', include_history=False)\nfuture.head()",
"_____no_output_____"
],
[
"forecast = m.predict(future)\nforecast.head()",
"_____no_output_____"
],
[
"fig = go.Figure()\n\nfig.add_trace(go.Scatter(x=train.date, y=train.confirmed,\n mode='lines+markers',\n name='train'))\n \n\nfig.add_trace(go.Scatter(x=test.date, y=test.confirmed,\n mode='lines+markers',\n name='test'))\n \nfig.add_trace(go.Scatter(x=test.date, y=forecast.yhat,\n mode='lines+markers',\n name='forecast'))\n\nplot(fig,filename='plots/pl_confirmed_to_predict')",
"C:\\ProgramData\\Anaconda3\\lib\\site-packages\\plotly\\offline\\offline.py:526: UserWarning:\n\nYour filename `plots/pl_confirmed_to_predict` didn't end with .html. Adding .html to the end of your file.\n\n"
],
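[
"# Sanity check (not in the original notebook): with a daily forecast horizon the\n# forecast has one row per day of the 7-day test window, so we can score it\n# against the held-out values using the MAE metric imported above.\nprint('test MAE:', mae(test.confirmed.values, forecast.yhat.values))",
"_____no_output_____"
]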
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e7f98ebf86a71ade2e8bba9ff796ef4fcf3b9530 | 117,853 | ipynb | Jupyter Notebook | BO_trials/multi_objective_bo.ipynb | michelleliu1027/Bayesian_PV | 5636980ae64712e7bf3c017ea4986fba7b858674 | [
"MIT"
] | 1 | 2021-09-08T07:51:19.000Z | 2021-09-08T07:51:19.000Z | BO_trials/multi_objective_bo.ipynb | michelleliu1027/Bayesian_PV | 5636980ae64712e7bf3c017ea4986fba7b858674 | [
"MIT"
] | 2 | 2021-09-29T19:11:20.000Z | 2021-09-29T23:23:22.000Z | BO_trials/multi_objective_bo.ipynb | michelleliu1027/Bayesian_PV | 5636980ae64712e7bf3c017ea4986fba7b858674 | [
"MIT"
] | 2 | 2021-09-19T05:24:32.000Z | 2021-12-06T03:39:10.000Z | 218.651206 | 62,366 | 0.897533 | [
[
[
"## Parallel, Multi-Objective BO in BoTorch with qEHVI and qParEGO\n\nIn this tutorial, we illustrate how to implement a simple multi-objective (MO) Bayesian Optimization (BO) closed loop in BoTorch.\n\nWe use the parallel ParEGO ($q$ParEGO) [1] and parallel Expected Hypervolume Improvement ($q$EHVI) [1] acquisition functions to optimize a synthetic Branin-Currin test function. The two objectives are\n\n$$f^{(1)}(x_1\\text{'}, x_2\\text{'}) = (x_2\\text{'} - \\frac{5.1}{4 \\pi^ 2} (x_1\\text{'})^2 + \\frac{5}{\\pi} x_1\\text{'} - r)^2 + 10 (1-\\frac{1}{8 \\pi}) \\cos(x_1\\text{'}) + 10$$\n\n$$f^{(2)}(x_1, x_2) = \\bigg[1 - \\exp\\bigg(-\\frac{1} {(2x_2)}\\bigg)\\bigg] \\frac{2300 x_1^3 + 1900x_1^2 + 2092 x_1 + 60}{100 x_1^3 + 500x_1^2 + 4x_1 + 20}$$\n\nwhere $x_1, x_2 \\in [0,1]$, $x_1\\text{'} = 15x_1 - 5$, and $x_2\\text{'} = 15x_2$ (parameter values can be found in `botorch/test_functions/multi_objective.py`).\n\nSince botorch assumes a maximization of all objectives, we seek to find the pareto frontier, the set of optimal trade-offs where improving one metric means deteriorating another.\n\n[1] [S. Daulton, M. Balandat, and E. Bakshy. Differentiable Expected Hypervolume Improvement for Parallel Multi-Objective Bayesian Optimization. Advances in Neural Information Processing Systems 33, 2020.](https://arxiv.org/abs/2006.05078)",
"_____no_output_____"
],
[
"### Set dtype and device\nNote: $q$EHVI aggressively exploits parallel hardware and is much faster when run on a GPU. See [1] for details.",
"_____no_output_____"
]
],
[
[
"import os\nimport torch\n\n\ntkwargs = {\n \"dtype\": torch.double,\n \"device\": torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\"),\n}\nSMOKE_TEST = os.environ.get(\"SMOKE_TEST\")",
"_____no_output_____"
]
],
[
[
"### Problem setup\n",
"_____no_output_____"
]
],
[
[
"from botorch.test_functions.multi_objective import BraninCurrin\n\n\nproblem = BraninCurrin(negate=True).to(**tkwargs)",
"_____no_output_____"
]
],
[
[
"#### Model initialization\n\nWe use a multi-output `SingleTaskGP` to model the two objectives with a homoskedastic Gaussian likelihood with an inferred noise level.\n\nThe models are initialized with $2(d+1)=6$ points drawn randomly from $[0,1]^2$.",
"_____no_output_____"
]
],
[
[
"from botorch.models.gp_regression import SingleTaskGP\nfrom botorch.models.transforms.outcome import Standardize\nfrom gpytorch.mlls.exact_marginal_log_likelihood import ExactMarginalLogLikelihood\nfrom botorch.utils.transforms import unnormalize\nfrom botorch.utils.sampling import draw_sobol_samples\n\n\ndef generate_initial_data(n=6):\n # generate training data\n train_x = draw_sobol_samples(\n bounds=problem.bounds,n=1, q=n, seed=torch.randint(1000000, (1,)).item()\n ).squeeze(0)\n train_obj = problem(train_x)\n return train_x, train_obj\n\n\ndef initialize_model(train_x, train_obj):\n # define models for objective and constraint\n model = SingleTaskGP(train_x, train_obj, outcome_transform=Standardize(m=train_obj.shape[-1]))\n mll = ExactMarginalLogLikelihood(model.likelihood, model)\n return mll, model",
"_____no_output_____"
]
],
[
[
"#### Define a helper function that performs the essential BO step for $q$EHVI\nThe helper function below initializes the $q$EHVI acquisition function, optimizes it, and returns the batch $\\{x_1, x_2, \\ldots x_q\\}$ along with the observed function values. \n\nFor this example, we'll use a small batch of $q=4$. Passing the keyword argument `sequential=True` to the function `optimize_acqf`specifies that candidates should be optimized in a sequential greedy fashion (see [1] for details why this is important). A simple initialization heuristic is used to select the 20 restart initial locations from a set of 1024 random points. Multi-start optimization of the acquisition function is performed using LBFGS-B with exact gradients computed via auto-differentiation.\n\n**Reference Point**\n\n$q$EHVI requires specifying a reference point, which is the lower bound on the objectives used for computing hypervolume. In this tutorial, we assume the reference point is known. In practice the reference point can be set 1) using domain knowledge to be slightly worse than the lower bound of objective values, where the lower bound is the minimum acceptable value of interest for each objective, or 2) using a dynamic reference point selection strategy.\n\n**Partitioning the Non-dominated Space into disjoint rectangles**\n\n$q$EHVI requires partitioning the non-dominated space into disjoint rectangles (see [1] for details). \n\n*Note:* `NondominatedPartitioning` *will be very slow when 1) there are a lot of points on the pareto frontier and 2) there are >3 objectives.*",
"_____no_output_____"
]
],
[
[
"from botorch.optim.optimize import optimize_acqf, optimize_acqf_list\nfrom botorch.acquisition.objective import GenericMCObjective\nfrom botorch.utils.multi_objective.scalarization import get_chebyshev_scalarization\nfrom botorch.utils.multi_objective.box_decompositions.non_dominated import NondominatedPartitioning\nfrom botorch.acquisition.multi_objective.monte_carlo import qExpectedHypervolumeImprovement\nfrom botorch.utils.sampling import sample_simplex\n\n\nBATCH_SIZE = 4 if not SMOKE_TEST else 2\nNUM_RESTARTS = 20 if not SMOKE_TEST else 2\nRAW_SAMPLES = 1024 if not SMOKE_TEST else 4\n\nstandard_bounds = torch.zeros(2, problem.dim, **tkwargs)\nstandard_bounds[1] = 1\n\n\ndef optimize_qehvi_and_get_observation(model, train_obj, sampler):\n \"\"\"Optimizes the qEHVI acquisition function, and returns a new candidate and observation.\"\"\"\n # partition non-dominated space into disjoint rectangles\n partitioning = NondominatedPartitioning(ref_point=problem.ref_point, Y=train_obj)\n acq_func = qExpectedHypervolumeImprovement(\n model=model,\n ref_point=problem.ref_point.tolist(), # use known reference point \n partitioning=partitioning,\n sampler=sampler,\n )\n # optimize\n candidates, _ = optimize_acqf(\n acq_function=acq_func,\n bounds=standard_bounds,\n q=BATCH_SIZE,\n num_restarts=NUM_RESTARTS,\n raw_samples=RAW_SAMPLES, # used for intialization heuristic\n options={\"batch_limit\": 5, \"maxiter\": 200, \"nonnegative\": True},\n sequential=True,\n )\n # observe new values \n new_x = unnormalize(candidates.detach(), bounds=problem.bounds)\n new_obj = problem(new_x)\n return new_x, new_obj",
"_____no_output_____"
]
],
[
[
"#### Define a helper function that performs the essential BO step for $q$ParEGO\nThe helper function below similarly initializes $q$ParEGO, optimizes it, and returns the batch $\\{x_1, x_2, \\ldots x_q\\}$ along with the observed function values. \n\n$q$ParEGO uses random augmented chebyshev scalarization with the `qExpectedImprovement` acquisition function. In the parallel setting ($q>1$), each candidate is optimized in sequential greedy fashion using a different random scalarization (see [1] for details).\n\nTo do this, we create a list of `qExpectedImprovement` acquisition functions, each with different random scalarization weights. The `optimize_acqf_list` method sequentially generates one candidate per acquisition function and conditions the next candidate (and acquisition function) on the previously selected pending candidates.",
"_____no_output_____"
]
],
[
[
"def optimize_qparego_and_get_observation(model, train_obj, sampler):\n \"\"\"Samples a set of random weights for each candidate in the batch, performs sequential greedy optimization \n of the qParEGO acquisition function, and returns a new candidate and observation.\"\"\"\n acq_func_list = []\n for _ in range(BATCH_SIZE):\n weights = sample_simplex(problem.num_objectives, **tkwargs).squeeze()\n objective = GenericMCObjective(get_chebyshev_scalarization(weights=weights, Y=train_obj))\n acq_func = qExpectedImprovement( # pyre-ignore: [28]\n model=model,\n objective=objective,\n best_f=objective(train_obj).max(),\n sampler=sampler,\n )\n acq_func_list.append(acq_func)\n # optimize\n candidates, _ = optimize_acqf_list(\n acq_function_list=acq_func_list,\n bounds=standard_bounds,\n num_restarts=NUM_RESTARTS,\n raw_samples=RAW_SAMPLES, # used for intialization heuristic\n options={\"batch_limit\": 5, \"maxiter\": 200},\n )\n # observe new values \n new_x = unnormalize(candidates.detach(), bounds=problem.bounds)\n new_obj = problem(new_x)\n return new_x, new_obj",
"_____no_output_____"
]
],
[
[
"### Perform Bayesian Optimization loop with $q$EHVI and $q$ParEGO\nThe Bayesian optimization \"loop\" for a batch size of $q$ simply iterates the following steps:\n1. given a surrogate model, choose a batch of points $\\{x_1, x_2, \\ldots x_q\\}$\n2. observe $f(x)$ for each $x$ in the batch \n3. update the surrogate model. \n\n\nJust for illustration purposes, we run three trials each of which do `N_BATCH=25` rounds of optimization. The acquisition function is approximated using `MC_SAMPLES=128` samples.\n\n*Note*: Running this may take a little while.",
"_____no_output_____"
]
],
[
[
"from botorch import fit_gpytorch_model\nfrom botorch.acquisition.monte_carlo import qExpectedImprovement, qNoisyExpectedImprovement\nfrom botorch.sampling.samplers import SobolQMCNormalSampler\nfrom botorch.exceptions import BadInitialCandidatesWarning\nfrom botorch.utils.multi_objective.pareto import is_non_dominated\nfrom botorch.utils.multi_objective.hypervolume import Hypervolume\n\nimport time\nimport warnings\n\n\nwarnings.filterwarnings('ignore', category=BadInitialCandidatesWarning)\nwarnings.filterwarnings('ignore', category=RuntimeWarning)\n\nN_TRIALS = 3 if not SMOKE_TEST else 2\nN_BATCH = 25 if not SMOKE_TEST else 3\nMC_SAMPLES = 128 if not SMOKE_TEST else 16\n\nverbose = False\n\nhvs_qparego_all, hvs_qehvi_all, hvs_random_all = [], [], []\n\nhv = Hypervolume(ref_point=problem.ref_point)\n\n\n# average over multiple trials\nfor trial in range(1, N_TRIALS + 1):\n torch.manual_seed(trial)\n \n print(f\"\\nTrial {trial:>2} of {N_TRIALS} \", end=\"\")\n hvs_qparego, hvs_qehvi, hvs_random = [], [], []\n \n # call helper functions to generate initial training data and initialize model\n train_x_qparego, train_obj_qparego = generate_initial_data(n=6)\n mll_qparego, model_qparego = initialize_model(train_x_qparego, train_obj_qparego)\n \n train_x_qehvi, train_obj_qehvi = train_x_qparego, train_obj_qparego\n train_x_random, train_obj_random = train_x_qparego, train_obj_qparego\n # compute hypervolume \n mll_qehvi, model_qehvi = initialize_model(train_x_qehvi, train_obj_qehvi)\n \n # compute pareto front\n pareto_mask = is_non_dominated(train_obj_qparego)\n pareto_y = train_obj_qparego[pareto_mask]\n # compute hypervolume\n \n volume = hv.compute(pareto_y)\n \n hvs_qparego.append(volume)\n hvs_qehvi.append(volume)\n hvs_random.append(volume)\n \n # run N_BATCH rounds of BayesOpt after the initial random batch\n for iteration in range(1, N_BATCH + 1): \n \n t0 = time.time()\n \n # fit the models\n fit_gpytorch_model(mll_qparego)\n fit_gpytorch_model(mll_qehvi)\n \n # define the qEI and qNEI acquisition modules using a QMC sampler\n qparego_sampler = SobolQMCNormalSampler(num_samples=MC_SAMPLES)\n qehvi_sampler = SobolQMCNormalSampler(num_samples=MC_SAMPLES)\n \n # optimize acquisition functions and get new observations\n new_x_qparego, new_obj_qparego = optimize_qparego_and_get_observation(\n model_qparego, train_obj_qparego, qparego_sampler\n )\n new_x_qehvi, new_obj_qehvi = optimize_qehvi_and_get_observation(\n model_qehvi, train_obj_qehvi, qehvi_sampler\n )\n new_x_random, new_obj_random = generate_initial_data(n=BATCH_SIZE)\n \n # update training points\n train_x_qparego = torch.cat([train_x_qparego, new_x_qparego])\n train_obj_qparego = torch.cat([train_obj_qparego, new_obj_qparego])\n\n train_x_qehvi = torch.cat([train_x_qehvi, new_x_qehvi])\n train_obj_qehvi = torch.cat([train_obj_qehvi, new_obj_qehvi])\n \n train_x_random = torch.cat([train_x_random, new_x_random])\n train_obj_random = torch.cat([train_obj_random, new_obj_random])\n \n\n # update progress\n for hvs_list, train_obj in zip(\n (hvs_random, hvs_qparego, hvs_qehvi), \n (train_obj_random, train_obj_qparego, train_obj_qehvi),\n ):\n # compute pareto front\n pareto_mask = is_non_dominated(train_obj)\n pareto_y = train_obj[pareto_mask]\n # compute hypervolume\n volume = hv.compute(pareto_y)\n hvs_list.append(volume)\n\n # reinitialize the models so they are ready for fitting on next iteration\n # Note: we find improved performance from not warm starting the model hyperparameters\n # using the hyperparameters from the 
previous iteration\n mll_qparego, model_qparego = initialize_model(train_x_qparego, train_obj_qparego)\n mll_qehvi, model_qehvi = initialize_model(train_x_qehvi, train_obj_qehvi)\n \n t1 = time.time()\n \n if verbose:\n print(\n f\"\\nBatch {iteration:>2}: Hypervolume (random, qParEGO, qEHVI) = \"\n f\"({hvs_random[-1]:>4.2f}, {hvs_qparego[-1]:>4.2f}, {hvs_qehvi[-1]:>4.2f}), \"\n f\"time = {t1-t0:>4.2f}.\", end=\"\"\n )\n else:\n print(\".\", end=\"\")\n \n hvs_qparego_all.append(hvs_qparego)\n hvs_qehvi_all.append(hvs_qehvi)\n hvs_random_all.append(hvs_random)",
"\nTrial 1 of 3 .........................\nTrial 2 of 3 .........................\nTrial 3 of 3 ........................."
]
],
[
[
"#### Plot the results\nThe plot below shows the a common metric of multi-objective optimization performance, the log hypervolume difference: the log difference between the hypervolume of the true pareto front and the hypervolume of the approximate pareto front identified by each algorithm. The log hypervolume difference is plotted at each step of the optimization for each of the algorithms. The confidence intervals represent the variance at that step in the optimization across the trial runs. The variance across optimization runs is quite high, so in order to get a better estimate of the average performance one would have to run a much larger number of trials `N_TRIALS` (we avoid this here to limit the runtime of this tutorial). \n\nThe plot show that $q$EHVI vastly outperforms the $q$ParEGO and Sobol baselines and has very low variance.",
"_____no_output_____"
]
],
[
[
"import numpy as np\nfrom matplotlib import pyplot as plt\n\n%matplotlib inline\n\n\ndef ci(y):\n return 1.96 * y.std(axis=0) / np.sqrt(N_TRIALS)\n\n\niters = np.arange(N_BATCH + 1) * BATCH_SIZE\nlog_hv_difference_qparego = np.log10(problem.max_hv - np.asarray(hvs_qparego_all))\nlog_hv_difference_qehvi = np.log10(problem.max_hv - np.asarray(hvs_qehvi_all))\nlog_hv_difference_rnd = np.log10(problem.max_hv - np.asarray(hvs_random_all))\n\nfig, ax = plt.subplots(1, 1, figsize=(8, 6))\nax.errorbar(\n iters, log_hv_difference_rnd.mean(axis=0), yerr=ci(log_hv_difference_rnd),\n label=\"Sobol\", linewidth=1.5,\n)\nax.errorbar(\n iters, log_hv_difference_qparego.mean(axis=0), yerr=ci(log_hv_difference_qparego),\n label=\"qParEGO\", linewidth=1.5,\n)\nax.errorbar(\n iters, log_hv_difference_qehvi.mean(axis=0), yerr=ci(log_hv_difference_qehvi),\n label=\"qEHVI\", linewidth=1.5,\n)\nax.set(xlabel='number of observations (beyond initial points)', ylabel='Log Hypervolume Difference')\nax.legend(loc=\"lower right\")",
"_____no_output_____"
]
],
[
[
"#### plot the observations colored by iteration\n\nTo examine optimization process from another perspective, we plot the collected observations under each algorithm where the color corresponds to the BO iteration at which the point was collected. The plot on the right for $q$EHVI shows that the $q$EHVI quickly identifies the pareto front and most of its evaluations are very close to the pareto front. $q$ParEGO also identifies has many observations close to the pareto front, but relies on optimizing random scalarizations, which is a less principled way of optimizing the pareto front compared to $q$EHVI, which explicitly attempts focuses on improving the pareto front. Sobol generates random points and has few points close to the pareto front",
"_____no_output_____"
]
],
[
[
"from matplotlib.cm import ScalarMappable\n\n\nfig, axes = plt.subplots(1, 3, figsize=(17, 5))\nalgos = [\"Sobol\", \"qParEGO\", \"qEHVI\"]\ncm = plt.cm.get_cmap('viridis')\n\nbatch_number = torch.cat(\n [torch.zeros(6), torch.arange(1, N_BATCH+1).repeat(BATCH_SIZE, 1).t().reshape(-1)]\n).numpy()\nfor i, train_obj in enumerate((train_obj_random, train_obj_qparego, train_obj_qehvi)):\n sc = axes[i].scatter(\n train_obj[:, 0].cpu().numpy(), train_obj[:,1].cpu().numpy(), c=batch_number, alpha=0.8,\n )\n axes[i].set_title(algos[i])\n axes[i].set_xlabel(\"Objective 1\")\n axes[i].set_xlim(-260, 5)\n axes[i].set_ylim(-15, 0)\naxes[0].set_ylabel(\"Objective 2\")\nnorm = plt.Normalize(batch_number.min(), batch_number.max())\nsm = ScalarMappable(norm=norm, cmap=cm)\nsm.set_array([])\nfig.subplots_adjust(right=0.9)\ncbar_ax = fig.add_axes([0.93, 0.15, 0.01, 0.7])\ncbar = fig.colorbar(sm, cax=cbar_ax)\ncbar.ax.set_title(\"Iteration\")",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
e7f99a1d8ad714a66ce47e93db89d0d29af182a1 | 25,777 | ipynb | Jupyter Notebook | aws/aws.ipynb | datascienceandml/data-science-ipython-notebooks | e1259144abc174c7bc4aacda0bab3578c92d7b24 | [
"Apache-2.0"
] | 5 | 2017-07-01T05:50:48.000Z | 2021-11-16T11:16:08.000Z | aws/aws.ipynb | ChristosChristofidis/data-science-ipython-notebooks | 2731cdc456f6e5bd8314f9987cb90aa681be1252 | [
"Apache-2.0"
] | null | null | null | aws/aws.ipynb | ChristosChristofidis/data-science-ipython-notebooks | 2731cdc456f6e5bd8314f9987cb90aa681be1252 | [
"Apache-2.0"
] | 10 | 2016-01-04T17:49:04.000Z | 2020-12-18T19:21:32.000Z | 27.306144 | 427 | 0.555728 | [
[
[
"<small><i>This notebook was prepared by [Donne Martin](http://donnemartin.com). Source and license info is on [GitHub](https://github.com/donnemartin/data-science-ipython-notebooks).</i></small>",
"_____no_output_____"
],
[
"# Amazon Web Services (AWS)\n\n* SSH to EC2\n* S3cmd\n* s3-parallel-put\n* S3DistCp\n* Redshift\n* Kinesis\n* Lambda",
"_____no_output_____"
],
[
"<h2 id=\"ssh-to-ec2\">SSH to EC2</h2>",
"_____no_output_____"
],
[
"Connect to an Ubuntu EC2 instance through SSH with the given key:",
"_____no_output_____"
]
],
[
[
"!ssh -i key.pem ubuntu@ipaddress",
"_____no_output_____"
]
],
[
[
"Connect to an Amazon Linux EC2 instance through SSH with the given key:",
"_____no_output_____"
]
],
[
[
"!ssh -i key.pem ec2-user@ipaddress",
"_____no_output_____"
]
],
[
[
"<h2 id=\"s3cmd\">S3cmd</h2>\n\nBefore I discovered [S3cmd](http://s3tools.org/s3cmd), I had been using the [S3 console](http://aws.amazon.com/console/) to do basic operations and [boto](https://boto.readthedocs.org/en/latest/) to do more of the heavy lifting. However, sometimes I just want to hack away at a command line to do my work.\n\nI've found S3cmd to be a great command line tool for interacting with S3 on AWS. S3cmd is written in Python, is open source, and is free even for commercial use. It offers more advanced features than those found in the [AWS CLI](http://aws.amazon.com/cli/).",
"_____no_output_____"
],
[
"Install s3cmd:",
"_____no_output_____"
]
],
[
[
"!sudo apt-get install s3cmd",
"_____no_output_____"
]
],
[
[
"Running the following command will prompt you to enter your AWS access and AWS secret keys. To follow security best practices, make sure you are using an IAM account as opposed to using the root account.\n\nI also suggest enabling GPG encryption which will encrypt your data at rest, and enabling HTTPS to encrypt your data in transit. Note this might impact performance.",
"_____no_output_____"
]
],
[
[
"!s3cmd --configure",
"_____no_output_____"
]
],
[
[
"Frequently used S3cmds:",
"_____no_output_____"
]
],
[
[
"# List all buckets\n!s3cmd ls\n\n# List the contents of the bucket\n!s3cmd ls s3://my-bucket-name\n\n# Upload a file into the bucket (private)\n!s3cmd put myfile.txt s3://my-bucket-name/myfile.txt\n\n# Upload a file into the bucket (public)\n!s3cmd put --acl-public --guess-mime-type myfile.txt s3://my-bucket-name/myfile.txt\n\n# Recursively upload a directory to s3\n!s3cmd put --recursive my-local-folder-path/ s3://my-bucket-name/mydir/\n\n# Download a file\n!s3cmd get s3://my-bucket-name/myfile.txt myfile.txt\n\n# Recursively download files that start with myfile\n!s3cmd --recursive get s3://my-bucket-name/myfile\n\n# Delete a file\n!s3cmd del s3://my-bucket-name/myfile.txt\n\n# Delete a bucket\n!s3cmd del --recursive s3://my-bucket-name/\n\n# Create a bucket\n!s3cmd mb s3://my-bucket-name\n\n# List bucket disk usage (human readable)\n!s3cmd du -H s3://my-bucket-name/\n\n# Sync local (source) to s3 bucket (destination)\n!s3cmd sync my-local-folder-path/ s3://my-bucket-name/\n\n# Sync s3 bucket (source) to local (destination)\n!s3cmd sync s3://my-bucket-name/ my-local-folder-path/\n\n# Do a dry-run (do not perform actual sync, but get information about what would happen)\n!s3cmd --dry-run sync s3://my-bucket-name/ my-local-folder-path/\n\n# Apply a standard shell wildcard include to sync s3 bucket (source) to local (destination)\n!s3cmd --include '2014-05-01*' sync s3://my-bucket-name/ my-local-folder-path/",
"_____no_output_____"
]
],
[
[
"<h2 id=\"s3-parallel-put\">s3-parallel-put</h2>\n\n[s3-parallel-put](https://github.com/twpayne/s3-parallel-put.git) is a great tool for uploading multiple files to S3 in parallel.",
"_____no_output_____"
],
[
"Install package dependencies:",
"_____no_output_____"
]
],
[
[
"!sudo apt-get install boto\n!sudo apt-get install git",
"_____no_output_____"
]
],
[
[
"Clone the s3-parallel-put repo:",
"_____no_output_____"
]
],
[
[
"!git clone https://github.com/twpayne/s3-parallel-put.git",
"_____no_output_____"
]
],
[
[
"Setup AWS keys for s3-parallel-put:",
"_____no_output_____"
]
],
[
[
"!export AWS_ACCESS_KEY_ID=XXX\n!export AWS_SECRET_ACCESS_KEY=XXX",
"_____no_output_____"
]
],
[
[
"Sample usage:",
"_____no_output_____"
]
],
[
[
"!s3-parallel-put --bucket=bucket --prefix=PREFIX SOURCE",
"_____no_output_____"
]
],
[
[
"Dry run of putting files in the current directory on S3 with the given S3 prefix, do not check first if they exist:",
"_____no_output_____"
]
],
[
[
"!s3-parallel-put --bucket=bucket --host=s3.amazonaws.com --put=stupid --dry-run --prefix=prefix/ ./",
"_____no_output_____"
]
],
[
[
"<h2 id=\"s3distcp\">S3DistCp</h2>\n\n[S3DistCp](http://docs.aws.amazon.com/ElasticMapReduce/latest/DeveloperGuide/UsingEMR_s3distcp.html) is an extension of DistCp that is optimized to work with Amazon S3. S3DistCp is useful for combining smaller files and aggregate them together, taking in a pattern and target file to combine smaller input files to larger ones. S3DistCp can also be used to transfer large volumes of data from S3 to your Hadoop cluster.",
"_____no_output_____"
],
[
"To run S3DistCp with the EMR command line, ensure you are using the proper version of Ruby:",
"_____no_output_____"
]
],
[
[
"!rvm --default ruby-1.8.7-p374",
"_____no_output_____"
]
],
[
[
"The EMR command line below executes the following:\n* Create a master node and slave nodes of type m1.small\n* Runs S3DistCp on the source bucket location and concatenates files that match the date regular expression, resulting in files that are roughly 1024 MB or 1 GB\n* Places the results in the destination bucket",
"_____no_output_____"
]
],
[
[
"!./elastic-mapreduce --create --instance-group master --instance-count 1 \\\n--instance-type m1.small --instance-group core --instance-count 4 \\\n--instance-type m1.small --jar /home/hadoop/lib/emr-s3distcp-1.0.jar \\\n--args \"--src,s3://my-bucket-source/,--groupBy,.*([0-9]{4}-01).*,\\\n--dest,s3://my-bucket-dest/,--targetSize,1024\"",
"_____no_output_____"
]
],
[
[
"For further optimization, compression can be helpful to save on AWS storage and bandwidth costs, to speed up the S3 to/from EMR transfer, and to reduce disk I/O. Note that compressed files are not easy to split for Hadoop. For example, Hadoop uses a single mapper per GZIP file, as it does not know about file boundaries.\n\nWhat type of compression should you use?\n\n* Time sensitive job: Snappy or LZO\n* Large amounts of data: GZIP\n* General purpose: GZIP, as it’s supported by most platforms\n\nYou can specify the compression codec (gzip, lzo, snappy, or none) to use for copied files with S3DistCp with –outputCodec. If no value is specified, files are copied with no compression change. The code below sets the compression to lzo:",
"_____no_output_____"
]
],
[
[
"--outputCodec,lzo",
"_____no_output_____"
]
],
[
[
"<h2 id=\"redshift\">Redshift</h2>",
"_____no_output_____"
],
[
"Copy values from the given S3 location containing CSV files to a Redshift cluster:",
"_____no_output_____"
]
],
[
[
"copy table_name from 's3://source/part'\ncredentials 'aws_access_key_id=XXX;aws_secret_access_key=XXX'\ncsv;",
"_____no_output_____"
]
],
[
[
"Copy values from the given location containing TSV files to a Redshift cluster:",
"_____no_output_____"
]
],
[
[
"copy table_name from 's3://source/part'\ncredentials 'aws_access_key_id=XXX;aws_secret_access_key=XXX'\ncsv delimiter '\\t';",
"_____no_output_____"
]
],
[
[
"View Redshift errors:",
"_____no_output_____"
]
],
[
[
"select * from stl_load_errors;",
"_____no_output_____"
]
],
[
[
"Vacuum Redshift in full:",
"_____no_output_____"
]
],
[
[
"VACUUM FULL;",
"_____no_output_____"
]
],
[
[
"Analyze the compression of a table:",
"_____no_output_____"
]
],
[
[
"analyze compression table_name;",
"_____no_output_____"
]
],
[
[
"Cancel the query with the specified id:",
"_____no_output_____"
]
],
[
[
"cancel 18764;",
"_____no_output_____"
]
],
[
[
"The CANCEL command will not abort a transaction. To abort or roll back a transaction, you must use the ABORT or ROLLBACK command. To cancel a query associated with a transaction, first cancel the query then abort the transaction.\n\nIf the query that you canceled is associated with a transaction, use the ABORT or ROLLBACK. command to cancel the transaction and discard any changes made to the data:",
"_____no_output_____"
]
],
[
[
"abort;",
"_____no_output_____"
]
],
[
[
"Reference table creation and setup:",
"_____no_output_____"
],
[
"![alt text](http://docs.aws.amazon.com/redshift/latest/dg/images/tutorial-optimize-tables-ssb-data-model.png)",
"_____no_output_____"
]
],
[
[
"CREATE TABLE part (\n p_partkey integer not null sortkey distkey,\n p_name varchar(22) not null,\n p_mfgr varchar(6) not null,\n p_category varchar(7) not null,\n p_brand1 varchar(9) not null,\n p_color varchar(11) not null,\n p_type varchar(25) not null,\n p_size integer not null,\n p_container varchar(10) not null\n);\n\nCREATE TABLE supplier (\n s_suppkey integer not null sortkey,\n s_name varchar(25) not null,\n s_address varchar(25) not null,\n s_city varchar(10) not null,\n s_nation varchar(15) not null,\n s_region varchar(12) not null,\n s_phone varchar(15) not null)\ndiststyle all;\n\nCREATE TABLE customer (\n c_custkey integer not null sortkey,\n c_name varchar(25) not null,\n c_address varchar(25) not null,\n c_city varchar(10) not null,\n c_nation varchar(15) not null,\n c_region varchar(12) not null,\n c_phone varchar(15) not null,\n c_mktsegment varchar(10) not null)\ndiststyle all;\n\nCREATE TABLE dwdate (\n d_datekey integer not null sortkey,\n d_date varchar(19) not null,\n d_dayofweek varchar(10) not null,\n d_month varchar(10) not null,\n d_year integer not null,\n d_yearmonthnum integer not null,\n d_yearmonth varchar(8) not null,\n d_daynuminweek integer not null,\n d_daynuminmonth integer not null,\n d_daynuminyear integer not null,\n d_monthnuminyear integer not null,\n d_weeknuminyear integer not null,\n d_sellingseason varchar(13) not null,\n d_lastdayinweekfl varchar(1) not null,\n d_lastdayinmonthfl varchar(1) not null,\n d_holidayfl varchar(1) not null,\n d_weekdayfl varchar(1) not null)\ndiststyle all;\n\nCREATE TABLE lineorder (\n lo_orderkey integer not null,\n lo_linenumber integer not null,\n lo_custkey integer not null,\n lo_partkey integer not null distkey,\n lo_suppkey integer not null,\n lo_orderdate integer not null sortkey,\n lo_orderpriority varchar(15) not null,\n lo_shippriority varchar(1) not null,\n lo_quantity integer not null,\n lo_extendedprice integer not null,\n lo_ordertotalprice integer not null,\n lo_discount integer not null,\n lo_revenue integer not null,\n lo_supplycost integer not null,\n lo_tax integer not null,\n lo_commitdate integer not null,\n lo_shipmode varchar(10) not null\n);",
"_____no_output_____"
]
],
[
[
"| Table name | Sort Key | Distribution Style |\n|------------|--------------|--------------------|\n| LINEORDER | lo_orderdate | lo_partkey |\n| PART | p_partkey | p_partkey |\n| CUSTOMER | c_custkey | ALL |\n| SUPPLIER | s_suppkey | ALL |\n| DWDATE | d_datekey | ALL |",
"_____no_output_____"
],
[
"[Sort Keys](http://docs.aws.amazon.com/redshift/latest/dg/tutorial-tuning-tables-sort-keys.html)\n\nWhen you create a table, you can specify one or more columns as the sort key. Amazon Redshift stores your data on disk in sorted order according to the sort key. How your data is sorted has an important effect on disk I/O, columnar compression, and query performance.\n\nChoose sort keys for based on these best practices:\n\nIf recent data is queried most frequently, specify the timestamp column as the leading column for the sort key.\n\nIf you do frequent range filtering or equality filtering on one column, specify that column as the sort key.\n\nIf you frequently join a (dimension) table, specify the join column as the sort key.",
"_____no_output_____"
],
[
"[Distribution Styles](http://docs.aws.amazon.com/redshift/latest/dg/c_choosing_dist_sort.html)\n\nWhen you create a table, you designate one of three distribution styles: KEY, ALL, or EVEN.\n\n**KEY distribution**\n\nThe rows are distributed according to the values in one column. The leader node will attempt to place matching values on the same node slice. If you distribute a pair of tables on the joining keys, the leader node collocates the rows on the slices according to the values in the joining columns so that matching values from the common columns are physically stored together.\n\n**ALL distribution**\n\nA copy of the entire table is distributed to every node. Where EVEN distribution or KEY distribution place only a portion of a table's rows on each node, ALL distribution ensures that every row is collocated for every join that the table participates in.\n\n**EVEN distribution**\n\nThe rows are distributed across the slices in a round-robin fashion, regardless of the values in any particular column. EVEN distribution is appropriate when a table does not participate in joins or when there is not a clear choice between KEY distribution and ALL distribution. EVEN distribution is the default distribution style.",
"_____no_output_____"
],
[
"<h2 id=\"kinesis\">Kinesis</h2>",
"_____no_output_____"
],
[
"Create a stream:",
"_____no_output_____"
]
],
[
[
"!aws kinesis create-stream --stream-name Foo --shard-count 1 --profile adminuser",
"_____no_output_____"
]
],
[
[
"List all streams:",
"_____no_output_____"
]
],
[
[
"!aws kinesis list-streams --profile adminuser",
"_____no_output_____"
]
],
[
[
"Get info about the stream:",
"_____no_output_____"
]
],
[
[
"!aws kinesis describe-stream --stream-name Foo --profile adminuser",
"_____no_output_____"
]
],
[
[
"Put a record to the stream:",
"_____no_output_____"
]
],
[
[
"!aws kinesis put-record --stream-name Foo --data \"SGVsbG8sIHRoaXMgaXMgYSB0ZXN0IDEyMy4=\" --partition-key shardId-000000000000 --region us-east-1 --profile adminuser",
"_____no_output_____"
]
],
[
[
"Get records from a given shard:",
"_____no_output_____"
]
],
[
[
"!SHARD_ITERATOR=$(aws kinesis get-shard-iterator --shard-id shardId-000000000000 --shard-iterator-type TRIM_HORIZON --stream-name Foo --query 'ShardIterator' --profile adminuser)\naws kinesis get-records --shard-iterator $SHARD_ITERATOR",
"_____no_output_____"
]
],
[
[
"Delete a stream:",
"_____no_output_____"
]
],
[
[
"!aws kinesis delete-stream --stream-name Foo --profile adminuser",
"_____no_output_____"
]
],
[
[
"<h2 id=\"lambda\">Lambda</h2>",
"_____no_output_____"
],
[
"List lambda functions:",
"_____no_output_____"
]
],
[
[
"!aws lambda list-functions \\\n --region us-east-1 \\\n --max-items 10",
"_____no_output_____"
]
],
[
[
"Upload a lambda function:",
"_____no_output_____"
]
],
[
[
"!aws lambda upload-function \\\n --region us-east-1 \\\n --function-name foo \\\n --function-zip file-path/foo.zip \\\n --role IAM-role-ARN \\\n --mode event \\\n --handler foo.handler \\\n --runtime nodejs \\\n --debug",
"_____no_output_____"
]
],
[
[
"Invoke a lambda function:",
"_____no_output_____"
]
],
[
[
"!aws lambda invoke-async \\\n --function-name foo \\\n --region us-east-1 \\\n --invoke-args foo.txt \\\n --debug",
"_____no_output_____"
]
],
[
[
"Return metadata for a specific function:",
"_____no_output_____"
]
],
[
[
"!aws lambda get-function-configuration \\\n --function-name helloworld \\\n --region us-east-1 \\\n --debug",
"_____no_output_____"
]
],
[
[
"Return metadata for a specific function along with a presigned URL that you can use to download the function's .zip file that you uploaded:",
"_____no_output_____"
]
],
[
[
"!aws lambda get-function \\\n --function-name helloworld \\\n --region us-east-1 \\\n --debug",
"_____no_output_____"
]
],
[
[
"Add an event source:",
"_____no_output_____"
]
],
[
[
"!aws lambda add-event-source \\\n --region us-east-1 \\\n --function-name ProcessKinesisRecords \\\n --role invocation-role-arn \\\n --event-source kinesis-stream-arn \\\n --batch-size 100 \\\n --profile adminuser",
"_____no_output_____"
]
],
[
[
"Delete a lambda function:",
"_____no_output_____"
]
],
[
[
"!aws lambda delete-function \\\n --function-name helloworld \\\n --region us-east-1 \\\n --debug",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
e7f9aaf8ca36a2b77b767e0bf9cececb6027edb6 | 362,814 | ipynb | Jupyter Notebook | day2_visualization.ipynb | lat-lukasz/dw_matrix_car | 3b32e3ed34907420cf1efcb22b5f0cef041ea7ea | [
"MIT"
] | null | null | null | day2_visualization.ipynb | lat-lukasz/dw_matrix_car | 3b32e3ed34907420cf1efcb22b5f0cef041ea7ea | [
"MIT"
] | null | null | null | day2_visualization.ipynb | lat-lukasz/dw_matrix_car | 3b32e3ed34907420cf1efcb22b5f0cef041ea7ea | [
"MIT"
] | null | null | null | 362,814 | 362,814 | 0.91712 | [
[
[
"!pip install --upgrade tables",
"Collecting tables\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/ed/c3/8fd9e3bb21872f9d69eb93b3014c86479864cca94e625fd03713ccacec80/tables-3.6.1-cp36-cp36m-manylinux1_x86_64.whl (4.3MB)\n\u001b[K |████████████████████████████████| 4.3MB 2.8MB/s \n\u001b[?25hRequirement already satisfied, skipping upgrade: numexpr>=2.6.2 in /usr/local/lib/python3.6/dist-packages (from tables) (2.7.1)\nRequirement already satisfied, skipping upgrade: numpy>=1.9.3 in /usr/local/lib/python3.6/dist-packages (from tables) (1.17.5)\nInstalling collected packages: tables\n Found existing installation: tables 3.4.4\n Uninstalling tables-3.4.4:\n Successfully uninstalled tables-3.4.4\nSuccessfully installed tables-3.6.1\n"
],
[
"ls",
"\u001b[0m\u001b[01;34mdrive\u001b[0m/ \u001b[01;34msample_data\u001b[0m/\n"
],
[
"cd 'drive/My Drive/Colab Notebooks/dw_matrix/matrix_two/dw_matrix_car'",
"/content/drive/My Drive/Colab Notebooks/dw_matrix/matrix_two/dw_matrix_car\n"
],
[
"ls",
"\u001b[0m\u001b[01;34mdata\u001b[0m/ LICENSE README.md\n"
],
[
"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport re",
"_____no_output_____"
],
[
"df = pd.read_hdf('data/car.h5')\ndf.head()",
"_____no_output_____"
],
[
"df.columns.values",
"_____no_output_____"
],
[
"df['price_value'].hist(bins=100)",
"_____no_output_____"
],
[
"df['price_value'].max()\n\n",
"_____no_output_____"
],
[
"df['price_value'].describe()",
"_____no_output_____"
],
[
"df['param_marka-pojazdu'].nunique(), df['param_marka-pojazdu'].unique()",
"_____no_output_____"
],
[
"df['param_marka-pojazdu'].nunique()",
"_____no_output_____"
],
[
"def group_and_barplot(feat_groupby, feat_agg='price_value', agg_funcs=[np.mean, np.median, np.size], feat_sort='mean', top=50):\n return (\n df\n .groupby(feat_groupby)[feat_agg]\n .agg(agg_funcs) \n .sort_values(by=feat_sort, ascending=False)\n .head(top)\n ).plot(kind='bar',figsize=(20,10), subplots=True)",
"_____no_output_____"
],
[
"group_and_barplot('param_marka-pojazdu');",
"_____no_output_____"
],
[
"group_and_barplot('param_kraj-pochodzenia',feat_sort = 'size');",
"_____no_output_____"
],
[
"group_and_barplot('param_kolor', feat_sort='mean');",
"_____no_output_____"
],
[
"def int_filter(val):\n val_int = re.sub('[^0-9]','', val)\n if val_int != '': return val_int\n else: return -1\n \ndf['param_przebieg'].sample(40)",
"_____no_output_____"
],
[
"pd.cut(df['param_przebieg'].astype(str).map(lambda x: int_filter(x) ).astype(np.int), 10)\n",
"_____no_output_____"
],
[
"df['group_of_przebieg'] = pd.qcut(df['param_przebieg'].astype(str).map(lambda x: int_filter(x) ).astype(np.int), 10)\n\ngroup_and_barplot('group_of_przebieg', feat_sort='mean');",
"_____no_output_____"
],
[
"df['param_kategoria'].value_counts()",
"_____no_output_____"
],
[
"group_and_barplot('param_napęd', feat_sort='mean');\n",
"_____no_output_____"
],
[
"group_and_barplot('param_typ', feat_sort='size');\n",
"_____no_output_____"
],
[
"group_and_barplot('param_skrzynia-biegów', feat_sort='size');\n",
"_____no_output_____"
],
[
"group_and_barplot('param_rodzaj-paliwa', feat_sort='size');\n\n",
"_____no_output_____"
],
[
"df['param_kierownica-po-prawej-(anglik)'] = df['param_kierownica-po-prawej-(anglik)'].map(lambda x: x if x == 'Tak' else 'Nie')\n",
"_____no_output_____"
],
[
"group_and_barplot('param_kierownica-po-prawej-(anglik)', feat_sort='size');\n\n\n",
"_____no_output_____"
],
[
"",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e7f9c51de7b5a9697a5abad51534248f7dd6c256 | 1,102 | ipynb | Jupyter Notebook | 01_log_conversion.ipynb | L0D3/P191919 | 27504930834167dd33d92347c0fe65fefd0e4b7e | [
"Apache-2.0"
] | null | null | null | 01_log_conversion.ipynb | L0D3/P191919 | 27504930834167dd33d92347c0fe65fefd0e4b7e | [
"Apache-2.0"
] | null | null | null | 01_log_conversion.ipynb | L0D3/P191919 | 27504930834167dd33d92347c0fe65fefd0e4b7e | [
"Apache-2.0"
] | null | null | null | 17.492063 | 78 | 0.508167 | [
[
[
"#default_exp conversion",
"_____no_output_____"
]
],
[
[
"# Log Conversion\n\n> Converts the event logs into csv format to make it easier to load them",
"_____no_output_____"
]
],
[
[
"%load_ext autoreload\n%autoreload 2\n%matplotlib inline",
"The autoreload extension is already loaded. To reload it, use:\n %reload_ext autoreload\n"
]
]
] | [
"code",
"markdown",
"code"
] | [
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
e7f9d933e76ab3226591a6c3c08916179a8965d3 | 2,381 | ipynb | Jupyter Notebook | HerniaAnnotation/HerniaCode/Notebooks/RemoveNoneClass.ipynb | molinamarcvdb/aigt | 75cadc66e3b8eb3736085a9e38ef9fb70521c94d | [
"BSD-3-Clause"
] | 26 | 2019-10-10T18:51:38.000Z | 2022-02-27T12:17:58.000Z | HerniaAnnotation/HerniaCode/Notebooks/RemoveNoneClass.ipynb | molinamarcvdb/aigt | 75cadc66e3b8eb3736085a9e38ef9fb70521c94d | [
"BSD-3-Clause"
] | 13 | 2019-11-05T01:40:00.000Z | 2022-02-08T15:29:36.000Z | HerniaAnnotation/HerniaCode/Notebooks/RemoveNoneClass.ipynb | molinamarcvdb/aigt | 75cadc66e3b8eb3736085a9e38ef9fb70521c94d | [
"BSD-3-Clause"
] | 22 | 2019-10-07T16:09:12.000Z | 2022-03-17T09:19:54.000Z | 21.645455 | 105 | 0.48971 | [
[
[
"import numpy as np\n\nclasses = ['None', 'Extob', 'Fat', 'Sack', 'Skin', 'Spchd']\n\nxFile = r\"C:\\Users\\PerkLab\\Desktop\\HerniaAnnotationData-2019-10-30\\x_train_fifth_128.npy\"\nyFile = r\"C:\\Users\\PerkLab\\Desktop\\HerniaAnnotationData-2019-10-30\\y_train_fifth_128.npy\"\n\nxOut = xFile[:-4]+\"_NoNone.npy\"\nyOut = yFile[:-4]+\"_NoNone.npy\"\n\narrX = np.load(xFile)\nprint(arrX.shape)\narrY = np.load(yFile)\nprint(arrY.shape)\nlength = arrX.shape[0]\nprint(length)\n\n\n# firstOne = np.where(arrY==1.0)[0][0]\n# print(firstOne)\n# arrX = arrX[firstOne:]\n# arrY = arrY[firstOne:]\n\nindexesToDrop = [i for i in range(length) if classes[int(arrY[i])]=='None']\n\narrX = np.delete(arrX, indexesToDrop, axis=0)\narrY = np.delete(arrY, indexesToDrop, axis=0)-1\n\nprint(arrX.shape)\nprint(arrY.shape)\n\n\n\n",
"(32359, 128, 128, 3)\n(32359,)\n32359\n(27894, 128, 128, 3)\n(27894,)\n"
],
[
"print(np.where(arrY==-1))\nprint(np.max(arrY))\nprint(np.min(arrY))",
"(array([], dtype=int64),)\n4.0\n0.0\n"
],
[
"np.save(xOut, arrX)\nnp.save(yOut, arrY)",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code"
]
] |
e7f9e1475af4368b30e008441d6f23109fd9bb88 | 459,921 | ipynb | Jupyter Notebook | docs/source/tutorial.ipynb | milicolazo/Pyedra | 44002e2bfca852e44337df150d8ff3c231470a43 | [
"MIT"
] | 16 | 2020-10-01T19:39:03.000Z | 2022-02-17T03:43:29.000Z | docs/source/tutorial.ipynb | milicolazo/Pyedra | 44002e2bfca852e44337df150d8ff3c231470a43 | [
"MIT"
] | 47 | 2020-10-12T15:41:41.000Z | 2021-03-07T14:34:00.000Z | docs/source/tutorial.ipynb | milicolazo/Pyedra | 44002e2bfca852e44337df150d8ff3c231470a43 | [
"MIT"
] | 7 | 2020-10-15T15:11:22.000Z | 2021-08-27T23:42:15.000Z | 253.4 | 66,500 | 0.906456 | [
[
[
"# Pyedra's Tutorial\n\nThis tutorial is intended to serve as a guide for using Pyedra to analyze asteroid phase curve data.\n\n## Imports\n\nThe first thing we will do is import the necessary libraries. In general you will need the following:\n- `pyedra` (*pyedra*) is the library that we present in this tutorial.\n- `pandas` (*pandas*) this library will allow you to import your data as a dataframe.\n\n_Note: In this tutorial we assume that you already have experience using these libraries._",
"_____no_output_____"
]
],
[
[
"import pyedra\nimport pandas as pd",
"_____no_output_____"
]
],
[
[
"## Load the data\n\nThe next thing we have to do is load our data. Pyedra should receive a dataframe with three columns: id (MPC number of the asteroid), alpha ($\\alpha$, phase angle) and v (reduced magnitude in Johnson's V filter). You must respect the order and names of the columns as they have been mentioned. In this step we recommend the use of pandas:\n\n`df = pd.read_csv('somefile.csv')`\n\nFor this tutorial we will use a preloaded data set offered by Pyedra.",
"_____no_output_____"
]
],
[
[
"df = pyedra.datasets.load_carbognani2019()",
"_____no_output_____"
]
],
[
[
"Here we show you the structure that your data file should have. Note that the file can contain information about many asteroids, which allows to obtain catalogs of the parameters of the phase function for large databases.",
"_____no_output_____"
]
],
[
[
"df",
"_____no_output_____"
]
],
[
[
"## Fit your data\n\nPyedra's main objective is to fit a phase function model to our data. Currently the api offers three different models:\n\n- `HG_fit` (H, G model): $V(\\alpha)=H-2.5log_{10}[(1-G)\\Phi_{1}(\\alpha)+G\\Phi_{2}(\\alpha)]$\n\n- `Shev_fit` (Shevchenko model): $V(1,\\alpha)=V(1,0)-\\frac{a}{1+\\alpha}+b\\cdot\\alpha$\n\n- `HG1G2_fit` (H, G$_1$, G$_2$ model): $V(\\alpha) = H-2.5log_{10}[G_{1}\\Phi_{1}(\\alpha)+G_{2}\\Phi_{2}(\\alpha)+(1-G_{1}-G_{2})\\Phi_{3}(\\alpha)]$\n\nWe will now explain how to apply each of them. At the end of this tutorial you will notice that they all work in an analogous way and that their implementation is very simple.",
"_____no_output_____"
],
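[
"To make these formulas concrete, here is an illustrative evaluation of the Shevchenko model for hand-picked parameter values. This is a sketch only; in practice Pyedra's fit functions estimate the parameters from your data:\n\n```python\nimport numpy as np\n\ndef shevchenko_v(alpha, v0, a, b):\n # V(1, alpha) = V(1, 0) - a / (1 + alpha) + b * alpha\n return v0 - a / (1 + alpha) + b * alpha\n\nalpha = np.linspace(1, 30, 5) # phase angles in degrees\nprint(shevchenko_v(alpha, v0=7.3, a=0.5, b=0.04))\n```",
"_____no_output_____"
],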
[
"### HG_fit\n\nLet's assume that we want to fit the biparametric model H, G to our data set. What we will do is invoke Pyedra's `HG_fit` function:",
"_____no_output_____"
]
],
[
[
"HG = pyedra.HG_fit(df)",
"_____no_output_____"
]
],
[
[
"We have already created our catalog of H, G parameters for our data set. Let's see what it looks like.",
"_____no_output_____"
]
],
[
[
"HG",
"_____no_output_____"
]
],
[
[
"**R** is the coefficient of determination of the fit\n",
"_____no_output_____"
],
[
"All pandas dataframe options are available. For example, you may be interested in knowing the mean H of your sample. To do so:",
"_____no_output_____"
]
],
[
[
"HG.H.mean()",
"_____no_output_____"
]
],
[
[
"Remeber that `HG.H` selects the H column.",
"_____no_output_____"
]
],
[
[
"HG.H",
"_____no_output_____"
]
],
[
[
"The `PyedraFitDataFrame` can also be filtered, like a canonical pandas dataframe. Let's assume that we want to save the created catalog, but only for those asteroids whose id is less than t300. All we have to do is:",
"_____no_output_____"
]
],
[
[
"filtered = HG.model_df[HG.model_df['id'] < 300]\nfiltered",
"_____no_output_____"
]
],
[
[
"Finally we want to see our data plotted together with their respective fits. To do this we will use the `.plot` function provided by Pyedra. To obtain the graph with the adjustments of the phase function model we only have to pass to `.plot` the dataframe that contains our data in the following way:",
"_____no_output_____"
]
],
[
[
"HG.plot(df=df)",
"_____no_output_____"
]
],
[
[
"If your database is very large and you want a clearer graph, or if you only want to see the fit of one of the asteroids you can filter your initial dataframe.",
"_____no_output_____"
]
],
[
[
"asteroid_85 = df[df['id'] == 85]\nHG_85 = pyedra.HG_fit(asteroid_85)\nHG_85.plot(df = asteroid_85)",
"_____no_output_____"
]
],
[
[
"All pandas plots are available if you want to use any of them. For example, we may want to visualize the histogram of one of the parameters:",
"_____no_output_____"
]
],
[
[
"HG.plot(y='G', kind='hist')",
"_____no_output_____"
]
],
[
[
"Or we may want to find out if there is a correlation between parameters:",
"_____no_output_____"
]
],
[
[
"HG.plot(x='G', y='H', kind='scatter', marker='o', color='black')",
"_____no_output_____"
]
],
[
[
"Everything we have done in this section can be extended in an analogous way to the rest of the models, as we will see below.",
"_____no_output_____"
],
[
"### HG1G2_fit\n\nNow we want to adjust the H, G$_1$, G$_2$ model to our data. Use the function `HG1G2_fit` in the following way.",
"_____no_output_____"
]
],
[
[
"HG1G2 = pyedra.HG1G2_fit(df)\nHG1G2",
"_____no_output_____"
]
],
[
[
"**R** is the coefficient of determination of the fit.",
"_____no_output_____"
],
[
"We can calculate, for example, the median of each of the columns:",
"_____no_output_____"
]
],
[
[
"HG1G2.median()",
"_____no_output_____"
]
],
[
[
"Again, we can filter our catalog. We are keeping the best settings, that is, those whose R is greater than 0.98.",
"_____no_output_____"
]
],
[
[
"best_fits = HG1G2.model_df[HG1G2.model_df['R'] > 0.98]\nbest_fits",
"_____no_output_____"
]
],
[
[
"We will now look at the graphics. ",
"_____no_output_____"
]
],
[
[
"HG1G2.plot(df=df)",
"_____no_output_____"
]
],
[
[
"If we want to visualize the graph only of the asteroid (522):",
"_____no_output_____"
]
],
[
[
"asteroid_522 = df[df['id'] == 522]\nHG1G2_522 = pyedra.HG_fit(asteroid_522)\nHG1G2_522.plot(df=asteroid_522)",
"_____no_output_____"
]
],
[
[
"To see the correlation between the parameters G$_1$ and G$_2$ we can use the \"scatter\" graph of pandas:",
"_____no_output_____"
]
],
[
[
"HG1G2.plot(x='G1', y='G2', kind='scatter')",
"_____no_output_____"
]
],
[
[
"### Shev_fit",
"_____no_output_____"
],
[
"If we want to adjust the Shevchenko model to our data, we must use `Shev_fit`.",
"_____no_output_____"
]
],
[
[
"Shev = pyedra.Shev_fit(df)\nShev",
"_____no_output_____"
]
],
[
[
"**R** is the coefficient of determination of the fit.",
"_____no_output_____"
],
[
"We can select a particular column and calculate, for example, its minimum:",
"_____no_output_____"
]
],
[
[
"Shev.V_lin",
"_____no_output_____"
],
[
"Shev.V_lin.min()",
"_____no_output_____"
]
],
[
[
"And obviously we can graph the resulting fit:",
"_____no_output_____"
]
],
[
[
"Shev.plot(df=df)",
"_____no_output_____"
]
],
[
[
"Selecting a subsample:",
"_____no_output_____"
]
],
[
[
"subsample = df[df['id'] > 100 ]\nShev_subsample = pyedra.Shev_fit(subsample)\nShev_subsample.plot(df=subsample)",
"_____no_output_____"
]
],
[
[
"We can use some of the pandas plot.",
"_____no_output_____"
]
],
[
[
"Shev_subsample.plot(y=['b', 'error_b'], kind='density', subplots=True, figsize=(5,5), xlim=(0,2))\n",
"_____no_output_____"
]
],
[
[
"## Gaia Data",
"_____no_output_____"
],
[
"Below we show the procedure to combine some observation dataset with Gaia DR2 observations.",
"_____no_output_____"
],
[
"We import the gaia data with `load_gaia()`",
"_____no_output_____"
]
],
[
[
"gaia = pyedra.datasets.load_gaia()",
"_____no_output_____"
]
],
[
[
"We then join both datasets (ours and gaia) with `merge_obs`",
"_____no_output_____"
]
],
[
[
"merge = pyedra.merge_obs(df, gaia)\nmerge = merge[['id', 'alpha', 'v']]",
"_____no_output_____"
]
],
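[
[
"A quick look at the merged table (added for illustration; `merge` behaves like a regular pandas dataframe):",
"_____no_output_____"
]
],
[
[
"merge.head()",
"_____no_output_____"
]
],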
[
[
"We apply to the new dataframe all the functionalities as we did before",
"_____no_output_____"
]
],
[
[
"catalog = pyedra.HG_fit(merge)",
"_____no_output_____"
],
[
"catalog.plot(df=merge)",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
e7f9ea122d12e059f97e56c35f83c652d20ed34e | 2,222 | ipynb | Jupyter Notebook | Notebooks/02_analyses/Fig2_Shannon_Entropy.ipynb | BioProteanLabs/SFt_pipeline | f383ee7e76e962825a0f8ed8dc34d49ec12133ce | [
"MIT"
] | null | null | null | Notebooks/02_analyses/Fig2_Shannon_Entropy.ipynb | BioProteanLabs/SFt_pipeline | f383ee7e76e962825a0f8ed8dc34d49ec12133ce | [
"MIT"
] | null | null | null | Notebooks/02_analyses/Fig2_Shannon_Entropy.ipynb | BioProteanLabs/SFt_pipeline | f383ee7e76e962825a0f8ed8dc34d49ec12133ce | [
"MIT"
] | null | null | null | 25.25 | 125 | 0.531503 | [
[
[
"# Shannon's Entropy of ABA features",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom skimage.measure import shannon_entropy\nfrom morphontogeny.functions.IO import reconstruct_ABA",
"_____no_output_____"
],
[
"def level_arr(array, levels=256):\n \n arr = array - np.nanmin(array) # set the minimum to zero\n arr = (arr / np.nanmax(arr)) * (levels - 1) # and the max to the number of levels - 1\n arr = np.round(arr) # round to ints.\n \n return arr",
"_____no_output_____"
],
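[
"# Quick illustrative check of level_arr (added example, not part of the\n# original analysis): with 4 levels, values spanning [0, 1] should map\n# onto the integers 0..3.\nlevel_arr(np.array([0.0, 0.25, 0.5, 1.0]), levels=4)",
"_____no_output_____"
],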
[
"# Loading features\nSFT_features = np.load('files/SFT_100features.npy')\n\n# List of Entropies\nSFT_shannon_entropies = []\n\n# Indices file\nindices_file = 'files/mask_indices.npy'\n\nfor ii in range(SFT_features.shape[1]):\n \n # Reconstructing features\n SFT_feature = reconstruct_ABA(SFT_features[:,ii], indices_file=indices_file, outside_value=np.nan, mirror=False)\n \n # Levelling the arrays\n SFT_feature = level_arr(SFT_feature)\n \n # Adding entropies\n SFT_shannon_entropies.append(shannon_entropy(SFT_feature))",
"_____no_output_____"
]
]
] | [
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code"
]
] |
e7f9f3dc94739d4dee9b72ef6bcbc324cbc181f4 | 25,328 | ipynb | Jupyter Notebook | 05-2021-05-21/notebooks/05-00_The_pandas_library.ipynb | eotp/python-FU-class | f0a7518b3e3204a77e8855bef91afeaabb0d52ac | [
"MIT"
] | 1 | 2020-01-17T14:51:40.000Z | 2020-01-17T14:51:40.000Z | 05-2022-06-02/notebooks/05-00_The_pandas_library.ipynb | eotp/python-FU-WiSe1920 | 4f225430ef8a70faca8c86c77cc888524c8e0546 | [
"MIT"
] | null | null | null | 05-2022-06-02/notebooks/05-00_The_pandas_library.ipynb | eotp/python-FU-WiSe1920 | 4f225430ef8a70faca8c86c77cc888524c8e0546 | [
"MIT"
] | 1 | 2020-12-04T15:37:28.000Z | 2020-12-04T15:37:28.000Z | 22.120524 | 849 | 0.528032 | [
[
[
"# The pandas library",
"_____no_output_____"
],
[
"The [pandas library](https://pandas.pydata.org/) was created by [Wes McKinney](http://wesmckinney.com/) in 2010. pandas provides **data structures** and **functions** \nfor manipulating, processing, cleaning and crunching data. In the Python ecosystem pandas is the state-of-the-art tool for working with tabular or spreadsheet-like data in which each column may be a different type (`string`, `numeric`, `date`, or otherwise). pandas provides sophisticated indexing functionality to make it easy to reshape, slice and dice, perform aggregations, and select subsets of data. pandas relies on other packages, such as [NumPy](http://www.numpy.org/) and [SciPy](https://scipy.org/scipylib/index.html). \nFurther pandas integrates [matplotlib](https://matplotlib.org/) for plotting. \n\nIf you are new to pandas we strongly recommend to visit the very well written [__pandas tutorials__](https://pandas.pydata.org/pandas-docs/stable/tutorials.html), which cover all relevant sections for new users to properly get started.\n\n\nOnce installed (for details refer to the [documentation](https://pandas.pydata.org/pandas-docs/stable/install.html)), pandas is imported by using the canonical alias `pd`.",
"_____no_output_____"
]
],
[
[
"import pandas as pd",
"_____no_output_____"
],
[
"import numpy as np",
"_____no_output_____"
]
],
[
[
"The pandas library has two workhorse data structures: __*Series*__ and __*DataFrame*__.\n\n* one dimensional `pd.Series` object\n* two dimensional `pd.DataFrame` object",
"_____no_output_____"
],
[
"***\n\n## The `pd.Series` object",
"_____no_output_____"
],
[
"Data generation",
"_____no_output_____"
]
],
[
[
"# import the random module from numpy\nfrom numpy import random \n# set seed for reproducibility\nrandom.seed(123) \n# generate 26 random integers between -10 and 10\nmy_data = random.randint(low=-10, high=10, size=26)\n# print the data\nmy_data",
"_____no_output_____"
],
[
"type(my_data)",
"_____no_output_____"
]
],
[
[
"A Series is a one-dimensional array-like object containing an array of data and an associated array of data labels, called its _index_. We create a `pd.Series` object by calling the `pd.Series()` function. ",
"_____no_output_____"
]
],
[
[
"# Uncomment to look up the documentation\n\n# docstring\n#?pd.Series \n\n# source\n#??pd.Series ",
"_____no_output_____"
],
[
"# create a pd.Series object\ns = pd.Series(data=my_data)\ns",
"_____no_output_____"
],
[
"type(s)",
"_____no_output_____"
]
],
[
[
"***\n\n### `pd.Series` attributes\n\nPython objects in general and the `pd.Series` in particular offer useful object-specific *attributes*.\n\n* _attribute_ $\\to$ `OBJECT.attribute` $\\qquad$ _Note that the attribute is called without parenthesis_",
"_____no_output_____"
]
],
[
[
"s.dtypes",
"_____no_output_____"
],
[
"s.index",
"_____no_output_____"
]
],
[
[
"We can use the `index` attribute to assign an index to a `pd.Series` object.\n\nConsider the letters of the alphabet....",
"_____no_output_____"
]
],
[
[
"import string\nletters = string.ascii_uppercase\nletters",
"_____no_output_____"
]
],
[
[
"By providing an array-type object we assign a new index to the `pd.Series` object.",
"_____no_output_____"
]
],
[
[
"s.index = list(letters)",
"_____no_output_____"
],
[
"s.index",
"_____no_output_____"
],
[
"s",
"_____no_output_____"
]
],
[
[
"***\n### `pd.Series` methods\n\nMethods are functions that are called using the attribute notation. Hence they are called by appending a dot (`.`) to the Python object, followed by the name of the method, parentheses `()` and in case one or more arguments (`arg`). \n\n* _method_ $\\to$ `OBJECT.method_name(arg1, arg2, ...)`",
"_____no_output_____"
]
],
[
[
"s.sum()",
"_____no_output_____"
],
[
"s.mean()",
"_____no_output_____"
],
[
"s.max()",
"_____no_output_____"
],
[
"s.min()",
"_____no_output_____"
],
[
"s.median()",
"_____no_output_____"
],
[
"s.quantile(q=0.5)",
"_____no_output_____"
],
[
"s.quantile(q=[0.25, 0.5, 0.75])",
"_____no_output_____"
]
],
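[
[
"Many of these summary statistics are also available at once via `describe()` (a small added aside):",
"_____no_output_____"
]
],
[
[
"s.describe()",
"_____no_output_____"
]
],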
[
[
"***\n### Element-wise arithmetic\n\n\nA very useful feature of `pd.Series` objects is that we may apply arithmetic operations *element-wise*.",
"_____no_output_____"
]
],
[
[
"s+10\n#s*0.1\n#10/s\n#s**2\n#(2+s)*1**3\n#s+s",
"_____no_output_____"
]
],
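[
[
"Arithmetic also *aligns on the index*; as a short added sketch, labels that appear in only one operand yield `NaN` rather than raising an error:",
"_____no_output_____"
]
],
[
[
"# A, B, C  plus  B, C, D  ->  NaN for A and D, doubled values for B and C\ns[:3] + s[1:4]",
"_____no_output_____"
]
],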
[
[
"***\n### Selection and Indexing\n\nAnother main data operation is indexing and selecting particular subsets of the data object. pandas comes with a very [rich set of methods](https://pandas.pydata.org/pandas-docs/stable/indexing.html) for these type of tasks. \n\nIn its simplest form we index a Series numpy-like, by using the `[]` operator to select a particular `index` of the Series.",
"_____no_output_____"
]
],
[
[
"s",
"_____no_output_____"
],
[
"s[3]",
"_____no_output_____"
],
[
"s[2:6]",
"_____no_output_____"
],
[
"s[\"C\"]",
"_____no_output_____"
],
[
"s[\"C\":\"K\"]",
"_____no_output_____"
]
],
[
[
"***\n\n## The `pd.DataFrame` object\n\nThe primary pandas data structure is the `DataFrame`. It is a two-dimensional size-mutable, potentially heterogeneous tabular data structure with both row and column labels. Arithmetic operations align on both row and column labels. Basically, the `DataFrame` can be thought of as a `dictionary`-like container for Series objects. \n\n\n",
"_____no_output_____"
],
[
"**Generate a `DataFrame` object from scratch** \n\npandas facilitates the import of different data types and sources, however, for the sake of this tutorial we generate a `DataFrame` object from scratch. \n\nSource: http://duelingdata.blogspot.de/2016/01/the-beatles.html",
"_____no_output_____"
]
],
[
[
"df = pd.DataFrame({\"id\" : range(1,5),\n \"Name\" : [\"John\", \"Paul\", \"George\", \"Ringo\"],\n \"Last Name\" : [\"Lennon\", \"McCartney\", \"Harrison\", \"Star\"],\n \"dead\" : [True, False, True, False],\n \"year_born\" : [1940, 1942, 1943, 1940],\n \"no_of_songs\" : [62, 58, 24, 3]\n })\ndf",
"_____no_output_____"
]
],
[
[
"***\n### `pd.DataFrame` attributes",
"_____no_output_____"
]
],
[
[
"df.dtypes",
"_____no_output_____"
],
[
"# axis 0\ndf.columns",
"_____no_output_____"
],
[
"# axis 1\ndf.index",
"_____no_output_____"
]
],
[
[
"***\n### `pd.DataFrame` methods\n",
"_____no_output_____"
],
[
"**Get a quick overview of the data set**",
"_____no_output_____"
]
],
[
[
"df.info()",
"_____no_output_____"
],
[
"df.describe()",
"_____no_output_____"
],
[
"df.describe(include=\"all\")",
"_____no_output_____"
]
],
[
[
"**Change index to the variable `id`**",
"_____no_output_____"
]
],
[
[
"df",
"_____no_output_____"
],
[
"df.set_index(\"id\")",
"_____no_output_____"
],
[
"df",
"_____no_output_____"
]
],
[
[
"Note that nothing changed!!\n\nFor the purpose of memory and computation efficiency `pandas` returns a view of the object, rather than a copy. Hence, if we want to make a permanent change we have to assign/reassign the object to a variable:\n\n df = df.set_index(\"id\") \n \nor, some methods have the `inplace=True` argument:\n\n df.set_index(\"id\", inplace=True) ",
"_____no_output_____"
]
],
[
[
"df = df.set_index(\"id\")",
"_____no_output_____"
],
[
"df",
"_____no_output_____"
]
],
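[
[
"For completeness, a small sketch of the `inplace=True` variant, applied to a throwaway copy so that `df` itself stays untouched:",
"_____no_output_____"
]
],
[
[
"tmp = df.reset_index()\ntmp.set_index('id', inplace=True)  # modifies tmp in place and returns None\ntmp.head(2)",
"_____no_output_____"
]
],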
[
[
"**Arithmetic methods**",
"_____no_output_____"
]
],
[
[
"df",
"_____no_output_____"
],
[
"df.sum(axis=0)",
"_____no_output_____"
],
[
"df.sum(axis=1)",
"_____no_output_____"
]
],
[
[
"#### `groupby` method\n[Hadley Wickham 2011: The Split-Apply-Combine Strategy for Data Analysis, Journal of Statistical Software, 40(1)](https://www.jstatsoft.org/article/view/v040i01)",
"_____no_output_____"
],
[
"<img src=\"_img/split-apply-combine.svg\" width=\"800\">\n\nImage source: [Jake VanderPlas 2016, Data Science Handbook](https://jakevdp.github.io/PythonDataScienceHandbook/)",
"_____no_output_____"
]
],
[
[
"df",
"_____no_output_____"
],
[
"df.groupby(\"dead\")",
"_____no_output_____"
],
[
"df.groupby(\"dead\").sum()",
"_____no_output_____"
],
[
"df.groupby(\"dead\")[\"no_of_songs\"].sum()",
"_____no_output_____"
],
[
"df.groupby(\"dead\")[\"no_of_songs\"].mean()",
"_____no_output_____"
],
[
"df.groupby(\"dead\")[\"no_of_songs\"].agg([\"mean\", \"max\", \"min\", \"sum\"])",
"_____no_output_____"
]
],
[
[
"#### Family of `apply`/`map` methods\n\n* `apply` works on a row (`axis=0`, default) / column (`axis=1`) basis of a `DataFrame`\n* `applymap` works __element-wise__ on a `DataFrame`\n* `map` works __element-wise__ on a `Series`.\n",
"_____no_output_____"
]
],
[
[
"df",
"_____no_output_____"
],
[
"# (axis=0, default)\ndf[[\"Last Name\", \"Name\"]].apply(lambda x: x.sum())",
"_____no_output_____"
],
[
"# (axis=1)\ndf[[\"Last Name\", \"Name\"]].apply(lambda x: x.sum(), axis=1)",
"_____no_output_____"
]
],
[
[
"_... maybe a more useful case ..._",
"_____no_output_____"
]
],
[
[
"df.apply(lambda x: \" \".join(x[[\"Name\", \"Last Name\"]]), axis=1)",
"_____no_output_____"
]
],
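[
[
"As promised above, a quick sketch of the element-wise siblings (using the same `df`):",
"_____no_output_____"
]
],
[
[
"# `map` works element-wise on a Series\ndf['Name'].map(lambda x: x.upper())",
"_____no_output_____"
],
[
"# `applymap` works element-wise on a whole DataFrame\ndf[['Name', 'Last Name']].applymap(len)",
"_____no_output_____"
]
],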
[
[
"***\n### Selection and Indexing",
"_____no_output_____"
],
[
"**Column index**",
"_____no_output_____"
]
],
[
[
"df[\"Name\"]",
"_____no_output_____"
],
[
"df[[\"Name\", \"Last Name\"]]",
"_____no_output_____"
],
[
"df.dead",
"_____no_output_____"
]
],
[
[
"**Row index**\n\nIn addition to the `[]` operator pandas ships with other indexing operators such as `.loc[]` and `.iloc[]`, among others.\n\n* `.loc[]` is primarily __label based__, but may also be used with a boolean array.\n* `.iloc[]` is primarily __integer position based__ (from 0 to length-1 of the axis), but may also be used with a boolean array. \n",
"_____no_output_____"
]
],
[
[
"df.head(2)",
"_____no_output_____"
],
[
"df.loc[1]",
"_____no_output_____"
],
[
"df.iloc[1]",
"_____no_output_____"
]
],
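[
[
"A short added note on the difference: with the `id` index starting at 1, `.loc[1]` selects the row *labelled* 1 (John), while `.iloc[1]` selects the row at *position* 1, i.e. the second row (Paul).",
"_____no_output_____"
]
],
[
[
"# label 1 and position 0 refer to the same (first) row\ndf.loc[1].equals(df.iloc[0])",
"_____no_output_____"
]
],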
[
[
"**Row and Columns indices**\n\n`df.loc[row, col]`",
"_____no_output_____"
]
],
[
[
"df.loc[1, \"Last Name\"]",
"_____no_output_____"
],
[
"df.loc[2:4, [\"Name\", \"dead\"]]",
"_____no_output_____"
]
],
[
[
"**Logical indexing**",
"_____no_output_____"
]
],
[
[
"df",
"_____no_output_____"
],
[
"df[\"no_of_songs\"] > 50",
"_____no_output_____"
],
[
"df.loc[df[\"no_of_songs\"] > 50]",
"_____no_output_____"
],
[
"df.loc[(df[\"no_of_songs\"] > 50) & (df[\"year_born\"] >= 1942)]",
"_____no_output_____"
],
[
"df.loc[(df[\"no_of_songs\"] > 50) & (df[\"year_born\"] >= 1942), [\"Last Name\", \"Name\"]]",
"_____no_output_____"
]
],
[
[
"***\n\n### Manipulating columns, rows and particular entries",
"_____no_output_____"
],
[
"**Add a row to the data set**",
"_____no_output_____"
]
],
[
[
"from numpy import nan\ndf.loc[5] = [\"Mickey\", \"Mouse\", nan, 1928, nan]\ndf",
"_____no_output_____"
],
[
"df.dtypes",
"_____no_output_____"
]
],
[
[
"_Note that the variable `dead` changed. Its values changed from `True`/`False` to `1.0`/`0.0`. Consequently its `dtype` changed from `bool` to `float64`._",
"_____no_output_____"
],
[
"**Add a column to the data set**",
"_____no_output_____"
]
],
[
[
"pd.datetime.today()",
"_____no_output_____"
],
[
"now = pd.datetime.today().year\nnow",
"_____no_output_____"
],
[
"df[\"age\"] = now - df.year_born\ndf",
"_____no_output_____"
]
],
[
[
"**Change a particular entry**",
"_____no_output_____"
]
],
[
[
"df.loc[5, \"Name\"] = \"Minnie\" ",
"_____no_output_____"
],
[
"df",
"_____no_output_____"
]
],
[
[
"***\n## Plotting\n\nThe plotting functionality in pandas is built on top of matplotlib. It is quite convenient to start the visualization process with basic pandas plotting and to switch to matplotlib to customize the pandas visualization.",
"_____no_output_____"
],
[
"### `plot` method",
"_____no_output_____"
]
],
[
[
"# this call causes the figures to be plotted below the code cells\n%matplotlib inline",
"_____no_output_____"
],
[
"df",
"_____no_output_____"
],
[
"df[[\"no_of_songs\", \"age\"]].plot()",
"_____no_output_____"
],
[
"df[\"dead\"].plot.hist()",
"_____no_output_____"
],
[
"df[\"age\"].plot.bar()",
"_____no_output_____"
]
],
[
[
"## ...some notes on plotting with Python\n\n\nPlotting is an essential component of data analysis. However, the Python visualization world can be a frustrating place. There are many different options and choosing the right one is a challenge. (If you dare take a look at the [Python Visualization Landscape](https://github.com/rougier/python-visualization-landscape).)\n\n\n[matplotlib](https://matplotlib.org/) is probably the most well known 2D plotting Python library. It allows to produce publication quality figures in a variety of formats and interactive environments across platforms. However, matplotlib is the cause of frustration due to the complex syntax and due to existence of two interfaces, a __MATLAB like state-based interface__ and an __object-oriented interface__. Hence, __there is always more than one way to build a visualization__. Another source of confusion is that matplotlib is well integrated into other Python libraries, such as [pandas](http://pandas.pydata.org/index.html), [seaborn](http://seaborn.pydata.org/index.html), [xarray](http://xarray.pydata.org/en/stable/), among others. Hence, there is confusion as when to use pure matplotlib or a tool that is built on top of matplotlib.",
"_____no_output_____"
],
[
"We import the `matplotlib` library and matplotlib's `pyplot` module using the canonical commands\n\n import matplotlib as mpl\n import matplotlib.pyplot as plt\n\nWith respect to matplotlib terminology it is important to understand that the __`Figure`__ is the final image that may contain one or more axes, and that the __`Axes`__ represents an individual plot.\n\nTo create a `Figure` object we call\n\n fig = plt.figure()\n\nHowever, a more convenient way to create a `Figure` object and an `Axes` object at once, is to call\n\n fig, ax = plt.subplots() \n\nThen we can use the `Axes` object to add data for ploting. ",
"_____no_output_____"
]
],
[
[
"import matplotlib.pyplot as plt\n\n# create a Figure and Axes object\nfig, ax = plt.subplots(figsize=(10,5)) \n\n# plot the data and reference the Axes object\ndf[\"age\"].plot.bar(ax=ax)\n\n# add some customization to the Axes object\nax.set_xticklabels(df[\"Name\"], rotation=0)\nax.set_xlabel(\"\")\nax.set_ylabel(\"Age\", size=14)\nax.set_title(\"The Beatles and ... something else\", size=18);",
"_____no_output_____"
]
],
[
[
"Note that we are only scratching the surface of the plotting capabilities with pandas. Refer to the pandas online documentation ([here](https://pandas.pydata.org/pandas-docs/stable/visualization.html)) for a comprehensive overview.\n",
"_____no_output_____"
],
[
"***",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
]
] |
e7f9f603a770c9e210207a6e150782b5b5a198d9 | 70,311 | ipynb | Jupyter Notebook | 2018-10-30/jokes.ipynb | denistsoi/notebooks | 0f1ebe29870e4580ef6206e93e5717d645dcb5bd | [
"MIT"
] | null | null | null | 2018-10-30/jokes.ipynb | denistsoi/notebooks | 0f1ebe29870e4580ef6206e93e5717d645dcb5bd | [
"MIT"
] | null | null | null | 2018-10-30/jokes.ipynb | denistsoi/notebooks | 0f1ebe29870e4580ef6206e93e5717d645dcb5bd | [
"MIT"
] | null | null | null | 288.159836 | 2,979 | 0.695382 | [
[
[
"import requests\nr = requests.get('https://reddit.com/r/dadjokes.json')\n\n# file = open(\"jokes.json\", mode=\"w\")\n# file.write(str(r.json()));\n# file.close()",
"_____no_output_____"
],
[
"import json\nwith open(\"jokes.json\", \"r\") as json_file:\n json_data = json.load(json_file)\n\ntype(json_data);\n",
"_____no_output_____"
],
[
"json\njokes = json_data['data']['children']\n\nfor joke in jokes:\n print(joke['data'])",
"{'approved_at_utc': None, 'subreddit': 'dadjokes', 'selftext': 'The programmer returns home with 12 gallons of milk and says, “They had eggs.”\\n\\nedit: I know guys, I know, it’s supposed to be 13, I messed up the wording, please forgive me', 'author_fullname': 't2_13kqpc', 'saved': False, 'mod_reason_title': None, 'gilded': 0, 'clicked': False, 'title': 'A programmer gets sent to the store by his wife. His wife says, “Get a gallon of milk, and if they have eggs, get a dozen.”', 'link_flair_richtext': [], 'subreddit_name_prefixed': 'r/dadjokes', 'hidden': False, 'pwls': 6, 'link_flair_css_class': None, 'downs': 0, 'thumbnail_height': None, 'hide_score': False, 'name': 't3_9sq02g', 'quarantine': False, 'link_flair_text_color': 'dark', 'author_flair_background_color': None, 'subreddit_type': 'public', 'ups': 7738, 'domain': 'self.dadjokes', 'media_embed': {}, 'thumbnail_width': None, 'author_flair_template_id': None, 'is_original_content': False, 'user_reports': [], 'secure_media': None, 'is_reddit_media_domain': False, 'is_meta': False, 'category': None, 'secure_media_embed': {}, 'link_flair_text': None, 'can_mod_post': False, 'score': 7738, 'approved_by': None, 'thumbnail': 'self', 'edited': 1540953870.0, 'author_flair_css_class': None, 'author_flair_richtext': [], 'gildings': {'gid_1': 0, 'gid_2': 0, 'gid_3': 0}, 'content_categories': None, 'is_self': True, 'mod_note': None, 'created': 1540948871.0, 'link_flair_type': 'text', 'wls': 6, 'banned_by': None, 'author_flair_type': 'text', 'contest_mode': False, 'selftext_html': '<!-- SC_OFF --><div class=\"md\"><p>The programmer returns home with 12 gallons of milk and says, “They had eggs.”</p>\\n\\n<p>edit: I know guys, I know, it’s supposed to be 13, I messed up the wording, please forgive me</p>\\n</div><!-- SC_ON -->', 'likes': None, 'suggested_sort': None, 'banned_at_utc': None, 'view_count': None, 'archived': False, 'no_follow': False, 'is_crosspostable': False, 'pinned': False, 'over_18': False, 'media_only': False, 'link_flair_template_id': None, 'can_gild': False, 'spoiler': False, 'locked': False, 'author_flair_text': None, 'visited': False, 'num_reports': None, 'distinguished': None, 'subreddit_id': 't5_2t0no', 'mod_reason_by': None, 'removal_reason': None, 'link_flair_background_color': '', 'id': '9sq02g', 'is_robot_indexable': True, 'report_reasons': None, 'author': 'TheManicMonocle', 'num_crossposts': 4, 'num_comments': 289, 'send_replies': True, 'whitelist_status': 'all_ads', 'mod_reports': [], 'author_patreon_flair': False, 'author_flair_text_color': None, 'permalink': '/r/dadjokes/comments/9sq02g/a_programmer_gets_sent_to_the_store_by_his_wife/', 'parent_whitelist_status': 'all_ads', 'stickied': False, 'url': 'https://www.reddit.com/r/dadjokes/comments/9sq02g/a_programmer_gets_sent_to_the_store_by_his_wife/', 'subreddit_subscribers': 1220565, 'created_utc': 1540920071.0, 'media': None, 'is_video': False}\n{'approved_at_utc': None, 'subreddit': 'dadjokes', 'selftext': '', 'author_fullname': 't2_11clqf7w', 'saved': False, 'mod_reason_title': None, 'gilded': 0, 'clicked': False, 'title': 'Daddy, what is clickbait?', 'link_flair_richtext': [], 'subreddit_name_prefixed': 'r/dadjokes', 'hidden': False, 'pwls': 6, 'link_flair_css_class': None, 'downs': 0, 'thumbnail_height': None, 'hide_score': False, 'name': 't3_9srpt4', 'quarantine': False, 'link_flair_text_color': 'dark', 'author_flair_background_color': None, 'subreddit_type': 'public', 'ups': 181, 'domain': 'self.dadjokes', 'media_embed': {}, 'thumbnail_width': None, 
'author_flair_template_id': None, 'is_original_content': False, 'user_reports': [], 'secure_media': None, 'is_reddit_media_domain': False, 'is_meta': False, 'category': None, 'secure_media_embed': {}, 'link_flair_text': None, 'can_mod_post': False, 'score': 181, 'approved_by': None, 'thumbnail': 'self', 'edited': False, 'author_flair_css_class': None, 'author_flair_richtext': [], 'gildings': {'gid_1': 0, 'gid_2': 0, 'gid_3': 0}, 'content_categories': None, 'is_self': True, 'mod_note': None, 'created': 1540960002.0, 'link_flair_type': 'text', 'wls': 6, 'banned_by': None, 'author_flair_type': 'text', 'contest_mode': False, 'selftext_html': None, 'likes': None, 'suggested_sort': None, 'banned_at_utc': None, 'view_count': None, 'archived': False, 'no_follow': False, 'is_crosspostable': False, 'pinned': False, 'over_18': False, 'media_only': False, 'link_flair_template_id': None, 'can_gild': False, 'spoiler': False, 'locked': False, 'author_flair_text': None, 'visited': False, 'num_reports': None, 'distinguished': None, 'subreddit_id': 't5_2t0no', 'mod_reason_by': None, 'removal_reason': None, 'link_flair_background_color': '', 'id': '9srpt4', 'is_robot_indexable': True, 'report_reasons': None, 'author': 'Crazybv', 'num_crossposts': 0, 'num_comments': 28, 'send_replies': True, 'whitelist_status': 'all_ads', 'mod_reports': [], 'author_patreon_flair': False, 'author_flair_text_color': None, 'permalink': '/r/dadjokes/comments/9srpt4/daddy_what_is_clickbait/', 'parent_whitelist_status': 'all_ads', 'stickied': False, 'url': 'https://www.reddit.com/r/dadjokes/comments/9srpt4/daddy_what_is_clickbait/', 'subreddit_subscribers': 1220565, 'created_utc': 1540931202.0, 'media': None, 'is_video': False}\n{'approved_at_utc': None, 'subreddit': 'dadjokes', 'selftext': 'You look for fresh prints', 'author_fullname': 't2_2949nb9l', 'saved': False, 'mod_reason_title': None, 'gilded': 0, 'clicked': False, 'title': 'How do you find Will Smith in the snow?', 'link_flair_richtext': [], 'subreddit_name_prefixed': 'r/dadjokes', 'hidden': False, 'pwls': 6, 'link_flair_css_class': None, 'downs': 0, 'thumbnail_height': None, 'hide_score': False, 'name': 't3_9svcmk', 'quarantine': False, 'link_flair_text_color': 'dark', 'author_flair_background_color': None, 'subreddit_type': 'public', 'ups': 44, 'domain': 'self.dadjokes', 'media_embed': {}, 'thumbnail_width': None, 'author_flair_template_id': None, 'is_original_content': False, 'user_reports': [], 'secure_media': None, 'is_reddit_media_domain': False, 'is_meta': False, 'category': None, 'secure_media_embed': {}, 'link_flair_text': None, 'can_mod_post': False, 'score': 44, 'approved_by': None, 'thumbnail': 'self', 'edited': False, 'author_flair_css_class': None, 'author_flair_richtext': [], 'gildings': {'gid_1': 0, 'gid_2': 0, 'gid_3': 0}, 'content_categories': None, 'is_self': True, 'mod_note': None, 'created': 1540986791.0, 'link_flair_type': 'text', 'wls': 6, 'banned_by': None, 'author_flair_type': 'text', 'contest_mode': False, 'selftext_html': '<!-- SC_OFF --><div class=\"md\"><p>You look for fresh prints</p>\\n</div><!-- SC_ON -->', 'likes': None, 'suggested_sort': None, 'banned_at_utc': None, 'view_count': None, 'archived': False, 'no_follow': False, 'is_crosspostable': False, 'pinned': False, 'over_18': False, 'media_only': False, 'link_flair_template_id': None, 'can_gild': False, 'spoiler': False, 'locked': False, 'author_flair_text': None, 'visited': False, 'num_reports': None, 'distinguished': None, 'subreddit_id': 't5_2t0no', 'mod_reason_by': None, 
'removal_reason': None, 'link_flair_background_color': '', 'id': '9svcmk', 'is_robot_indexable': True, 'report_reasons': None, 'author': 'brookscorbs', 'num_crossposts': 0, 'num_comments': 2, 'send_replies': True, 'whitelist_status': 'all_ads', 'mod_reports': [], 'author_patreon_flair': False, 'author_flair_text_color': None, 'permalink': '/r/dadjokes/comments/9svcmk/how_do_you_find_will_smith_in_the_snow/', 'parent_whitelist_status': 'all_ads', 'stickied': False, 'url': 'https://www.reddit.com/r/dadjokes/comments/9svcmk/how_do_you_find_will_smith_in_the_snow/', 'subreddit_subscribers': 1220565, 'created_utc': 1540957991.0, 'media': None, 'is_video': False}\n{'approved_at_utc': None, 'subreddit': 'dadjokes', 'selftext': 'I said “no mom! Where there’s a wheel, there’s a way!”', 'author_fullname': 't2_2au66rez', 'saved': False, 'mod_reason_title': None, 'gilded': 0, 'clicked': False, 'title': 'So I can legit ride a unicycle. My mom told me it was a waste of time and would never get me anywhere in life....', 'link_flair_richtext': [], 'subreddit_name_prefixed': 'r/dadjokes', 'hidden': False, 'pwls': 6, 'link_flair_css_class': None, 'downs': 0, 'thumbnail_height': None, 'hide_score': False, 'name': 't3_9sobv4', 'quarantine': False, 'link_flair_text_color': 'dark', 'author_flair_background_color': None, 'subreddit_type': 'public', 'ups': 422, 'domain': 'self.dadjokes', 'media_embed': {}, 'thumbnail_width': None, 'author_flair_template_id': None, 'is_original_content': False, 'user_reports': [], 'secure_media': None, 'is_reddit_media_domain': False, 'is_meta': False, 'category': None, 'secure_media_embed': {}, 'link_flair_text': None, 'can_mod_post': False, 'score': 422, 'approved_by': None, 'thumbnail': 'self', 'edited': False, 'author_flair_css_class': None, 'author_flair_richtext': [], 'gildings': {'gid_1': 0, 'gid_2': 0, 'gid_3': 0}, 'content_categories': None, 'is_self': True, 'mod_note': None, 'created': 1540938146.0, 'link_flair_type': 'text', 'wls': 6, 'banned_by': None, 'author_flair_type': 'text', 'contest_mode': False, 'selftext_html': '<!-- SC_OFF --><div class=\"md\"><p>I said “no mom! 
Where there’s a wheel, there’s a way!”</p>\\n</div><!-- SC_ON -->', 'likes': None, 'suggested_sort': None, 'banned_at_utc': None, 'view_count': None, 'archived': False, 'no_follow': False, 'is_crosspostable': False, 'pinned': False, 'over_18': False, 'media_only': False, 'link_flair_template_id': None, 'can_gild': False, 'spoiler': False, 'locked': False, 'author_flair_text': None, 'visited': False, 'num_reports': None, 'distinguished': None, 'subreddit_id': 't5_2t0no', 'mod_reason_by': None, 'removal_reason': None, 'link_flair_background_color': '', 'id': '9sobv4', 'is_robot_indexable': True, 'report_reasons': None, 'author': 'serion15', 'num_crossposts': 0, 'num_comments': 8, 'send_replies': True, 'whitelist_status': 'all_ads', 'mod_reports': [], 'author_patreon_flair': False, 'author_flair_text_color': None, 'permalink': '/r/dadjokes/comments/9sobv4/so_i_can_legit_ride_a_unicycle_my_mom_told_me_it/', 'parent_whitelist_status': 'all_ads', 'stickied': False, 'url': 'https://www.reddit.com/r/dadjokes/comments/9sobv4/so_i_can_legit_ride_a_unicycle_my_mom_told_me_it/', 'subreddit_subscribers': 1220565, 'created_utc': 1540909346.0, 'media': None, 'is_video': False}\n{'approved_at_utc': None, 'subreddit': 'dadjokes', 'selftext': '“Sir, please stop writing separate checks for every single item.”', 'author_fullname': 't2_16sa6x', 'saved': False, 'mod_reason_title': None, 'gilded': 0, 'clicked': False, 'title': '[grocery store] Ok, milk...check, eggs...check, tomatoes...check.', 'link_flair_richtext': [], 'subreddit_name_prefixed': 'r/dadjokes', 'hidden': False, 'pwls': 6, 'link_flair_css_class': None, 'downs': 0, 'thumbnail_height': None, 'hide_score': False, 'name': 't3_9sisyp', 'quarantine': False, 'link_flair_text_color': 'dark', 'author_flair_background_color': None, 'subreddit_type': 'public', 'ups': 6434, 'domain': 'self.dadjokes', 'media_embed': {}, 'thumbnail_width': None, 'author_flair_template_id': None, 'is_original_content': False, 'user_reports': [], 'secure_media': None, 'is_reddit_media_domain': False, 'is_meta': False, 'category': None, 'secure_media_embed': {}, 'link_flair_text': None, 'can_mod_post': False, 'score': 6434, 'approved_by': None, 'thumbnail': 'self', 'edited': False, 'author_flair_css_class': None, 'author_flair_richtext': [], 'gildings': {'gid_1': 0, 'gid_2': 0, 'gid_3': 0}, 'content_categories': None, 'is_self': True, 'mod_note': None, 'created': 1540887015.0, 'link_flair_type': 'text', 'wls': 6, 'banned_by': None, 'author_flair_type': 'text', 'contest_mode': False, 'selftext_html': '<!-- SC_OFF --><div class=\"md\"><p>“Sir, please stop writing separate checks for every single item.”</p>\\n</div><!-- SC_ON -->', 'likes': None, 'suggested_sort': None, 'banned_at_utc': None, 'view_count': None, 'archived': False, 'no_follow': False, 'is_crosspostable': False, 'pinned': False, 'over_18': False, 'media_only': False, 'link_flair_template_id': None, 'can_gild': False, 'spoiler': False, 'locked': False, 'author_flair_text': None, 'visited': False, 'num_reports': None, 'distinguished': None, 'subreddit_id': 't5_2t0no', 'mod_reason_by': None, 'removal_reason': None, 'link_flair_background_color': '', 'id': '9sisyp', 'is_robot_indexable': True, 'report_reasons': None, 'author': 'porichoygupto', 'num_crossposts': 0, 'num_comments': 102, 'send_replies': False, 'whitelist_status': 'all_ads', 'mod_reports': [], 'author_patreon_flair': False, 'author_flair_text_color': None, 'permalink': '/r/dadjokes/comments/9sisyp/grocery_store_ok_milkcheck_eggscheck_tomatoescheck/', 
'parent_whitelist_status': 'all_ads', 'stickied': False, 'url': 'https://www.reddit.com/r/dadjokes/comments/9sisyp/grocery_store_ok_milkcheck_eggscheck_tomatoescheck/', 'subreddit_subscribers': 1220565, 'created_utc': 1540858215.0, 'media': None, 'is_video': False}\n{'approved_at_utc': None, 'subreddit': 'dadjokes', 'selftext': 'I knew he meant well. ', 'author_fullname': 't2_2fov73', 'saved': False, 'mod_reason_title': None, 'gilded': 0, 'clicked': False, 'title': 'My friend told me not to drink from the wall.', 'link_flair_richtext': [], 'subreddit_name_prefixed': 'r/dadjokes', 'hidden': False, 'pwls': 6, 'link_flair_css_class': None, 'downs': 0, 'thumbnail_height': None, 'hide_score': False, 'name': 't3_9svbh7', 'quarantine': False, 'link_flair_text_color': 'dark', 'author_flair_background_color': None, 'subreddit_type': 'public', 'ups': 15, 'domain': 'self.dadjokes', 'media_embed': {}, 'thumbnail_width': None, 'author_flair_template_id': None, 'is_original_content': False, 'user_reports': [], 'secure_media': None, 'is_reddit_media_domain': False, 'is_meta': False, 'category': None, 'secure_media_embed': {}, 'link_flair_text': None, 'can_mod_post': False, 'score': 15, 'approved_by': None, 'thumbnail': 'self', 'edited': False, 'author_flair_css_class': None, 'author_flair_richtext': [], 'gildings': {'gid_1': 0, 'gid_2': 0, 'gid_3': 0}, 'content_categories': None, 'is_self': True, 'mod_note': None, 'created': 1540986484.0, 'link_flair_type': 'text', 'wls': 6, 'banned_by': None, 'author_flair_type': 'text', 'contest_mode': False, 'selftext_html': '<!-- SC_OFF --><div class=\"md\"><p>I knew he meant well. </p>\\n</div><!-- SC_ON -->', 'likes': None, 'suggested_sort': None, 'banned_at_utc': None, 'view_count': None, 'archived': False, 'no_follow': False, 'is_crosspostable': False, 'pinned': False, 'over_18': False, 'media_only': False, 'link_flair_template_id': None, 'can_gild': False, 'spoiler': False, 'locked': False, 'author_flair_text': None, 'visited': False, 'num_reports': None, 'distinguished': None, 'subreddit_id': 't5_2t0no', 'mod_reason_by': None, 'removal_reason': None, 'link_flair_background_color': '', 'id': '9svbh7', 'is_robot_indexable': True, 'report_reasons': None, 'author': 'wexel64', 'num_crossposts': 0, 'num_comments': 2, 'send_replies': True, 'whitelist_status': 'all_ads', 'mod_reports': [], 'author_patreon_flair': False, 'author_flair_text_color': None, 'permalink': '/r/dadjokes/comments/9svbh7/my_friend_told_me_not_to_drink_from_the_wall/', 'parent_whitelist_status': 'all_ads', 'stickied': False, 'url': 'https://www.reddit.com/r/dadjokes/comments/9svbh7/my_friend_told_me_not_to_drink_from_the_wall/', 'subreddit_subscribers': 1220565, 'created_utc': 1540957684.0, 'media': None, 'is_video': False}\n{'approved_at_utc': None, 'subreddit': 'dadjokes', 'selftext': 'Just five more minutes.', 'author_fullname': 't2_16sa6x', 'saved': False, 'mod_reason_title': None, 'gilded': 0, 'clicked': False, 'title': 'A recent study found out how much sleep a normal teenager needs.', 'link_flair_richtext': [], 'subreddit_name_prefixed': 'r/dadjokes', 'hidden': False, 'pwls': 6, 'link_flair_css_class': None, 'downs': 0, 'thumbnail_height': None, 'hide_score': False, 'name': 't3_9smfdm', 'quarantine': False, 'link_flair_text_color': 'dark', 'author_flair_background_color': None, 'subreddit_type': 'public', 'ups': 392, 'domain': 'self.dadjokes', 'media_embed': {}, 'thumbnail_width': None, 'author_flair_template_id': None, 'is_original_content': False, 'user_reports': [], 'secure_media': None, 
'is_reddit_media_domain': False, 'is_meta': False, 'category': None, 'secure_media_embed': {}, 'link_flair_text': None, 'can_mod_post': False, 'score': 392, 'approved_by': None, 'thumbnail': 'self', 'edited': False, 'author_flair_css_class': None, 'author_flair_richtext': [], 'gildings': {'gid_1': 0, 'gid_2': 0, 'gid_3': 0}, 'content_categories': None, 'is_self': True, 'mod_note': None, 'created': 1540922306.0, 'link_flair_type': 'text', 'wls': 6, 'banned_by': None, 'author_flair_type': 'text', 'contest_mode': False, 'selftext_html': '<!-- SC_OFF --><div class=\"md\"><p>Just five more minutes.</p>\\n</div><!-- SC_ON -->', 'likes': None, 'suggested_sort': None, 'banned_at_utc': None, 'view_count': None, 'archived': False, 'no_follow': False, 'is_crosspostable': False, 'pinned': False, 'over_18': False, 'media_only': False, 'link_flair_template_id': None, 'can_gild': False, 'spoiler': False, 'locked': False, 'author_flair_text': None, 'visited': False, 'num_reports': None, 'distinguished': None, 'subreddit_id': 't5_2t0no', 'mod_reason_by': None, 'removal_reason': None, 'link_flair_background_color': '', 'id': '9smfdm', 'is_robot_indexable': True, 'report_reasons': None, 'author': 'porichoygupto', 'num_crossposts': 0, 'num_comments': 11, 'send_replies': False, 'whitelist_status': 'all_ads', 'mod_reports': [], 'author_patreon_flair': False, 'author_flair_text_color': None, 'permalink': '/r/dadjokes/comments/9smfdm/a_recent_study_found_out_how_much_sleep_a_normal/', 'parent_whitelist_status': 'all_ads', 'stickied': False, 'url': 'https://www.reddit.com/r/dadjokes/comments/9smfdm/a_recent_study_found_out_how_much_sleep_a_normal/', 'subreddit_subscribers': 1220565, 'created_utc': 1540893506.0, 'media': None, 'is_video': False}\n{'approved_at_utc': None, 'subreddit': 'dadjokes', 'selftext': 'It runs in your genes.', 'author_fullname': 't2_11clqf7w', 'saved': False, 'mod_reason_title': None, 'gilded': 0, 'clicked': False, 'title': 'What do diarrhea and eye colour have in common?', 'link_flair_richtext': [], 'subreddit_name_prefixed': 'r/dadjokes', 'hidden': False, 'pwls': 6, 'link_flair_css_class': None, 'downs': 0, 'thumbnail_height': None, 'hide_score': False, 'name': 't3_9sob3i', 'quarantine': False, 'link_flair_text_color': 'dark', 'author_flair_background_color': None, 'subreddit_type': 'public', 'ups': 146, 'domain': 'self.dadjokes', 'media_embed': {}, 'thumbnail_width': None, 'author_flair_template_id': None, 'is_original_content': False, 'user_reports': [], 'secure_media': None, 'is_reddit_media_domain': False, 'is_meta': False, 'category': None, 'secure_media_embed': {}, 'link_flair_text': None, 'can_mod_post': False, 'score': 146, 'approved_by': None, 'thumbnail': 'self', 'edited': False, 'author_flair_css_class': None, 'author_flair_richtext': [], 'gildings': {'gid_1': 0, 'gid_2': 0, 'gid_3': 0}, 'content_categories': None, 'is_self': True, 'mod_note': None, 'created': 1540938005.0, 'link_flair_type': 'text', 'wls': 6, 'banned_by': None, 'author_flair_type': 'text', 'contest_mode': False, 'selftext_html': '<!-- SC_OFF --><div class=\"md\"><p>It runs in your genes.</p>\\n</div><!-- SC_ON -->', 'likes': None, 'suggested_sort': None, 'banned_at_utc': None, 'view_count': None, 'archived': False, 'no_follow': False, 'is_crosspostable': False, 'pinned': False, 'over_18': False, 'media_only': False, 'link_flair_template_id': None, 'can_gild': False, 'spoiler': False, 'locked': False, 'author_flair_text': None, 'visited': False, 'num_reports': None, 'distinguished': None, 'subreddit_id': 
't5_2t0no', 'mod_reason_by': None, 'removal_reason': None, 'link_flair_background_color': '', 'id': '9sob3i', 'is_robot_indexable': True, 'report_reasons': None, 'author': 'Crazybv', 'num_crossposts': 0, 'num_comments': 3, 'send_replies': True, 'whitelist_status': 'all_ads', 'mod_reports': [], 'author_patreon_flair': False, 'author_flair_text_color': None, 'permalink': '/r/dadjokes/comments/9sob3i/what_do_diarrhea_and_eye_colour_have_in_common/', 'parent_whitelist_status': 'all_ads', 'stickied': False, 'url': 'https://www.reddit.com/r/dadjokes/comments/9sob3i/what_do_diarrhea_and_eye_colour_have_in_common/', 'subreddit_subscribers': 1220565, 'created_utc': 1540909205.0, 'media': None, 'is_video': False}\n{'approved_at_utc': None, 'subreddit': 'dadjokes', 'selftext': '“You are my Sunshine”', 'author_fullname': 't2_9d6wc', 'saved': False, 'mod_reason_title': None, 'gilded': 0, 'clicked': False, 'title': 'What song do all Vampires hate the most?', 'link_flair_richtext': [], 'subreddit_name_prefixed': 'r/dadjokes', 'hidden': False, 'pwls': 6, 'link_flair_css_class': None, 'downs': 0, 'thumbnail_height': None, 'hide_score': False, 'name': 't3_9sum0t', 'quarantine': False, 'link_flair_text_color': 'dark', 'author_flair_background_color': None, 'subreddit_type': 'public', 'ups': 15, 'domain': 'self.dadjokes', 'media_embed': {}, 'thumbnail_width': None, 'author_flair_template_id': None, 'is_original_content': False, 'user_reports': [], 'secure_media': None, 'is_reddit_media_domain': False, 'is_meta': False, 'category': None, 'secure_media_embed': {}, 'link_flair_text': None, 'can_mod_post': False, 'score': 15, 'approved_by': None, 'thumbnail': 'self', 'edited': False, 'author_flair_css_class': None, 'author_flair_richtext': [], 'gildings': {'gid_1': 0, 'gid_2': 0, 'gid_3': 0}, 'content_categories': None, 'is_self': True, 'mod_note': None, 'created': 1540980660.0, 'link_flair_type': 'text', 'wls': 6, 'banned_by': None, 'author_flair_type': 'text', 'contest_mode': False, 'selftext_html': '<!-- SC_OFF --><div class=\"md\"><p>“You are my Sunshine”</p>\\n</div><!-- SC_ON -->', 'likes': None, 'suggested_sort': None, 'banned_at_utc': None, 'view_count': None, 'archived': False, 'no_follow': False, 'is_crosspostable': False, 'pinned': False, 'over_18': False, 'media_only': False, 'link_flair_template_id': None, 'can_gild': False, 'spoiler': False, 'locked': False, 'author_flair_text': None, 'visited': False, 'num_reports': None, 'distinguished': None, 'subreddit_id': 't5_2t0no', 'mod_reason_by': None, 'removal_reason': None, 'link_flair_background_color': '', 'id': '9sum0t', 'is_robot_indexable': True, 'report_reasons': None, 'author': 'mustachereviews', 'num_crossposts': 0, 'num_comments': 1, 'send_replies': True, 'whitelist_status': 'all_ads', 'mod_reports': [], 'author_patreon_flair': False, 'author_flair_text_color': None, 'permalink': '/r/dadjokes/comments/9sum0t/what_song_do_all_vampires_hate_the_most/', 'parent_whitelist_status': 'all_ads', 'stickied': False, 'url': 'https://www.reddit.com/r/dadjokes/comments/9sum0t/what_song_do_all_vampires_hate_the_most/', 'subreddit_subscribers': 1220565, 'created_utc': 1540951860.0, 'media': None, 'is_video': False}\n{'approved_at_utc': None, 'subreddit': 'dadjokes', 'selftext': 'I’m a faux pa', 'author_fullname': 't2_1rejoypr', 'saved': False, 'mod_reason_title': None, 'gilded': 0, 'clicked': False, 'title': 'I tell dad jokes but I have no kids', 'link_flair_richtext': [], 'subreddit_name_prefixed': 'r/dadjokes', 'hidden': False, 'pwls': 6, 
'link_flair_css_class': None, 'downs': 0, 'thumbnail_height': None, 'hide_score': False, 'name': 't3_9ssih9', 'quarantine': False, 'link_flair_text_color': 'dark', 'author_flair_background_color': None, 'subreddit_type': 'public', 'ups': 29, 'domain': 'self.dadjokes', 'media_embed': {}, 'thumbnail_width': None, 'author_flair_template_id': None, 'is_original_content': False, 'user_reports': [], 'secure_media': None, 'is_reddit_media_domain': False, 'is_meta': False, 'category': None, 'secure_media_embed': {}, 'link_flair_text': None, 'can_mod_post': False, 'score': 29, 'approved_by': None, 'thumbnail': 'self', 'edited': False, 'author_flair_css_class': None, 'author_flair_richtext': [], 'gildings': {'gid_1': 0, 'gid_2': 0, 'gid_3': 0}, 'content_categories': None, 'is_self': True, 'mod_note': None, 'created': 1540965346.0, 'link_flair_type': 'text', 'wls': 6, 'banned_by': None, 'author_flair_type': 'text', 'contest_mode': False, 'selftext_html': '<!-- SC_OFF --><div class=\"md\"><p>I’m a faux pa</p>\\n</div><!-- SC_ON -->', 'likes': None, 'suggested_sort': None, 'banned_at_utc': None, 'view_count': None, 'archived': False, 'no_follow': False, 'is_crosspostable': False, 'pinned': False, 'over_18': False, 'media_only': False, 'link_flair_template_id': None, 'can_gild': False, 'spoiler': False, 'locked': False, 'author_flair_text': None, 'visited': False, 'num_reports': None, 'distinguished': None, 'subreddit_id': 't5_2t0no', 'mod_reason_by': None, 'removal_reason': None, 'link_flair_background_color': '', 'id': '9ssih9', 'is_robot_indexable': True, 'report_reasons': None, 'author': 'salt-the-wounds', 'num_crossposts': 0, 'num_comments': 4, 'send_replies': True, 'whitelist_status': 'all_ads', 'mod_reports': [], 'author_patreon_flair': False, 'author_flair_text_color': None, 'permalink': '/r/dadjokes/comments/9ssih9/i_tell_dad_jokes_but_i_have_no_kids/', 'parent_whitelist_status': 'all_ads', 'stickied': False, 'url': 'https://www.reddit.com/r/dadjokes/comments/9ssih9/i_tell_dad_jokes_but_i_have_no_kids/', 'subreddit_subscribers': 1220565, 'created_utc': 1540936546.0, 'media': None, 'is_video': False}\n{'approved_at_utc': None, 'subreddit': 'dadjokes', 'selftext': \"There's nothing left but de Brie\", 'author_fullname': 't2_23ecjy55', 'saved': False, 'mod_reason_title': None, 'gilded': 0, 'clicked': False, 'title': 'Did you hear about the Cheese Factory in France that exploded?', 'link_flair_richtext': [], 'subreddit_name_prefixed': 'r/dadjokes', 'hidden': False, 'pwls': 6, 'link_flair_css_class': None, 'downs': 0, 'thumbnail_height': None, 'hide_score': False, 'name': 't3_9stzgz', 'quarantine': False, 'link_flair_text_color': 'dark', 'author_flair_background_color': None, 'subreddit_type': 'public', 'ups': 17, 'domain': 'self.dadjokes', 'media_embed': {}, 'thumbnail_width': None, 'author_flair_template_id': None, 'is_original_content': False, 'user_reports': [], 'secure_media': None, 'is_reddit_media_domain': False, 'is_meta': False, 'category': None, 'secure_media_embed': {}, 'link_flair_text': None, 'can_mod_post': False, 'score': 17, 'approved_by': None, 'thumbnail': 'self', 'edited': False, 'author_flair_css_class': None, 'author_flair_richtext': [], 'gildings': {'gid_1': 0, 'gid_2': 0, 'gid_3': 0}, 'content_categories': None, 'is_self': True, 'mod_note': None, 'created': 1540975901.0, 'link_flair_type': 'text', 'wls': 6, 'banned_by': None, 'author_flair_type': 'text', 'contest_mode': False, 'selftext_html': '<!-- SC_OFF --><div class=\"md\"><p>There&#39;s nothing left but de 
Brie</p>\\n</div><!-- SC_ON -->', 'likes': None, 'suggested_sort': None, 'banned_at_utc': None, 'view_count': None, 'archived': False, 'no_follow': False, 'is_crosspostable': False, 'pinned': False, 'over_18': False, 'media_only': False, 'link_flair_template_id': None, 'can_gild': False, 'spoiler': False, 'locked': False, 'author_flair_text': None, 'visited': False, 'num_reports': None, 'distinguished': None, 'subreddit_id': 't5_2t0no', 'mod_reason_by': None, 'removal_reason': None, 'link_flair_background_color': '', 'id': '9stzgz', 'is_robot_indexable': True, 'report_reasons': None, 'author': 'grant_leslie', 'num_crossposts': 0, 'num_comments': 2, 'send_replies': True, 'whitelist_status': 'all_ads', 'mod_reports': [], 'author_patreon_flair': False, 'author_flair_text_color': None, 'permalink': '/r/dadjokes/comments/9stzgz/did_you_hear_about_the_cheese_factory_in_france/', 'parent_whitelist_status': 'all_ads', 'stickied': False, 'url': 'https://www.reddit.com/r/dadjokes/comments/9stzgz/did_you_hear_about_the_cheese_factory_in_france/', 'subreddit_subscribers': 1220565, 'created_utc': 1540947101.0, 'media': None, 'is_video': False}\n{'approved_at_utc': None, 'subreddit': 'dadjokes', 'selftext': 'You could say it was a Soviet Reunion. ', 'author_fullname': 't2_7rea5', 'saved': False, 'mod_reason_title': None, 'gilded': 0, 'clicked': False, 'title': 'Vladimir Putin has been visiting all the old U.S.S.R. Countries.', 'link_flair_richtext': [], 'subreddit_name_prefixed': 'r/dadjokes', 'hidden': False, 'pwls': 6, 'link_flair_css_class': None, 'downs': 0, 'thumbnail_height': None, 'hide_score': False, 'name': 't3_9so9dx', 'quarantine': False, 'link_flair_text_color': 'dark', 'author_flair_background_color': None, 'subreddit_type': 'public', 'ups': 108, 'domain': 'self.dadjokes', 'media_embed': {}, 'thumbnail_width': None, 'author_flair_template_id': None, 'is_original_content': False, 'user_reports': [], 'secure_media': None, 'is_reddit_media_domain': False, 'is_meta': False, 'category': None, 'secure_media_embed': {}, 'link_flair_text': None, 'can_mod_post': False, 'score': 108, 'approved_by': None, 'thumbnail': 'self', 'edited': False, 'author_flair_css_class': None, 'author_flair_richtext': [], 'gildings': {'gid_1': 0, 'gid_2': 0, 'gid_3': 0}, 'content_categories': None, 'is_self': True, 'mod_note': None, 'created': 1540937652.0, 'link_flair_type': 'text', 'wls': 6, 'banned_by': None, 'author_flair_type': 'text', 'contest_mode': False, 'selftext_html': '<!-- SC_OFF --><div class=\"md\"><p>You could say it was a Soviet Reunion. 
</p>\\n</div><!-- SC_ON -->', 'likes': None, 'suggested_sort': None, 'banned_at_utc': None, 'view_count': None, 'archived': False, 'no_follow': False, 'is_crosspostable': False, 'pinned': False, 'over_18': False, 'media_only': False, 'link_flair_template_id': None, 'can_gild': False, 'spoiler': False, 'locked': False, 'author_flair_text': None, 'visited': False, 'num_reports': None, 'distinguished': None, 'subreddit_id': 't5_2t0no', 'mod_reason_by': None, 'removal_reason': None, 'link_flair_background_color': '', 'id': '9so9dx', 'is_robot_indexable': True, 'report_reasons': None, 'author': 'itsfinn', 'num_crossposts': 0, 'num_comments': 7, 'send_replies': True, 'whitelist_status': 'all_ads', 'mod_reports': [], 'author_patreon_flair': False, 'author_flair_text_color': None, 'permalink': '/r/dadjokes/comments/9so9dx/vladimir_putin_has_been_visiting_all_the_old_ussr/', 'parent_whitelist_status': 'all_ads', 'stickied': False, 'url': 'https://www.reddit.com/r/dadjokes/comments/9so9dx/vladimir_putin_has_been_visiting_all_the_old_ussr/', 'subreddit_subscribers': 1220565, 'created_utc': 1540908852.0, 'media': None, 'is_video': False}\n{'approved_at_utc': None, 'subreddit': 'dadjokes', 'selftext': 'It’s a counter reformation.', 'author_fullname': 't2_16sa6x', 'saved': False, 'mod_reason_title': None, 'gilded': 0, 'clicked': False, 'title': 'My Catholic mother in law is renovating her kitchen.', 'link_flair_richtext': [], 'subreddit_name_prefixed': 'r/dadjokes', 'hidden': False, 'pwls': 6, 'link_flair_css_class': None, 'downs': 0, 'thumbnail_height': None, 'hide_score': False, 'name': 't3_9sts58', 'quarantine': False, 'link_flair_text_color': 'dark', 'author_flair_background_color': None, 'subreddit_type': 'public', 'ups': 17, 'domain': 'self.dadjokes', 'media_embed': {}, 'thumbnail_width': None, 'author_flair_template_id': None, 'is_original_content': False, 'user_reports': [], 'secure_media': None, 'is_reddit_media_domain': False, 'is_meta': False, 'category': None, 'secure_media_embed': {}, 'link_flair_text': None, 'can_mod_post': False, 'score': 17, 'approved_by': None, 'thumbnail': 'self', 'edited': False, 'author_flair_css_class': None, 'author_flair_richtext': [], 'gildings': {'gid_1': 0, 'gid_2': 0, 'gid_3': 0}, 'content_categories': None, 'is_self': True, 'mod_note': None, 'created': 1540974365.0, 'link_flair_type': 'text', 'wls': 6, 'banned_by': None, 'author_flair_type': 'text', 'contest_mode': False, 'selftext_html': '<!-- SC_OFF --><div class=\"md\"><p>It’s a counter reformation.</p>\\n</div><!-- SC_ON -->', 'likes': None, 'suggested_sort': None, 'banned_at_utc': None, 'view_count': None, 'archived': False, 'no_follow': False, 'is_crosspostable': False, 'pinned': False, 'over_18': False, 'media_only': False, 'link_flair_template_id': None, 'can_gild': False, 'spoiler': False, 'locked': False, 'author_flair_text': None, 'visited': False, 'num_reports': None, 'distinguished': None, 'subreddit_id': 't5_2t0no', 'mod_reason_by': None, 'removal_reason': None, 'link_flair_background_color': '', 'id': '9sts58', 'is_robot_indexable': True, 'report_reasons': None, 'author': 'porichoygupto', 'num_crossposts': 0, 'num_comments': 0, 'send_replies': False, 'whitelist_status': 'all_ads', 'mod_reports': [], 'author_patreon_flair': False, 'author_flair_text_color': None, 'permalink': '/r/dadjokes/comments/9sts58/my_catholic_mother_in_law_is_renovating_her/', 'parent_whitelist_status': 'all_ads', 'stickied': False, 'url': 
'https://www.reddit.com/r/dadjokes/comments/9sts58/my_catholic_mother_in_law_is_renovating_her/', 'subreddit_subscribers': 1220565, 'created_utc': 1540945565.0, 'media': None, 'is_video': False}\n{'approved_at_utc': None, 'subreddit': 'dadjokes', 'selftext': \"I'm outstanding.\", 'author_fullname': 't2_8dg9m', 'saved': False, 'mod_reason_title': None, 'gilded': 0, 'clicked': False, 'title': \"I'm going to stand outside, so if anyone asks...\", 'link_flair_richtext': [], 'subreddit_name_prefixed': 'r/dadjokes', 'hidden': False, 'pwls': 6, 'link_flair_css_class': None, 'downs': 0, 'thumbnail_height': None, 'hide_score': False, 'name': 't3_9sokox', 'quarantine': False, 'link_flair_text_color': 'dark', 'author_flair_background_color': None, 'subreddit_type': 'public', 'ups': 73, 'domain': 'self.dadjokes', 'media_embed': {}, 'thumbnail_width': None, 'author_flair_template_id': None, 'is_original_content': False, 'user_reports': [], 'secure_media': None, 'is_reddit_media_domain': False, 'is_meta': False, 'category': None, 'secure_media_embed': {}, 'link_flair_text': None, 'can_mod_post': False, 'score': 73, 'approved_by': None, 'thumbnail': 'self', 'edited': False, 'author_flair_css_class': None, 'author_flair_richtext': [], 'gildings': {'gid_1': 0, 'gid_2': 0, 'gid_3': 0}, 'content_categories': None, 'is_self': True, 'mod_note': None, 'created': 1540939588.0, 'link_flair_type': 'text', 'wls': 6, 'banned_by': None, 'author_flair_type': 'text', 'contest_mode': False, 'selftext_html': '<!-- SC_OFF --><div class=\"md\"><p>I&#39;m outstanding.</p>\\n</div><!-- SC_ON -->', 'likes': None, 'suggested_sort': None, 'banned_at_utc': None, 'view_count': None, 'archived': False, 'no_follow': False, 'is_crosspostable': False, 'pinned': False, 'over_18': False, 'media_only': False, 'link_flair_template_id': None, 'can_gild': False, 'spoiler': False, 'locked': False, 'author_flair_text': None, 'visited': False, 'num_reports': None, 'distinguished': None, 'subreddit_id': 't5_2t0no', 'mod_reason_by': None, 'removal_reason': None, 'link_flair_background_color': '', 'id': '9sokox', 'is_robot_indexable': True, 'report_reasons': None, 'author': 'Dropbackandpunt', 'num_crossposts': 0, 'num_comments': 2, 'send_replies': False, 'whitelist_status': 'all_ads', 'mod_reports': [], 'author_patreon_flair': False, 'author_flair_text_color': None, 'permalink': '/r/dadjokes/comments/9sokox/im_going_to_stand_outside_so_if_anyone_asks/', 'parent_whitelist_status': 'all_ads', 'stickied': False, 'url': 'https://www.reddit.com/r/dadjokes/comments/9sokox/im_going_to_stand_outside_so_if_anyone_asks/', 'subreddit_subscribers': 1220565, 'created_utc': 1540910788.0, 'media': None, 'is_video': False}\n{'approved_at_utc': None, 'subreddit': 'dadjokes', 'selftext': \"He's got 2020 vision.\", 'author_fullname': 't2_ie2juk4', 'saved': False, 'mod_reason_title': None, 'gilded': 0, 'clicked': False, 'title': \"The psychic I go to can predict what's going to happen two years from now.\", 'link_flair_richtext': [], 'subreddit_name_prefixed': 'r/dadjokes', 'hidden': False, 'pwls': 6, 'link_flair_css_class': None, 'downs': 0, 'thumbnail_height': None, 'hide_score': False, 'name': 't3_9slpqm', 'quarantine': False, 'link_flair_text_color': 'dark', 'author_flair_background_color': None, 'subreddit_type': 'public', 'ups': 269, 'domain': 'self.dadjokes', 'media_embed': {}, 'thumbnail_width': None, 'author_flair_template_id': None, 'is_original_content': False, 'user_reports': [], 'secure_media': None, 'is_reddit_media_domain': False, 'is_meta': False, 
'category': None, 'secure_media_embed': {}, 'link_flair_text': None, 'can_mod_post': False, 'score': 269, 'approved_by': None, 'thumbnail': 'self', 'edited': False, 'author_flair_css_class': None, 'author_flair_richtext': [], 'gildings': {'gid_1': 0, 'gid_2': 0, 'gid_3': 0}, 'content_categories': None, 'is_self': True, 'mod_note': None, 'created': 1540913588.0, 'link_flair_type': 'text', 'wls': 6, 'banned_by': None, 'author_flair_type': 'text', 'contest_mode': False, 'selftext_html': '<!-- SC_OFF --><div class=\"md\"><p>He&#39;s got 2020 vision.</p>\\n</div><!-- SC_ON -->', 'likes': None, 'suggested_sort': None, 'banned_at_utc': None, 'view_count': None, 'archived': False, 'no_follow': False, 'is_crosspostable': False, 'pinned': False, 'over_18': False, 'media_only': False, 'link_flair_template_id': None, 'can_gild': False, 'spoiler': False, 'locked': False, 'author_flair_text': None, 'visited': False, 'num_reports': None, 'distinguished': None, 'subreddit_id': 't5_2t0no', 'mod_reason_by': None, 'removal_reason': None, 'link_flair_background_color': '', 'id': '9slpqm', 'is_robot_indexable': True, 'report_reasons': None, 'author': 'this_is_grand', 'num_crossposts': 0, 'num_comments': 7, 'send_replies': True, 'whitelist_status': 'all_ads', 'mod_reports': [], 'author_patreon_flair': False, 'author_flair_text_color': None, 'permalink': '/r/dadjokes/comments/9slpqm/the_psychic_i_go_to_can_predict_whats_going_to/', 'parent_whitelist_status': 'all_ads', 'stickied': False, 'url': 'https://www.reddit.com/r/dadjokes/comments/9slpqm/the_psychic_i_go_to_can_predict_whats_going_to/', 'subreddit_subscribers': 1220565, 'created_utc': 1540884788.0, 'media': None, 'is_video': False}\n{'approved_at_utc': None, 'subreddit': 'dadjokes', 'selftext': 'I will not sleep until I find a cure.', 'author_fullname': 't2_uzgxunx', 'saved': False, 'mod_reason_title': None, 'gilded': 0, 'clicked': False, 'title': 'Insomnia is a constant battle...', 'link_flair_richtext': [], 'subreddit_name_prefixed': 'r/dadjokes', 'hidden': False, 'pwls': 6, 'link_flair_css_class': None, 'downs': 0, 'thumbnail_height': None, 'hide_score': False, 'name': 't3_9srnco', 'quarantine': False, 'link_flair_text_color': 'dark', 'author_flair_background_color': None, 'subreddit_type': 'public', 'ups': 25, 'domain': 'self.dadjokes', 'media_embed': {}, 'thumbnail_width': None, 'author_flair_template_id': None, 'is_original_content': False, 'user_reports': [], 'secure_media': None, 'is_reddit_media_domain': False, 'is_meta': False, 'category': None, 'secure_media_embed': {}, 'link_flair_text': None, 'can_mod_post': False, 'score': 25, 'approved_by': None, 'thumbnail': 'self', 'edited': False, 'author_flair_css_class': None, 'author_flair_richtext': [], 'gildings': {'gid_1': 0, 'gid_2': 0, 'gid_3': 0}, 'content_categories': None, 'is_self': True, 'mod_note': None, 'created': 1540959551.0, 'link_flair_type': 'text', 'wls': 6, 'banned_by': None, 'author_flair_type': 'text', 'contest_mode': False, 'selftext_html': '<!-- SC_OFF --><div class=\"md\"><p>I will not sleep until I find a cure.</p>\\n</div><!-- SC_ON -->', 'likes': None, 'suggested_sort': None, 'banned_at_utc': None, 'view_count': None, 'archived': False, 'no_follow': False, 'is_crosspostable': False, 'pinned': False, 'over_18': False, 'media_only': False, 'link_flair_template_id': None, 'can_gild': False, 'spoiler': False, 'locked': False, 'author_flair_text': None, 'visited': False, 'num_reports': None, 'distinguished': None, 'subreddit_id': 't5_2t0no', 'mod_reason_by': None, 
'removal_reason': None, 'link_flair_background_color': '', 'id': '9srnco', 'is_robot_indexable': True, 'report_reasons': None, 'author': 'Saladbbar', 'num_crossposts': 0, 'num_comments': 2, 'send_replies': True, 'whitelist_status': 'all_ads', 'mod_reports': [], 'author_patreon_flair': False, 'author_flair_text_color': None, 'permalink': '/r/dadjokes/comments/9srnco/insomnia_is_a_constant_battle/', 'parent_whitelist_status': 'all_ads', 'stickied': False, 'url': 'https://www.reddit.com/r/dadjokes/comments/9srnco/insomnia_is_a_constant_battle/', 'subreddit_subscribers': 1220565, 'created_utc': 1540930751.0, 'media': None, 'is_video': False}\n{'approved_at_utc': None, 'subreddit': 'dadjokes', 'selftext': 'Igloos it together ', 'author_fullname': 't2_265kqgdi', 'saved': False, 'mod_reason_title': None, 'gilded': 0, 'clicked': False, 'title': 'How does a penguin build a house?', 'link_flair_richtext': [], 'subreddit_name_prefixed': 'r/dadjokes', 'hidden': False, 'pwls': 6, 'link_flair_css_class': None, 'downs': 0, 'thumbnail_height': None, 'hide_score': False, 'name': 't3_9sui2z', 'quarantine': False, 'link_flair_text_color': 'dark', 'author_flair_background_color': None, 'subreddit_type': 'public', 'ups': 8, 'domain': 'self.dadjokes', 'media_embed': {}, 'thumbnail_width': None, 'author_flair_template_id': None, 'is_original_content': False, 'user_reports': [], 'secure_media': None, 'is_reddit_media_domain': False, 'is_meta': False, 'category': None, 'secure_media_embed': {}, 'link_flair_text': None, 'can_mod_post': False, 'score': 8, 'approved_by': None, 'thumbnail': 'self', 'edited': False, 'author_flair_css_class': None, 'author_flair_richtext': [], 'gildings': {'gid_1': 0, 'gid_2': 0, 'gid_3': 0}, 'content_categories': None, 'is_self': True, 'mod_note': None, 'created': 1540979806.0, 'link_flair_type': 'text', 'wls': 6, 'banned_by': None, 'author_flair_type': 'text', 'contest_mode': False, 'selftext_html': '<!-- SC_OFF --><div class=\"md\"><p>Igloos it together </p>\\n</div><!-- SC_ON -->', 'likes': None, 'suggested_sort': None, 'banned_at_utc': None, 'view_count': None, 'archived': False, 'no_follow': False, 'is_crosspostable': False, 'pinned': False, 'over_18': False, 'media_only': False, 'link_flair_template_id': None, 'can_gild': False, 'spoiler': False, 'locked': False, 'author_flair_text': None, 'visited': False, 'num_reports': None, 'distinguished': None, 'subreddit_id': 't5_2t0no', 'mod_reason_by': None, 'removal_reason': None, 'link_flair_background_color': '', 'id': '9sui2z', 'is_robot_indexable': True, 'report_reasons': None, 'author': 'Headsup_Eyesdown', 'num_crossposts': 0, 'num_comments': 2, 'send_replies': True, 'whitelist_status': 'all_ads', 'mod_reports': [], 'author_patreon_flair': False, 'author_flair_text_color': None, 'permalink': '/r/dadjokes/comments/9sui2z/how_does_a_penguin_build_a_house/', 'parent_whitelist_status': 'all_ads', 'stickied': False, 'url': 'https://www.reddit.com/r/dadjokes/comments/9sui2z/how_does_a_penguin_build_a_house/', 'subreddit_subscribers': 1220565, 'created_utc': 1540951006.0, 'media': None, 'is_video': False}\n{'approved_at_utc': None, 'subreddit': 'dadjokes', 'selftext': 'There are two couples on either side of me, kissing and making out.', 'author_fullname': 't2_16sa6x', 'saved': False, 'mod_reason_title': None, 'gilded': 0, 'clicked': False, 'title': 'I’m currently in between relationships.', 'link_flair_richtext': [], 'subreddit_name_prefixed': 'r/dadjokes', 'hidden': False, 'pwls': 6, 'link_flair_css_class': None, 'downs': 0, 
'thumbnail_height': None, 'hide_score': False, 'name': 't3_9sqmv0', 'quarantine': False, 'link_flair_text_color': 'dark', 'author_flair_background_color': None, 'subreddit_type': 'public', 'ups': 32, 'domain': 'self.dadjokes', 'media_embed': {}, 'thumbnail_width': None, 'author_flair_template_id': None, 'is_original_content': False, 'user_reports': [], 'secure_media': None, 'is_reddit_media_domain': False, 'is_meta': False, 'category': None, 'secure_media_embed': {}, 'link_flair_text': None, 'can_mod_post': False, 'score': 32, 'approved_by': None, 'thumbnail': 'self', 'edited': False, 'author_flair_css_class': None, 'author_flair_richtext': [], 'gildings': {'gid_1': 0, 'gid_2': 0, 'gid_3': 0}, 'content_categories': None, 'is_self': True, 'mod_note': None, 'created': 1540952885.0, 'link_flair_type': 'text', 'wls': 6, 'banned_by': None, 'author_flair_type': 'text', 'contest_mode': False, 'selftext_html': '<!-- SC_OFF --><div class=\"md\"><p>There are two couples on either side of me, kissing and making out.</p>\\n</div><!-- SC_ON -->', 'likes': None, 'suggested_sort': None, 'banned_at_utc': None, 'view_count': None, 'archived': False, 'no_follow': False, 'is_crosspostable': False, 'pinned': False, 'over_18': False, 'media_only': False, 'link_flair_template_id': None, 'can_gild': False, 'spoiler': False, 'locked': False, 'author_flair_text': None, 'visited': False, 'num_reports': None, 'distinguished': None, 'subreddit_id': 't5_2t0no', 'mod_reason_by': None, 'removal_reason': None, 'link_flair_background_color': '', 'id': '9sqmv0', 'is_robot_indexable': True, 'report_reasons': None, 'author': 'porichoygupto', 'num_crossposts': 0, 'num_comments': 1, 'send_replies': False, 'whitelist_status': 'all_ads', 'mod_reports': [], 'author_patreon_flair': False, 'author_flair_text_color': None, 'permalink': '/r/dadjokes/comments/9sqmv0/im_currently_in_between_relationships/', 'parent_whitelist_status': 'all_ads', 'stickied': False, 'url': 'https://www.reddit.com/r/dadjokes/comments/9sqmv0/im_currently_in_between_relationships/', 'subreddit_subscribers': 1220565, 'created_utc': 1540924085.0, 'media': None, 'is_video': False}\n{'approved_at_utc': None, 'subreddit': 'dadjokes', 'selftext': 'You haven’t? Thats surprising, they’ve been making headlines!', 'author_fullname': 't2_2e9sn49o', 'saved': False, 'mod_reason_title': None, 'gilded': 0, 'clicked': False, 'title': 'Have you heard about the new corduroy pillows?', 'link_flair_richtext': [], 'subreddit_name_prefixed': 'r/dadjokes', 'hidden': False, 'pwls': 6, 'link_flair_css_class': None, 'downs': 0, 'thumbnail_height': None, 'hide_score': True, 'name': 't3_9swes7', 'quarantine': False, 'link_flair_text_color': 'dark', 'author_flair_background_color': None, 'subreddit_type': 'public', 'ups': 3, 'domain': 'self.dadjokes', 'media_embed': {}, 'thumbnail_width': None, 'author_flair_template_id': None, 'is_original_content': False, 'user_reports': [], 'secure_media': None, 'is_reddit_media_domain': False, 'is_meta': False, 'category': None, 'secure_media_embed': {}, 'link_flair_text': None, 'can_mod_post': False, 'score': 3, 'approved_by': None, 'thumbnail': 'self', 'edited': False, 'author_flair_css_class': None, 'author_flair_richtext': [], 'gildings': {'gid_1': 0, 'gid_2': 0, 'gid_3': 0}, 'content_categories': None, 'is_self': True, 'mod_note': None, 'created': 1540997825.0, 'link_flair_type': 'text', 'wls': 6, 'banned_by': None, 'author_flair_type': 'text', 'contest_mode': False, 'selftext_html': '<!-- SC_OFF --><div class=\"md\"><p>You haven’t? 
Thats surprising, they’ve been making headlines!</p>\\n</div><!-- SC_ON -->', 'likes': None, 'suggested_sort': None, 'banned_at_utc': None, 'view_count': None, 'archived': False, 'no_follow': True, 'is_crosspostable': False, 'pinned': False, 'over_18': False, 'media_only': False, 'link_flair_template_id': None, 'can_gild': False, 'spoiler': False, 'locked': False, 'author_flair_text': None, 'visited': False, 'num_reports': None, 'distinguished': None, 'subreddit_id': 't5_2t0no', 'mod_reason_by': None, 'removal_reason': None, 'link_flair_background_color': '', 'id': '9swes7', 'is_robot_indexable': True, 'report_reasons': None, 'author': 'thc-Baker', 'num_crossposts': 0, 'num_comments': 1, 'send_replies': True, 'whitelist_status': 'all_ads', 'mod_reports': [], 'author_patreon_flair': False, 'author_flair_text_color': None, 'permalink': '/r/dadjokes/comments/9swes7/have_you_heard_about_the_new_corduroy_pillows/', 'parent_whitelist_status': 'all_ads', 'stickied': False, 'url': 'https://www.reddit.com/r/dadjokes/comments/9swes7/have_you_heard_about_the_new_corduroy_pillows/', 'subreddit_subscribers': 1220565, 'created_utc': 1540969025.0, 'media': None, 'is_video': False}\n{'approved_at_utc': None, 'subreddit': 'dadjokes', 'selftext': '...you can tell when it becomes ap*parent*.', 'author_fullname': 't2_1n2eo9ij', 'saved': False, 'mod_reason_title': None, 'gilded': 0, 'clicked': False, 'title': \"I feel like some people here can't even tell when a joke is a dad joke\", 'link_flair_richtext': [], 'subreddit_name_prefixed': 'r/dadjokes', 'hidden': False, 'pwls': 6, 'link_flair_css_class': None, 'downs': 0, 'thumbnail_height': None, 'hide_score': False, 'name': 't3_9stdqz', 'quarantine': False, 'link_flair_text_color': 'dark', 'author_flair_background_color': None, 'subreddit_type': 'public', 'ups': 10, 'domain': 'self.dadjokes', 'media_embed': {}, 'thumbnail_width': None, 'author_flair_template_id': None, 'is_original_content': False, 'user_reports': [], 'secure_media': None, 'is_reddit_media_domain': False, 'is_meta': False, 'category': None, 'secure_media_embed': {}, 'link_flair_text': None, 'can_mod_post': False, 'score': 10, 'approved_by': None, 'thumbnail': 'self', 'edited': False, 'author_flair_css_class': None, 'author_flair_richtext': [], 'gildings': {'gid_1': 0, 'gid_2': 0, 'gid_3': 0}, 'content_categories': None, 'is_self': True, 'mod_note': None, 'created': 1540971478.0, 'link_flair_type': 'text', 'wls': 6, 'banned_by': None, 'author_flair_type': 'text', 'contest_mode': False, 'selftext_html': '<!-- SC_OFF --><div class=\"md\"><p>...you can tell when it becomes ap<em>parent</em>.</p>\\n</div><!-- SC_ON -->', 'likes': None, 'suggested_sort': None, 'banned_at_utc': None, 'view_count': None, 'archived': False, 'no_follow': True, 'is_crosspostable': False, 'pinned': False, 'over_18': False, 'media_only': False, 'link_flair_template_id': None, 'can_gild': False, 'spoiler': False, 'locked': False, 'author_flair_text': None, 'visited': False, 'num_reports': None, 'distinguished': None, 'subreddit_id': 't5_2t0no', 'mod_reason_by': None, 'removal_reason': None, 'link_flair_background_color': '', 'id': '9stdqz', 'is_robot_indexable': True, 'report_reasons': None, 'author': 'BringOutTheGMMP', 'num_crossposts': 0, 'num_comments': 3, 'send_replies': True, 'whitelist_status': 'all_ads', 'mod_reports': [], 'author_patreon_flair': False, 'author_flair_text_color': None, 'permalink': '/r/dadjokes/comments/9stdqz/i_feel_like_some_people_here_cant_even_tell_when/', 'parent_whitelist_status': 'all_ads', 
'stickied': False, 'url': 'https://www.reddit.com/r/dadjokes/comments/9stdqz/i_feel_like_some_people_here_cant_even_tell_when/', 'subreddit_subscribers': 1220565, 'created_utc': 1540942678.0, 'media': None, 'is_video': False}\n{'approved_at_utc': None, 'subreddit': 'dadjokes', 'selftext': 'Mississippi.', 'author_fullname': 't2_5qgin', 'saved': False, 'mod_reason_title': None, 'gilded': 0, 'clicked': False, 'title': \"What do you call a hippie's wife?\", 'link_flair_richtext': [], 'subreddit_name_prefixed': 'r/dadjokes', 'hidden': False, 'pwls': 6, 'link_flair_css_class': None, 'downs': 0, 'thumbnail_height': None, 'hide_score': False, 'name': 't3_9spztw', 'quarantine': False, 'link_flair_text_color': 'dark', 'author_flair_background_color': None, 'subreddit_type': 'public', 'ups': 38, 'domain': 'self.dadjokes', 'media_embed': {}, 'thumbnail_width': None, 'author_flair_template_id': None, 'is_original_content': False, 'user_reports': [], 'secure_media': None, 'is_reddit_media_domain': False, 'is_meta': False, 'category': None, 'secure_media_embed': {}, 'link_flair_text': None, 'can_mod_post': False, 'score': 38, 'approved_by': None, 'thumbnail': 'self', 'edited': False, 'author_flair_css_class': None, 'author_flair_richtext': [], 'gildings': {'gid_1': 0, 'gid_2': 0, 'gid_3': 0}, 'content_categories': None, 'is_self': True, 'mod_note': None, 'created': 1540948823.0, 'link_flair_type': 'text', 'wls': 6, 'banned_by': None, 'author_flair_type': 'text', 'contest_mode': False, 'selftext_html': '<!-- SC_OFF --><div class=\"md\"><p>Mississippi.</p>\\n</div><!-- SC_ON -->', 'likes': None, 'suggested_sort': None, 'banned_at_utc': None, 'view_count': None, 'archived': False, 'no_follow': False, 'is_crosspostable': False, 'pinned': False, 'over_18': False, 'media_only': False, 'link_flair_template_id': None, 'can_gild': False, 'spoiler': False, 'locked': False, 'author_flair_text': None, 'visited': False, 'num_reports': None, 'distinguished': None, 'subreddit_id': 't5_2t0no', 'mod_reason_by': None, 'removal_reason': None, 'link_flair_background_color': '', 'id': '9spztw', 'is_robot_indexable': True, 'report_reasons': None, 'author': 'DrewciferCDXX', 'num_crossposts': 0, 'num_comments': 1, 'send_replies': True, 'whitelist_status': 'all_ads', 'mod_reports': [], 'author_patreon_flair': False, 'author_flair_text_color': None, 'permalink': '/r/dadjokes/comments/9spztw/what_do_you_call_a_hippies_wife/', 'parent_whitelist_status': 'all_ads', 'stickied': False, 'url': 'https://www.reddit.com/r/dadjokes/comments/9spztw/what_do_you_call_a_hippies_wife/', 'subreddit_subscribers': 1220565, 'created_utc': 1540920023.0, 'media': None, 'is_video': False}\n{'approved_at_utc': None, 'subreddit': 'dadjokes', 'selftext': '', 'author_fullname': 't2_48j3b', 'saved': False, 'mod_reason_title': None, 'gilded': 0, 'clicked': False, 'title': 'People who mix up entomology and etymology bug me in ways I cannot put into words.', 'link_flair_richtext': [], 'subreddit_name_prefixed': 'r/dadjokes', 'hidden': False, 'pwls': 6, 'link_flair_css_class': None, 'downs': 0, 'thumbnail_height': None, 'hide_score': False, 'name': 't3_9sv9x3', 'quarantine': False, 'link_flair_text_color': 'dark', 'author_flair_background_color': None, 'subreddit_type': 'public', 'ups': 4, 'domain': 'self.dadjokes', 'media_embed': {}, 'thumbnail_width': None, 'author_flair_template_id': None, 'is_original_content': False, 'user_reports': [], 'secure_media': None, 'is_reddit_media_domain': False, 'is_meta': False, 'category': None, 'secure_media_embed': {}, 
'link_flair_text': None, 'can_mod_post': False, 'score': 4, 'approved_by': None, 'thumbnail': 'self', 'edited': False, 'author_flair_css_class': None, 'author_flair_richtext': [], 'gildings': {'gid_1': 0, 'gid_2': 0, 'gid_3': 0}, 'content_categories': None, 'is_self': True, 'mod_note': None, 'created': 1540986101.0, 'link_flair_type': 'text', 'wls': 6, 'banned_by': None, 'author_flair_type': 'text', 'contest_mode': False, 'selftext_html': None, 'likes': None, 'suggested_sort': None, 'banned_at_utc': None, 'view_count': None, 'archived': False, 'no_follow': False, 'is_crosspostable': False, 'pinned': False, 'over_18': False, 'media_only': False, 'link_flair_template_id': None, 'can_gild': False, 'spoiler': False, 'locked': False, 'author_flair_text': None, 'visited': False, 'num_reports': None, 'distinguished': None, 'subreddit_id': 't5_2t0no', 'mod_reason_by': None, 'removal_reason': None, 'link_flair_background_color': '', 'id': '9sv9x3', 'is_robot_indexable': True, 'report_reasons': None, 'author': 'Zarokima', 'num_crossposts': 0, 'num_comments': 0, 'send_replies': True, 'whitelist_status': 'all_ads', 'mod_reports': [], 'author_patreon_flair': False, 'author_flair_text_color': None, 'permalink': '/r/dadjokes/comments/9sv9x3/people_who_mix_up_entomology_and_etymology_bug_me/', 'parent_whitelist_status': 'all_ads', 'stickied': False, 'url': 'https://www.reddit.com/r/dadjokes/comments/9sv9x3/people_who_mix_up_entomology_and_etymology_bug_me/', 'subreddit_subscribers': 1220565, 'created_utc': 1540957301.0, 'media': None, 'is_video': False}\n{'approved_at_utc': None, 'subreddit': 'dadjokes', 'selftext': 'She had a change of heart.', 'author_fullname': 't2_9n7ic', 'saved': False, 'mod_reason_title': None, 'gilded': 0, 'clicked': False, 'title': 'A patient was initially doubtful of the success rate and concerned about her upcoming heart transplant, but after some reassuring...', 'link_flair_richtext': [], 'subreddit_name_prefixed': 'r/dadjokes', 'hidden': False, 'pwls': 6, 'link_flair_css_class': None, 'downs': 0, 'thumbnail_height': None, 'hide_score': False, 'name': 't3_9stqfx', 'quarantine': False, 'link_flair_text_color': 'dark', 'author_flair_background_color': None, 'subreddit_type': 'public', 'ups': 10, 'domain': 'self.dadjokes', 'media_embed': {}, 'thumbnail_width': None, 'author_flair_template_id': None, 'is_original_content': False, 'user_reports': [], 'secure_media': None, 'is_reddit_media_domain': False, 'is_meta': False, 'category': None, 'secure_media_embed': {}, 'link_flair_text': None, 'can_mod_post': False, 'score': 10, 'approved_by': None, 'thumbnail': 'self', 'edited': False, 'author_flair_css_class': None, 'author_flair_richtext': [], 'gildings': {'gid_1': 0, 'gid_2': 0, 'gid_3': 0}, 'content_categories': None, 'is_self': True, 'mod_note': None, 'created': 1540974012.0, 'link_flair_type': 'text', 'wls': 6, 'banned_by': None, 'author_flair_type': 'text', 'contest_mode': False, 'selftext_html': '<!-- SC_OFF --><div class=\"md\"><p>She had a change of heart.</p>\\n</div><!-- SC_ON -->', 'likes': None, 'suggested_sort': None, 'banned_at_utc': None, 'view_count': None, 'archived': False, 'no_follow': True, 'is_crosspostable': False, 'pinned': False, 'over_18': False, 'media_only': False, 'link_flair_template_id': None, 'can_gild': False, 'spoiler': False, 'locked': False, 'author_flair_text': None, 'visited': False, 'num_reports': None, 'distinguished': None, 'subreddit_id': 't5_2t0no', 'mod_reason_by': None, 'removal_reason': None, 'link_flair_background_color': '', 'id': 
'9stqfx', 'is_robot_indexable': True, 'report_reasons': None, 'author': 'NomeSoap', 'num_crossposts': 0, 'num_comments': 1, 'send_replies': True, 'whitelist_status': 'all_ads', 'mod_reports': [], 'author_patreon_flair': False, 'author_flair_text_color': None, 'permalink': '/r/dadjokes/comments/9stqfx/a_patient_was_initially_doubtful_of_the_success/', 'parent_whitelist_status': 'all_ads', 'stickied': False, 'url': 'https://www.reddit.com/r/dadjokes/comments/9stqfx/a_patient_was_initially_doubtful_of_the_success/', 'subreddit_subscribers': 1220565, 'created_utc': 1540945212.0, 'media': None, 'is_video': False}\n{'approved_at_utc': None, 'subreddit': 'dadjokes', 'selftext': 'But three lefts do.', 'author_fullname': 't2_1872eyu4', 'saved': False, 'mod_reason_title': None, 'gilded': 0, 'clicked': False, 'title': \"Two wrongs don't make a right:\", 'link_flair_richtext': [], 'subreddit_name_prefixed': 'r/dadjokes', 'hidden': False, 'pwls': 6, 'link_flair_css_class': None, 'downs': 0, 'thumbnail_height': None, 'hide_score': False, 'name': 't3_9ssord', 'quarantine': False, 'link_flair_text_color': 'dark', 'author_flair_background_color': None, 'subreddit_type': 'public', 'ups': 14, 'domain': 'self.dadjokes', 'media_embed': {}, 'thumbnail_width': None, 'author_flair_template_id': None, 'is_original_content': False, 'user_reports': [], 'secure_media': None, 'is_reddit_media_domain': False, 'is_meta': False, 'category': None, 'secure_media_embed': {}, 'link_flair_text': None, 'can_mod_post': False, 'score': 14, 'approved_by': None, 'thumbnail': 'self', 'edited': False, 'author_flair_css_class': None, 'author_flair_richtext': [], 'gildings': {'gid_1': 0, 'gid_2': 0, 'gid_3': 0}, 'content_categories': None, 'is_self': True, 'mod_note': None, 'created': 1540966535.0, 'link_flair_type': 'text', 'wls': 6, 'banned_by': None, 'author_flair_type': 'text', 'contest_mode': False, 'selftext_html': '<!-- SC_OFF --><div class=\"md\"><p>But three lefts do.</p>\\n</div><!-- SC_ON -->', 'likes': None, 'suggested_sort': None, 'banned_at_utc': None, 'view_count': None, 'archived': False, 'no_follow': False, 'is_crosspostable': False, 'pinned': False, 'over_18': False, 'media_only': False, 'link_flair_template_id': None, 'can_gild': False, 'spoiler': False, 'locked': False, 'author_flair_text': None, 'visited': False, 'num_reports': None, 'distinguished': None, 'subreddit_id': 't5_2t0no', 'mod_reason_by': None, 'removal_reason': None, 'link_flair_background_color': '', 'id': '9ssord', 'is_robot_indexable': True, 'report_reasons': None, 'author': 'mistercathead', 'num_crossposts': 0, 'num_comments': 2, 'send_replies': True, 'whitelist_status': 'all_ads', 'mod_reports': [], 'author_patreon_flair': False, 'author_flair_text_color': None, 'permalink': '/r/dadjokes/comments/9ssord/two_wrongs_dont_make_a_right/', 'parent_whitelist_status': 'all_ads', 'stickied': False, 'url': 'https://www.reddit.com/r/dadjokes/comments/9ssord/two_wrongs_dont_make_a_right/', 'subreddit_subscribers': 1220565, 'created_utc': 1540937735.0, 'media': None, 'is_video': False}\n{'approved_at_utc': None, 'subreddit': 'dadjokes', 'selftext': 'I have always wanted to be generous to a fault.', 'author_fullname': 't2_f8ijyr9', 'saved': False, 'mod_reason_title': None, 'gilded': 0, 'clicked': False, 'title': 'I once threw a quarter into the San Andreas...', 'link_flair_richtext': [], 'subreddit_name_prefixed': 'r/dadjokes', 'hidden': False, 'pwls': 6, 'link_flair_css_class': None, 'downs': 0, 'thumbnail_height': None, 'hide_score': False, 'name': 
't3_9snabh', 'quarantine': False, 'link_flair_text_color': 'dark', 'author_flair_background_color': None, 'subreddit_type': 'public', 'ups': 79, 'domain': 'self.dadjokes', 'media_embed': {}, 'thumbnail_width': None, 'author_flair_template_id': None, 'is_original_content': False, 'user_reports': [], 'secure_media': None, 'is_reddit_media_domain': False, 'is_meta': False, 'category': None, 'secure_media_embed': {}, 'link_flair_text': None, 'can_mod_post': False, 'score': 79, 'approved_by': None, 'thumbnail': 'self', 'edited': False, 'author_flair_css_class': None, 'author_flair_richtext': [], 'gildings': {'gid_1': 0, 'gid_2': 0, 'gid_3': 0}, 'content_categories': None, 'is_self': True, 'mod_note': None, 'created': 1540930619.0, 'link_flair_type': 'text', 'wls': 6, 'banned_by': None, 'author_flair_type': 'text', 'contest_mode': False, 'selftext_html': '<!-- SC_OFF --><div class=\"md\"><p>I have always wanted to be generous to a fault.</p>\\n</div><!-- SC_ON -->', 'likes': None, 'suggested_sort': None, 'banned_at_utc': None, 'view_count': None, 'archived': False, 'no_follow': False, 'is_crosspostable': False, 'pinned': False, 'over_18': False, 'media_only': False, 'link_flair_template_id': None, 'can_gild': False, 'spoiler': False, 'locked': False, 'author_flair_text': None, 'visited': False, 'num_reports': None, 'distinguished': None, 'subreddit_id': 't5_2t0no', 'mod_reason_by': None, 'removal_reason': None, 'link_flair_background_color': '', 'id': '9snabh', 'is_robot_indexable': True, 'report_reasons': None, 'author': 'cleanandclever', 'num_crossposts': 0, 'num_comments': 4, 'send_replies': True, 'whitelist_status': 'all_ads', 'mod_reports': [], 'author_patreon_flair': False, 'author_flair_text_color': None, 'permalink': '/r/dadjokes/comments/9snabh/i_once_threw_a_quarter_into_the_san_andreas/', 'parent_whitelist_status': 'all_ads', 'stickied': False, 'url': 'https://www.reddit.com/r/dadjokes/comments/9snabh/i_once_threw_a_quarter_into_the_san_andreas/', 'subreddit_subscribers': 1220565, 'created_utc': 1540901819.0, 'media': None, 'is_video': False}\n"
],
[
"filtered = list(filter(lambda x: False if 'http' in x['data']['selftext'] else x , jokes))\nprint(len(filtered))",
"25\n"
],
[
"onlyUps = list(filter(lambda x: x if x['data']['ups'] >= 10 else False, filtered))\n\nformatedJokes = list(\n map(lambda x: \n {\n 'title': x['data']['title'],\n 'selftext': x['data']['selftext']\n }, onlyUps\n ))\n\ndef printJokes(jokes):\n for joke in jokes:\n print(joke['title'] + \"\\n\" + joke['selftext'] + \"\\n\\n\")\n \nprintJokes(formatedJokes)\n \n",
"A programmer gets sent to the store by his wife. His wife says, “Get a gallon of milk, and if they have eggs, get a dozen.”\nThe programmer returns home with 12 gallons of milk and says, “They had eggs.”\n\nedit: I know guys, I know, it’s supposed to be 13, I messed up the wording, please forgive me\n\n\nDaddy, what is clickbait?\n\n\n\nHow do you find Will Smith in the snow?\nYou look for fresh prints\n\n\nSo I can legit ride a unicycle. My mom told me it was a waste of time and would never get me anywhere in life....\nI said “no mom! Where there’s a wheel, there’s a way!”\n\n\n[grocery store] Ok, milk...check, eggs...check, tomatoes...check.\n“Sir, please stop writing separate checks for every single item.”\n\n\nMy friend told me not to drink from the wall.\nI knew he meant well. \n\n\nA recent study found out how much sleep a normal teenager needs.\nJust five more minutes.\n\n\nWhat do diarrhea and eye colour have in common?\nIt runs in your genes.\n\n\nWhat song do all Vampires hate the most?\n“You are my Sunshine”\n\n\nI tell dad jokes but I have no kids\nI’m a faux pa\n\n\nDid you hear about the Cheese Factory in France that exploded?\nThere's nothing left but de Brie\n\n\nVladimir Putin has been visiting all the old U.S.S.R. Countries.\nYou could say it was a Soviet Reunion. \n\n\nMy Catholic mother in law is renovating her kitchen.\nIt’s a counter reformation.\n\n\nI'm going to stand outside, so if anyone asks...\nI'm outstanding.\n\n\nThe psychic I go to can predict what's going to happen two years from now.\nHe's got 2020 vision.\n\n\nInsomnia is a constant battle...\nI will not sleep until I find a cure.\n\n\nI’m currently in between relationships.\nThere are two couples on either side of me, kissing and making out.\n\n\nI feel like some people here can't even tell when a joke is a dad joke\n...you can tell when it becomes ap*parent*.\n\n\nWhat do you call a hippie's wife?\nMississippi.\n\n\nA patient was initially doubtful of the success rate and concerned about her upcoming heart transplant, but after some reassuring...\nShe had a change of heart.\n\n\nTwo wrongs don't make a right:\nBut three lefts do.\n\n\nI once threw a quarter into the San Andreas...\nI have always wanted to be generous to a fault.\n\n\n"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code"
]
] |
e7fa085833296370f082e31907b5660c97c6812b | 149,145 | ipynb | Jupyter Notebook | Spring2021/int_point.ipynb | Jhomanik/MIPT-Opt | c1629b93b7608081f2237278afd92ee426760a84 | [
"MIT"
] | 132 | 2016-09-05T09:24:55.000Z | 2022-03-28T14:10:05.000Z | Spring2021/int_point.ipynb | Jhomanik/MIPT-Opt | c1629b93b7608081f2237278afd92ee426760a84 | [
"MIT"
] | 32 | 2016-10-30T12:24:18.000Z | 2018-08-30T14:02:39.000Z | Spring2021/int_point.ipynb | Jhomanik/MIPT-Opt | c1629b93b7608081f2237278afd92ee426760a84 | [
"MIT"
] | 54 | 2017-03-09T14:20:26.000Z | 2021-12-26T08:32:51.000Z | 69.240947 | 34,336 | 0.782815 | [
[
[
"# Методы внутренней точки",
"_____no_output_____"
],
[
"## На прошлом семинаре\n\n- Задачи оптимизации с ограничениями на простые множества\n- Метод проекции градиента как частный случай проксимального градиентного метода\n- Метод условного градента (Франка-Вольфа) и его сходимость",
"_____no_output_____"
],
[
"## Задача выпуклой оптимизации с ограничениями типа равенств\n\n\\begin{equation*}\n\\begin{split}\n&\\min f(x) \\\\ \n\\text{s.t. } & Ax = b,\n\\end{split}\n\\end{equation*}\n\nгде $f$ - выпукла и дважды дифференцируема, $A \\in \\mathbb{R}^{p \\times n}$ и $\\mathrm{rank} \\; A = p < n$",
"_____no_output_____"
],
[
"### Двойственная задача\nДвойственная функция \n\n\\begin{equation*}\n\\begin{split}\ng(\\mu) & = -b^{\\top}\\mu + \\inf_x(f(x) + \\mu^{\\top}Ax) \\\\\n& = -b^{\\top}\\mu - \\sup_x((-A^{\\top}\\mu)^{\\top}x -f(x)) \\\\\n& = -b^{\\top}\\mu - f^*(-A^{\\top}\\mu)\n\\end{split}\n\\end{equation*}\n\nДвойственная задача\n\n$$\n\\max_\\mu -b^{\\top}\\mu - f^*(-A^{\\top}\\mu)\n$$\n\n**Подход 1**: найти сопряжённую функцию и решить безусловную задачу оптимизации",
"_____no_output_____"
],
[
"**Трудности**\n- не всегда легко восстановить решение прямой задачи по решению двойственной\n- сопряжённая функция $f^*$ должна быть дважды дифференцируемое для быстрого решения двойственной задачи. Это не всегда так.",
"_____no_output_____"
],
[
"### Условия оптимальности\n\n- $Ax^* = b$\n- $f'(x^*) + A^{\\top}\\mu^* = 0$\n\nили\n\n$$ \\begin{bmatrix} f' & A^{\\top} \\\\ A & 0 \\end{bmatrix} \\begin{bmatrix} x^{\\\\*} \\\\ \\mu^{\\\\*} \\end{bmatrix} = \\begin{bmatrix} 0 \\\\ b \\end{bmatrix} $$\n\n**Подход 2**: решить нелинейную в общем случае систему методом Ньютона.\n\n**Вопрос**: в каком случае система окажется линейной?",
"_____no_output_____"
],
[
"## Метод Ньютона для выпуклых задач с ограничениями типа равенств\n\n\\begin{equation*}\n\\begin{split}\n& \\min_v f(x) + f'(x)^{\\top}v + \\frac{1}{2}v^{\\top}f''(x)v\\\\\n\\text{s.t. } & A(x + v) = b\n\\end{split}\n\\end{equation*}\n\nИз условий оптимальности имеем\n\n$$ \\begin{bmatrix} f''(x) & A^{\\top} \\\\ A & 0 \\end{bmatrix} \\begin{bmatrix} v \\\\ w \\end{bmatrix} = \\begin{bmatrix} -f'(x) \\\\ 0 \\end{bmatrix} $$\n\n**Шаг метода Ньютона определён только для невырожденной матрицы!**",
"_____no_output_____"
],
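[
"A minimal `numpy` sketch of one such Newton step (an illustration added here, not part of the original notebook; the name `newton_step_eq` and the toy problem are assumptions): it assembles the KKT matrix above, solves for $(v, w)$, and also returns the quantity $v^{\\top}f''(x)v$, which reappears below as the stopping criterion.\n\n```python\nimport numpy as np\n\ndef newton_step_eq(hess, grad, A):\n    # KKT system: [[H, A^T], [A, 0]] [v; w] = [-g; 0]\n    n, p = hess.shape[0], A.shape[0]\n    K = np.block([[hess, A.T], [A, np.zeros((p, p))]])\n    rhs = np.concatenate([-grad, np.zeros(p)])\n    sol = np.linalg.solve(K, rhs)\n    v, w = sol[:n], sol[n:]\n    return v, w, v @ hess @ v\n\n# toy quadratic f(x) = 0.5 x^T P x + q^T x: one step solves the problem exactly\nnp.random.seed(0)\nn, p = 5, 2\nL = np.random.randn(n, n)\nP = L @ L.T + np.eye(n)\nq, A, b = np.random.randn(n), np.random.randn(p, n), np.random.randn(p)\nx = np.linalg.lstsq(A, b, rcond=None)[0]          # feasible starting point\nv, w, dec = newton_step_eq(P, P @ x + q, A)\nprint(np.linalg.norm(A @ (x + v) - b))            # ~1e-15: feasibility is preserved\nprint(np.linalg.norm(P @ (x + v) + q + A.T @ w))  # ~1e-14: stationarity holds\n```\n\nThis also hints at the exercise below: for a quadratic objective the quadratic model is exact, so a single full Newton step lands on the solution.",
"_____no_output_____"
],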
[
"**Упражнение**. Посчитайте за сколько итераций метод Ньютона сойдётся для квадратичной функции с ограничениями типа равенств.",
"_____no_output_____"
],
[
"### Линеаризация условий оптимальности\n\n- $A(x + v) = b \\rightarrow Av = 0$\n- $f'(x + v) + A^{\\top}w \\approx f'(x) + f''(x)v + A^{\\top}w = 0$\n\nили\n\n- $f''(x)v + A^{\\top}w = -f'(x)$",
"_____no_output_____"
],
[
"### Псевдокод\n\n**Важно:** начальная точка должна лежать в допустимом множестве!\n\n```python\ndef NewtonEqualityFeasible(f, gradf, hessf, A, b, stop_crit, line_search, x0, tol):\n \n x = x0\n \n n = x.shape[0]\n \n while True:\n \n newton_matrix = [[hessf(x), A.T], [A, 0]]\n \n rhs = [-gradf(x), 0]\n \n w = solve_lin_sys(newton_matrix, rhs)\n \n h = w[:n]\n \n if stop_crit(x, h, gradf(x), **kwargs) < tol:\n \n break\n \n alpha = line_search(x, h, f, gradf(x), **kwargs)\n \n x = x + alpha * h\n \n return x\n```",
"_____no_output_____"
],
[
"### Критерий остановки\nПолучим выражение для значения\n\n$$\nf(x) - \\inf_v(\\hat{f}(x + v) \\; | \\; A(x+v) = b),\n$$\n\nгде $\\hat{f}$ - квадратичная аппроксимация функции $f$.\n\nДля этого \n\n$$\n\\langle h^{\\top} \\rvert \\cdot \\quad f''(x)h + A^{\\top}w = -f'(x)\n$$\n\nс учётом $Ah = 0$ получаем \n\n$$\nh^{\\top}f''(x)h = -f'(x)^{\\top}h\n$$\n\nТогда \n\n$$\n\\inf_v(\\hat{f}(x + v) \\; | \\; A(x+v) = b) = f(x) - \\frac{1}{2}h^{\\top}f''(x)h\n$$\n\n**Вывод:** величина $h^{\\top}f''(x)h$ является наиболее адекватным критерием остановки метода Ньютона.",
"_____no_output_____"
],
[
"### Теорема сходимости\n\nСходимость метода аналогична сходимости метода Ньютона для задачи безусловной оптимизации.\n\n**Теорема**\nПусть выполнены следующие условия\n- множество уровней $S = \\{ x \\; | \\; x \\in D(f), \\; f(x) \\leq f(x_0), \\; Ax = b \\}$ замкнуто и $x_0 \\in D(f), \\; Ax_0 = b$\n- для любых $x \\in S$ и $\\tilde{x} \\in S$ гессиан $f''(x)$ липшицев\n- на множестве $S$ $\\|f''(x)\\|_2 \\leq M $ и норма обратной матрицы KKT системы ограничена сверху\n\nТогда, метод Ньютона сходится к паре $(x^*, \\mu^*)$ линейно, а при достижении достаточной близости к решению - квадратично.",
"_____no_output_____"
],
[
"## Случай недопустимой начальной точки\n\n- Метод Ньютона требует чтобы начальная точка лежала в допустимом множестве\n- Что делать, если поиск такой точки неочевиден: например, если область определения $f$ не сопадает с $\\mathbb{R}^n$\n- Пусть начальная точка не является допустимой, в этом случае условия KKT можно записать так\n\n$$\n\\begin{bmatrix}\nf''(x) & A^{\\top}\\\\\nA & 0\n\\end{bmatrix}\n\\begin{bmatrix}\nv\\\\\nw\n\\end{bmatrix}\n = -\n\\begin{bmatrix}\nf'(x)\\\\\n{\\color{red}{Ax - b}}\n\\end{bmatrix}\n$$\n\n- Если $x$ допустима, то система совпадает с системой для обычного метода Ньютона",
"_____no_output_____"
],
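[
"A hedged `numpy` sketch of this step (added for illustration; the function name is hypothetical): compared with the feasible case, the only change is the primal residual $Ax - b$ on the right-hand side.\n\n```python\nimport numpy as np\n\ndef infeasible_newton_step(hess, grad, A, b, x):\n    # same KKT matrix; the residual Ax - b enters the right-hand side\n    n, p = hess.shape[0], A.shape[0]\n    K = np.block([[hess, A.T], [A, np.zeros((p, p))]])\n    rhs = -np.concatenate([grad, A @ x - b])\n    sol = np.linalg.solve(K, rhs)\n    return sol[:n], sol[n:]   # primal step z_p and new dual iterate z_d^+\n```\n\nAfter one full step ($\\alpha = 1$) the iterate becomes feasible, since $Az_p = b - Ax$.",
"_____no_output_____"
],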
[
"### Прямо-двойственная интерпретация\n\n- Метод *прямо-двойственный*, если на каждой итерации обновляются прямые и двойственные переменные\n- Покажем, что это значит. Для этого запишем условия оптимальности в виде\n\n$$\nr(x^*, \\mu^*) = (r_d(x^*, \\mu^*), r_p(x^*, \\mu^*)) = 0,\n$$\n\nгде $r_p(x, \\mu) = Ax - b$ и $r_d(x, \\mu) = f'(x) + A^{\\top}\\mu$\n- Решим систему методом Ньютона:\n\n$$\nr(y + z) \\approx r(y) + Dr(y)z = 0\n$$\n",
"_____no_output_____"
],
[
"- Прямо-двойственный шаг в методе Ньютона определим как решение линейной системы\n\n$$\nDr(y)z = -r(y)\n$$\n\nили более подробно\n\n$$\n\\begin{bmatrix}\nf''(x) & A^{\\top}\\\\\nA & 0\n\\end{bmatrix}\n\\begin{bmatrix}\nz_p\\\\\nz_d\n\\end{bmatrix}\n = -\n\\begin{bmatrix}\nr_d(x, \\mu)\\\\\nr_p(x, \\mu)\n\\end{bmatrix}\n= - \n\\begin{bmatrix}\nf'(x) + A^{\\top}\\mu\\\\\nAx - b\n\\end{bmatrix}\n$$\n\n- Заменим $z_d^+ = \\mu + z_d$ и получим\n\n$$\n\\begin{bmatrix}\nf''(x) & A^{\\top}\\\\\nA & 0\n\\end{bmatrix}\n\\begin{bmatrix}\nz_p\\\\\nz_d^+\n\\end{bmatrix}\n= - \n\\begin{bmatrix}\nf'(x)\\\\\nAx - b\n\\end{bmatrix}\n$$\n\n- Система полностью эквивалентна ранее полученной в обозначениях \n\n$$\nv = z_p \\qquad w = z_d^+ = \\mu + z_d \n$$\n\n- Метод Ньютона даёт шаг для прямой переменной и обновлённое значение для двойственной",
"_____no_output_____"
],
[
"### Способ инициализации\n\n- Удобный способ задания начального приближения: найти точку из области определения $f$ гораздо проще, чем из пересечения области определения и допустимого множества\n- Метод Ньютона с недопустимой начальной точкой не может определить согласованность ограничений",
"_____no_output_____"
],
[
"### Псевдокод\n\n```python\ndef NewtonEqualityInfeasible(f, gradf, hessf, A, b, stop_crit, line_search, x0, mu0, tol):\n \n x = x0\n \n mu = mu0\n \n n = x.shape[0]\n \n while True:\n \n z_p, z_d = ComputeNewtonStep(hessf(x), A, b)\n \n if stop_crit(x, z_p, z_d, gradf(x), **kwargs) < tol:\n \n break\n \n alpha = line_search(x, z_p, z_d, f, gradf(x), **kwargs)\n \n x = x + alpha * z_p\n \n mu = z_d\n \n return x\n```",
"_____no_output_____"
],
[
"### Критерий остановки и линейный поиск\n\n- Изменение $r_p$ после шага $z_p$\n\n$$\nA(x + \\alpha z_p) - b = [A(x + z_p) = b] = Ax + \\alpha(b - Ax) - b = (1 - \\alpha)(Ax - b)\n$$\n\n- Итоговое изменение после $k$ шагов\n\n$$\nr^{(k)} = \\prod_{i=0}^{k-1}(1 - \\alpha^{(i)})r^{(0)}\n$$\n\n- Критерий остановки: $Ax = b$ и $\\|r(x, \\mu)\\|_2 \\leq \\varepsilon$",
"_____no_output_____"
],
[
"- Линейный поиск: $c \\in (0, 1/2)$, $\\beta = (0, 1)$\n\n```python\ndef linesearch(r, x, mu, z_p, z_d, c, beta):\n \n alpha = 1\n \n while np.linalg.norm(r(x + alpha * z_p, mu + alpha * z_d)) >= (1 - c * alpha) * np.linalg.norm(r(x, mu)):\n \n alpha *= beta\n \n return alpha\n\n```",
"_____no_output_____"
],
[
"### Теорема сходимости\n\nРезультат аналогичен результаты для допустимой начальной точки\n\n**Теорема.** Пусть\n- множество подуровней $S = \\{(x, \\mu) \\; | \\; x \\in D(f), \\; \\| r(x, \\mu) \\|_2 \\leq \\| r(x_0, \\mu_0) \\|_2 \\}$ замкнуто\n- на множестве $S$ норма матрицы обратной к ККТ матрице ограничена\n- гессиан липшицев на $S$.\n\nТогда сходимость метода линейна при удалении от решении и квадратичная при достаточном приближении к решению.",
"_____no_output_____"
],
[
"## Общая задача выпуклой оптимизации\n\n\\begin{equation*}\n\\begin{split}\n& \\min_{x \\in \\mathbb{R}^n} f_0(x)\\\\\n\\text{s.t. } & f_i (x) \\leq 0 \\qquad i=1,\\ldots,m\\\\\n& Ax = b,\n\\end{split}\n\\end{equation*}\n\nгде $f_i$ - выпуклые и дважды непрерывно дифференцируемы, $A \\in \\mathbb{R}^{p \\times n}$ и $\\mathrm{rank} \\; A = p < n$. \n\nПредполагаем, что задача строго разрешима, то есть выполняется условие Слейтера.",
"_____no_output_____"
],
[
"## Условия оптимальности\n\n- Разрешимость прямой задачи\n\n$$\nAx^* = b, \\; f_i(x^*) \\leq 0, \\; i = 1,\\ldots,m\n$$\n\n- Разрешимость двойственной задачи\n\n$$\n\\lambda^* \\geq 0\n$$\n\n- Стационарность лагранжиана\n\n$$\nf'_0(x^*) + \\sum_{i=1}^m \\lambda^*_if'_i(x^*) + A^{\\top}\\mu^* = 0\n$$\n\n- Условие дополняющей нежёсткости\n\n$$\n\\lambda^*_i f_i(x^*) = 0, \\qquad i = 1,\\ldots, m\n$$",
"_____no_output_____"
],
[
"## Идея\n\n- Свести задачу с ограничениями типа **неравенств** к последовательности задач с ограничениями типа **равенств**\n- Использовать методы для решения задачи с ограничениями типа равенств",
"_____no_output_____"
],
[
"\\begin{equation*}\n\\begin{split}\n& \\min f_0(x) + \\sum_{i=1}^m I_-(f_i(x))\\\\\n\\text{s.t. } & Ax = b,\n\\end{split}\n\\end{equation*}\n\nгде $I_-$ - индикаторная функция\n\n$$\nI_-(u) = \n\\begin{cases}\n0, & u \\leq 0\\\\\n\\infty, & u > 0\n\\end{cases}\n$$\n\n**Проблема.** Теперь целевая функция - **недифференцируема**.",
"_____no_output_____"
],
[
"## Логарифмический барьер\n\n**Идея.** Приблизить функцию $I_-(u)$ функцией\n\n$$\n\\hat{I}_-(u) = -t\\log(-u),\n$$\n\nгде $t > 0$ - параметр.\n\n- Функции $I_-(u)$ и $\\hat{I}_-(u)$ выпуклые и неубывающие\n- Однако $\\hat{I}_-(u)$ **дифференцируема** и приближается к $I_-(u)$ при $t \\to 0$",
"_____no_output_____"
]
],
[
[
"%matplotlib inline\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nx = np.linspace(-2, 0, 100000, endpoint=False)\nplt.figure(figsize=(10, 6))\nfor t in [0.1, 0.5, 1, 1.5, 2]:\n plt.plot(x, -t * np.log(-x), label=r\"$t = \" + str(t) + \"$\")\nplt.legend(fontsize=20)\nplt.xticks(fontsize=20)\nplt.yticks(fontsize=20)\nplt.xlabel(\"u\", fontsize=20)",
"_____no_output_____"
]
],
[
[
"### \"Ограниченная\" задача\n\n\\begin{equation*}\n\\begin{split}\n& \\min f_0(x) + \\sum_{i=1}^m -t \\log(-f_i(x))\\\\\n\\text{s.t. } & Ax = b\n\\end{split}\n\\end{equation*}\n\n- Задача по-прежнему **выпуклая**\n- Функция \n\n$$\n\\phi(x) = -\\sum\\limits_{i=1}^m \\log(-f_i(x))\n$$ \nназывается *логарифмическим барьером*. Её область определения - множество точек, для котороых ограничения типа неравенств выполняются строго.\n\n**Упражнение.** Найдите градиент и гессиан $\\phi(x)$",
"_____no_output_____"
],
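[
"A minimal check of the exercise for affine constraints $f_i(x) = a_i^{\\top}x - b_i$ (an added sketch with illustrative names, not part of the original notebook): in this case $\\phi'(x) = A^{\\top}d$ and $\\phi''(x) = A^{\\top}\\mathrm{diag}(d)^2A$, where $d_i = 1/(b_i - a_i^{\\top}x)$.\n\n```python\nimport numpy as np\nfrom scipy.optimize import check_grad\n\nnp.random.seed(0)\nm, n = 8, 3\nA = np.random.randn(m, n)\nb = np.ones(m)                    # x = 0 is strictly feasible: Ax < b\n\nphi = lambda x: -np.sum(np.log(b - A @ x))\n\ndef grad_phi(x):\n    d = 1.0 / (b - A @ x)\n    return A.T @ d\n\ndef hess_phi(x):\n    d = 1.0 / (b - A @ x)\n    return A.T @ np.diag(d**2) @ A\n\nprint(check_grad(phi, grad_phi, np.zeros(n)))   # ~1e-7: gradient formula is right\n```",
"_____no_output_____"
],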
[
"## Центральный путь\n\nДля каждого $t > 0$ \"ограниченная\" задача имеет единственное решение $x^*(t)$.\n\n**Определение.** Последовательность $x^*(t)$ для $t > 0$ образует *центральный путь*.",
"_____no_output_____"
],
[
"## Условия оптимальности для \"ограниченной\" задачи\n\n- Разрешимость прямой задачи\n\n$$\nAx^*(t) = b, \\; f_i(x^*) < 0, \\; i = 1,\\ldots,m\n$$\n\n- Стационарность лагранжиана\n\n\\begin{equation*}\n\\begin{split}\n& f'_0(x^*(t)) + \\phi'(x^*(t)) + A^{\\top}\\hat{\\mu} = \\\\\n& = f'_0(x^*(t)) - t\\sum_{i=1}^m \\frac{f_i'(x^*(t))}{f_i(x^*(t))} + A^{\\top}\\hat{\\mu} = 0\n\\end{split}\n\\end{equation*}",
"_____no_output_____"
],
[
"- Обозначим \n\n$$\n\\lambda^*_i(t) = -\\frac{t}{f_i(x^*(t))} \\; i=1,\\ldots,m \\text{ и } \\mu^* = \\hat{\\mu}\n$$\n\n- Тогда условие оптимальности можно записать как\n\n$$\nf'_0(x^*(t)) + \\sum_{i=1}^m \\lambda^*_i(t)f_i'(x^*(t)) + A^{\\top}\\mu^* = 0\n$$\n\n- Тогда $x^*(t)$ минимизирует лагранжиан \n\n$$\nL = f_0(x) + \\sum_{i=1}^m \\lambda_if_i(x) + \\mu^{\\top}(Ax - b)\n$$\n\nдля $\\lambda = \\lambda^*(t)$ и $\\mu = \\mu^*$.",
"_____no_output_____"
],
[
"### Зазор двойственности\n\n- Двойственная функция $g(\\lambda^*(t), \\mu^*)$ конечна и представима в виде\n\n\\begin{equation*}\n\\begin{split}\ng(\\lambda^*(t), \\mu^*) & = f_0(x^*(t)) + \\sum_{i=1}^m \\lambda^*_i(t)f_i(x^*(t)) + (\\mu^*)^{\\top}(Ax^*(t) - b)\\\\\n& = f_0(x^*(t)) - mt\n\\end{split}\n\\end{equation*}\n\n- Зазор двойственности\n\n$$\nf_0(x^*(t)) - p^* \\leq mt\n$$\n\n- При $t \\to 0$ зазор двойственности равен 0 и центральный путь сходится к решению исходной задачи.",
"_____no_output_____"
],
[
"## ККТ интерпретация\n\nУсловия оптимальности для \"ограниченной\" задачи эквивалентны условиям оптимальности для исходной задачи если\n\n$$\n-\\lambda_i f_i(x) = 0 \\to - \\lambda_i f_i(x) = t \\quad i = 1,\\ldots, m\n$$",
"_____no_output_____"
],
[
"## Физическая интерпретация\n- Предположим, что ограничений типа равенства нет\n- Рассмотрим неквантовую частицу в поле сил\n- Каждому ограничению $f_i(x) \\leq 0$ поставим в соответствие силу\n$$\nF_i(x) = -\\nabla(-\\log(-f_i(x))) = \\frac{f'_i(x)}{f_i(x)}\n$$\n- Целевой функции также поставим в соответствие силу \n$$\nF_0(x) = -\\frac{f'_0(x)}{t}\n$$\n- Каждая точка из центрального пути $x^*(t)$ - это положение частицы, в котором выполняется баланс сил ограничений и целевой функции\n- С уменьшением $t$ сила для целевой функции доминирует, и частица стремится занять положение, расположенное ближе к оптимальному\n- Поскольку сила ограничений стремится к бесконечности при приближении частицы к границе, частица никогда не вылетит из допустимого множества",
"_____no_output_____"
],
[
"## Барьерный метод\n\n- $x_0$ должна быть допустимой\n- $t_0 > 0$ - начальное значение параметра\n- $\\alpha \\in (0, 1)$ - множитель для уменьшения $t_0$\n\n```python\ndef BarrierMethod(f, x0, t0, tol, alpha, **kwargs):\n \n x = x0\n \n t = t0\n \n while True:\n \n x = SolveBarrierProblem(f, t, x, **kwargs)\n \n if m * t < tol:\n \n break\n \n t *= alpha\n \n return x\n\n```",
"_____no_output_____"
],
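[
"A self-contained toy instantiation of this scheme (an illustrative sketch, not the notebook's implementation): minimize $(x-2)^2$ subject to $x \\leq 1$. The inner problem is solved by a few one-dimensional Newton steps, and $x^*(t) \\to 1$ as $t \\to 0$.\n\n```python\ndef solve_barrier_1d(t, x, iters=50):\n    # minimize (x - 2)**2 - t * log(1 - x) over x < 1 by Newton's method\n    for _ in range(iters):\n        g = 2 * (x - 2) + t / (1 - x)        # first derivative\n        h = 2 + t / (1 - x) ** 2             # second derivative, always > 0\n        step = -g / h\n        while x + step >= 1:                 # stay strictly feasible\n            step /= 2\n        x += step\n    return x\n\nx, t = 0.0, 1.0\nwhile t > 1e-8:                              # outer loop: shrink t\n    x = solve_barrier_1d(t, x)\n    print(\"t = {:.1e}, x*(t) = {:.6f}\".format(t, x))\n    t *= 0.1\n```",
"_____no_output_____"
],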
[
"### Точность решения \"ограниченной\" задачи\n\n- Точное решение \"ограниченной\" задачи не требуется, так как приближённый центральный путь всё равно сойдётся к решению исходной задачи\n- Двойственные переменные перестают быть двойственными при неточном решении, но это поправимо введением поправочных слагаемых\n- Разница в стоимости точного и неточного центрального пути - несколько итераций метода Ньютона, поэтому существенного ускорения добиться нельзя",
"_____no_output_____"
],
[
"### Выбор параметров\n- Множитель $\\alpha$\n - При $\\alpha \\sim 1$, **мало** итераций нужно для решения \"ограниченной\" задачи, но **много** для нахождения точного решения исходной задачи\n - При $\\alpha \\sim 10^{-5}$ **много** итераций нужно для решения \"ограниченной\" задачи, но **мало** для нахождения точного решения исходной задачи\n- Начальный параметр $t_0$\n - Аналогичная альтернатива как и для параметра $\\alpha$\n - Параметр $t_0$ задаёт начальную точку для центрального пути",
"_____no_output_____"
],
[
"### Почти теорема сходимости\n\n- Как было показано выше при $t \\to 0$ барьерный метод сходится к решению исходной задачи\n- Скорость сходимости напрямую связана с параметрами $\\alpha$ и $t_0$, как показано ранее\n- Основная сложность - быстрое решение вспомогательных задач методом Ньютона",
"_____no_output_____"
],
[
"## Задача поиска допустимого начального приближения\n\n- Барьерный метод требует допустимого начального приближения\n- Метод разбивается на две фазы\n - Первая фаза метода ищет допустимое начальное приближение\n - Вторая фаза использует найденное начальное приближение для запуска барьерного метода",
"_____no_output_____"
],
[
"### Первая фаза метода\n\nПростой поиск допустимой точки\n\n\\begin{equation*}\n\\begin{split}\n& \\min s\\\\\n\\text{s.t. } & f_i(x) \\leq s\\\\\n& Ax = b\n\\end{split}\n\\end{equation*}\n\n- эта задача всегда имеет строго допустимое начальное приближение\n- если $s^* < 0$, то $x^*$ строго допустима и может быть использована в барьерном методе\n- если $s^* > 0$, то задача не разрешима и допустимое множество пусто",
"_____no_output_____"
],
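[
"A hedged sketch of phase one for affine inequalities $Ax \\leq b$ (added illustration; using `scipy.optimize.linprog` here is an assumption of this example): minimize $s$ subject to $a_i^{\\top}x - s \\leq b_i$.\n\n```python\nimport numpy as np\nfrom scipy.optimize import linprog\n\ndef phase_one(A, b):\n    # variables (x, s); minimize s subject to A x - s e <= b, x and s free\n    m, n = A.shape\n    c = np.zeros(n + 1)\n    c[-1] = 1.0\n    A_ub = np.hstack([A, -np.ones((m, 1))])\n    res = linprog(c, A_ub=A_ub, b_ub=b, bounds=[(None, None)] * (n + 1))\n    return res.x[:n], res.x[-1]   # s < 0 means x is strictly feasible\n\nA = np.array([[1.0, 1.0], [-1.0, 0.0], [0.0, -1.0]])\nb = np.array([1.0, 0.0, 0.0])     # the simplex x >= 0, x1 + x2 <= 1\nx, s = phase_one(A, b)\nprint(x, s)                       # expect s = -1/3 at the deepest interior point\n```",
"_____no_output_____"
],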
[
"### Сумма несогласованностей\n\n\\begin{equation*}\n\\begin{split}\n& \\min s_1 + \\ldots + s_m\\\\\n\\text{s.t. } & f_i(x) \\leq s_i\\\\\n& Ax = b\\\\\n& s \\geq 0\n\\end{split}\n\\end{equation*}\n\n- оптимальное значене равно нулю и достигается тогда и только тогда когда система ограничений совместна\n- если задача неразрешима, то можно определить какие ограничения к этому приводят, то есть какие $s_i > 0$ ",
"_____no_output_____"
],
[
"### Вторая фаза метода\n\n- После получения допустимой начальной точки $x_0$ выполняется обычный метод Ньютона для задачи с ограничениями равенствами",
"_____no_output_____"
],
[
"## Прямо-двойственный метод\n\nПохож на барьерный метод, но\n- нет разделения на внешние итерации и внутренние: на каждой итерации обновляются прямые и двойственные переменные\n- направление определяется методом Ньютона, применённого к модифицированной системе ККТ\n- последовательность точек в прямо-двойственном методе не обязательно допустимы \n- работает даже когда задача не строго допустима",
"_____no_output_____"
],
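[
"A minimal sketch of one primal-dual Newton step for the LP $\\min c^{\\top}x$, $Ax = b$, $x \\geq 0$ (added illustration in our own notation, not the notebook's implementation): linearize the residual $r(x, \\lambda, s) = (A^{\\top}\\lambda + s - c, \\; Ax - b, \\; XSe - \\mu e)$.\n\n```python\nimport numpy as np\n\ndef primal_dual_step(A, b, c, x, lam, s, mu):\n    m, n = A.shape\n    # Jacobian of the residual r(x, lam, s)\n    J = np.block([\n        [np.zeros((n, n)), A.T,              np.eye(n)],\n        [A,                np.zeros((m, m)), np.zeros((m, n))],\n        [np.diag(s),       np.zeros((n, m)), np.diag(x)],\n    ])\n    r = np.concatenate([A.T @ lam + s - c, A @ x - b, x * s - mu])\n    d = np.linalg.solve(J, -r)\n    return d[:n], d[n:n + m], d[n + m:]   # (dx, dlam, ds)\n```\n\nA full method would additionally cap the step length so that $x$ and $s$ stay strictly positive, and shrink $\\mu$ between steps.",
"_____no_output_____"
],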
[
"### Сходимость для квадратичной целевой функции\n\nПри некоторых предположениях о начальной точке и начальном значении $\\mu$, можно показать, что для достижения $\\mu_k \\leq \\varepsilon$ потребуется \n\n$$\n\\mathcal{O}\\left(\\sqrt{n}\\log \\left( \\frac{1}{\\varepsilon}\\right)\\right)\n$$ \nитераций\n\n\nДоказательство и все детали можно найти [тут](https://epubs.siam.org/doi/book/10.1137/1.9781611971453?mobileUi=0) или [тут](https://www.maths.ed.ac.uk/~gondzio/reports/ipmXXV.pdf)\n\n- Сравните с методами типа градиентного спуска, которые дают сходимость типа $\\mathcal{O}\\left( \\frac{1}{\\varepsilon} \\right)$\n- Зависит от размерности как $\\sqrt{n}$\n- На практике зависимость от размерности ещё слабее",
"_____no_output_____"
],
[
"## Резюме\n\n- Метод Ньютона для выпуклой задачи с оганичениями типа равенств\n- Случай недопустимой начальной точки\n- Прямой барьерный метод\n- Прямо-двойственный метод",
"_____no_output_____"
],
[
"## Применение методов внутренней точки к задаче линейного программирования\n\nИсходная задача\n\\begin{align*}\n&\\min_x c^{\\top}x \\\\\n\\text{s.t. } & Ax = b\\\\\n& x_i \\geq 0, \\; i = 1,\\dots, n\n\\end{align*}\n\nАппроксимированная задача\n\\begin{align*}\n&\\min_x c^{\\top}x {\\color{red}{- \\mu \\sum\\limits_{i=1}^n \\ln x_i}} \\\\\n\\text{s.t. } & Ax = b\\\\\n\\end{align*}\nдля некоторого $\\mu > 0$",
"_____no_output_____"
],
[
"### Барьерная функция\n\n**Определение.** Функция $B(x, \\mu) = -\\mu\\ln x$ называется *барьерной* для задачи с ограничением $x \\geq 0$.\n\nБолее подробно о таких функциях будет рассказано в контексте нелинейной условной оптимизации...",
"_____no_output_____"
],
[
"### Что произошло?\n\n- Сделали из линейной задачу нелинейную\n- Перенесли ограничение типа неравенства в целевую функцию\n- Ввели дополнительный параметр $\\mu$",
"_____no_output_____"
],
[
"### Почему это хорошо?\n\nПереход к задаче с ограничениями типа равенств $\\to$ упрощение условий оптимальности, в частности\n\n- Исключено требование дополняющей нежёсткости\n- Исключено условие неотрицательности множителя Лагранжа для ограничения типа неравенства",
"_____no_output_____"
],
[
"### Условия оптимальности\n\n- Лагранжиан: $L = c^{\\top}x - \\mu\\sum\\limits_{i=1}^n \\ln x_i + \\lambda^{\\top}(Ax - b)$\n- Стационарная точка $L$: \n\n$$\nc - \\mu X^{-1}e + A^{\\top}\\lambda = 0,\n$$\n\nгде $X = \\mathrm{diag}(x_1, \\dots, x_n)$ и $e = [1, \\dots, 1]$\n- Ограничение типа равенства: $Ax = b$",
"_____no_output_____"
],
[
"Пусть $s = \\mu X^{-1}e$, тогда условия оптимальности можно переписать так:\n- $A^{\\top}\\lambda + c - s = 0 $\n- $Xs = {\\color{red}{\\mu e}}$\n- $Ax = b$\n\nТакже $x > 0 \\Rightarrow s > 0$ ",
"_____no_output_____"
],
[
"## Сравнение с условиями оптимальности для исходной задачи\n\n- Лагранжиан: $L = c^{\\top}x + \\lambda^{\\top}(Ax - b) - s^{\\top}x$\n- Условие стационарности: $c + A^{\\top}\\lambda - s = 0$\n- Допустимость прямой задачи: $Ax = b, \\; x \\geq 0$\n- Допустимость двойственной: $s \\geq 0$\n- Условие дополняющей нежёсткости: $s_ix_i = 0$",
"_____no_output_____"
],
[
"### После упрощения\n\n- $A^{\\top}\\lambda + c - s = 0$\n- $Ax = b$\n- $Xs = {\\color{red}{0}}$\n- $x \\geq 0, \\; s \\geq 0$",
"_____no_output_____"
],
[
"### Вывод\n\n- Введение барьерной функции c множителем $\\mu$ эквивалентно релаксации условий дополняющей нежёсткости на параметр $\\mu$\n- При $\\mu \\to 0$ решения задач совпадают!\n- Итеративное решение задачи с барьерной функцией вместе с уменьшением $\\mu$. Последовательность решений сойдётся к вершине симплекса по траектории из точек, лежащих внутри симплекса.",
"_____no_output_____"
],
[
"### Общая схема\n```python\ndef GeneralInteriorPointLP(c, A, b, x0, mu0, rho, tol):\n \n x = x0\n \n mu = mu0\n \n e = np.ones(c.shape[0])\n \n while True:\n \n primal_var, dual_var = StepInsideFeasibleSet(c, A, b, x, mu)\n \n mu *= rho\n \n if converge(primal_var, dual_var, c, A, b, tol) and mu < tol:\n \n break\n \n return x\n```",
"_____no_output_____"
],
[
"## Как решать задачу с барьерной функцией?\n\n- Прямой метод \n- Прямо-двойственный метод",
"_____no_output_____"
],
[
"## Прямой метод\n\nВспомним исходную задачу:\n\\begin{align*}\n&\\min_x c^{\\top}x - \\mu \\sum\\limits_{i=1}^n \\ln x_i \\\\\n\\text{s.t. } & Ax = b\\\\\n\\end{align*}\n\nИдея: приблизим целевую функцию до второго порядка, как в методе Ньютона.",
"_____no_output_____"
],
[
"### Реализация\nНа $(k+1)$-ой итерации необходимо решить следующую задачу: \n\n\\begin{align*}\n&\\min_p \\frac{1}{2}p^{\\top}Hp + g^{\\top}p\\\\\n\\text{s.t. } & A(x_k + p) = b,\\\\\n\\end{align*}\n\nгде $H = \\mu X^{-2}$ - гессиан, и $g = c - \\mu X^{-1}e$ - градиент.",
"_____no_output_____"
],
[
"### Снова KKT\n\nВыпишем условия ККТ для этой задачи\n- $Hp + g + A^{\\top}\\lambda = 0$\n- $Ap = 0$\n\nили\n$$\\begin{bmatrix} H & A^{\\top}\\\\ A & 0 \\end{bmatrix} \\begin{bmatrix} p\\\\ \\lambda \\end{bmatrix} = \\begin{bmatrix} -g \\\\ 0 \\end{bmatrix}$$",
"_____no_output_____"
],
[
"Из первой строки:\n\n$$\n-\\mu X^{-2}p + A^{\\top}\\lambda = c - \\mu X^{-1}e\n$$\n\n$$\n-\\mu Ap + AX^{2}A^{\\top}\\lambda = AX^2c - \\mu AXe\n$$\n\n$$\nAX^{2}A^{\\top}\\lambda = AX^2c - \\mu AXe\n$$\n\nТак как $X \\in \\mathbb{S}^n_{++}$ и $A$ полного ранга, то уравнение имеет единственное решение $\\lambda^*$.",
"_____no_output_____"
],
[
"### Найдём направление $p$\n\n$$\n-\\mu p + X^2A^{\\top}\\lambda^* = X^2c - \\mu Xe = X^2c - \\mu x\n$$\n\n$$\np = x + \\frac{1}{\\mu}X^2(A^{\\top}\\lambda^* - c)\n$$",
"_____no_output_____"
],
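[
"A small `numpy` check of these two formulas (an added sketch; the names are hypothetical): compute $\\lambda^*$ from the $m \\times m$ system and then $p$, and verify that $Ap \\approx 0$.\n\n```python\nimport numpy as np\n\ndef primal_newton_direction(A, c, x, mu):\n    # A X^2 A^T lam = A X^2 c - mu A X e, then p = x + X^2 (A^T lam - c) / mu\n    lhs = (A * x**2) @ A.T\n    rhs = A @ (x**2 * c) - mu * (A @ x)\n    lam = np.linalg.solve(lhs, rhs)\n    p = x + (x**2 * (A.T @ lam - c)) / mu\n    return p, lam\n\nnp.random.seed(1)\nm, n = 3, 6\nA = np.random.rand(m, n)\nx = np.random.rand(n) + 0.5       # strictly positive current iterate\nc = np.random.randn(n)\np, lam = primal_newton_direction(A, c, x, mu=0.1)\nprint(np.linalg.norm(A @ p))      # ~1e-15: the step keeps Ax = b\n```",
"_____no_output_____"
],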
[
"### Способы решения системы из ККТ\n\n1. Прямой способ: формирование матрицы $(n + m) \\times (n + m)$ и явное решение системы - $\\frac{1}{3}(n + m)^3$\n2. Последовательное исключение переменных:\n - $Hp + A^{\\top}\\lambda = -g$, $p = -H^{-1}(g + A^{\\top}\\lambda)$\n - $Ap = -AH^{-1}(g + A^{\\top}\\lambda) = -AH^{-1}A^{\\top}\\lambda - AH^{-1}g = 0$\n \n Здесь матрица $-AH^{-1}A^{\\top}$ есть *дополнение по Шуру* матрицы $H$.\n3. Алгоритм вычисления решения при последовательном исключении переменных\n - Вычислить $H^{-1}g$ и $H^{-1}A^{\\top}$ - $f_H + (m+1)s_H$ операций\n - Вычислить дополнение по Шуру $-AH^{-1}A^{\\top}$ - $\\mathcal{O}(m^2n)$\n - Найти $\\lambda$ - $\\frac{1}{3}m^3$ операций\n - Найти $p$ - $s_H + \\mathcal{O}(mn)$ операций\n4. Итого: $f_H + ms_H + \\frac{m^3}{3} + \\mathcal{O}(m^2n)$ уже гораздо быстрее прямого способа",
"_____no_output_____"
],
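[
"A hedged benchmark sketch comparing the direct KKT solve with the Schur complement elimination when $H$ is diagonal, as it is here (sizes and names are illustrative assumptions):\n\n```python\nimport time\nimport numpy as np\n\nnp.random.seed(2)\nn, m = 3000, 30\nh = np.random.rand(n) + 1.0       # diagonal of H\nA = np.random.randn(m, n)\ng = np.random.randn(n)\n\nstart = time.perf_counter()       # direct: full (n + m) x (n + m) system\nK = np.block([[np.diag(h), A.T], [A, np.zeros((m, m))]])\nsol = np.linalg.solve(K, np.concatenate([-g, np.zeros(m)]))\np_direct = sol[:n]\nt_direct = time.perf_counter() - start\n\nstart = time.perf_counter()       # elimination via the m x m Schur complement\nS = (A / h) @ A.T                 # A H^{-1} A^T\nlam = np.linalg.solve(S, -(A @ (g / h)))\np_schur = -(g + A.T @ lam) / h\nt_schur = time.perf_counter() - start\n\nprint(np.linalg.norm(p_direct - p_schur))   # the same step\nprint(t_direct, t_schur)                    # elimination is much faster\n```",
"_____no_output_____"
],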
[
"### Используем структуру матрицы $H$\n\n- В нашем случае $H = \\mu X^{-2}$ - диагональная матрица!\n- $f_H$ - $n$ операций\n- $s_H$ - $n$ операций\n- Итоговая сложность $\\frac{m^3}{3} + \\mathcal{O}(m^2n)$ операций, где $m \\ll n$",
"_____no_output_____"
],
[
"### Поиск шага $\\alpha$\n\n- Обычный линейный поиск с условиями достаточного убывания\n- Условие $A(x_k + \\alpha p) = b$ выполняется автоматически",
"_____no_output_____"
],
[
"### Псевдокод прямого барьерного метода\n\n```python\ndef PrimalBarrierLP(c, A, b, x0, mu0, rho, tol):\n \n x = x0\n \n mu = mu0\n \n e = np.ones(x.shape[0])\n \n while True:\n \n p, lam = ComputeNewtonDirection(c, x, A, mu)\n \n alpha = line_search(p, mu, c, x)\n \n x = x + alpha * p\n \n mu = rho * mu\n \n if mu < tol and np.linalg.norm(x.dot(c - A.T.dot(lam)) - mu * e) < tol:\n \n break\n \n return x\n```",
"_____no_output_____"
],
[
"## Сравнение барьерного метода и прямого метода внутренней точки\n\n- Пример Klee-Minty c прошлого семинара\n\\begin{align*}\n& \\max_{x \\in \\mathbb{R}^n} 2^{n-1}x_1 + 2^{n-2}x_2 + \\dots + 2x_{n-1} + x_n\\\\\n\\text{s.t. } & x_1 \\leq 5\\\\\n& 4x_1 + x_2 \\leq 25\\\\\n& 8x_1 + 4x_2 + x_3 \\leq 125\\\\\n& \\ldots\\\\\n& 2^n x_1 + 2^{n-1}x_2 + 2^{n-2}x_3 + \\ldots + x_n \\leq 5^n\\\\\n& x \\geq 0\n\\end{align*}\n- Какая сложность работы симплекс метода?",
"_____no_output_____"
],
[
"- Сведение к стандартной форме\n\n\\begin{align*}\n& \\min_{x, \\; z} -c^{\\top}x \\\\\n\\text{s.t. } & Ax + z = b\\\\\n& x \\geq 0, \\quad z \\geq 0\n\\end{align*}\n\n- Сравним скорость работы прямого барьерного метода и симплекс-метода",
"_____no_output_____"
]
],
[
[
"import numpy as np\n%matplotlib inline\nimport matplotlib.pyplot as plt\nimport scipy.optimize as scopt\nimport scipy.linalg as sclin",
"_____no_output_____"
],
[
"def NewtonLinConstraintsFeasible(f, gradf, hessf, A, x0, line_search, linsys_solver, args=(), \n disp=False, disp_conv=False, callback=None, tol=1e-6, max_iter=100, **kwargs):\n x = x0.copy()\n n = x0.shape[0]\n iteration = 0\n lam = np.random.randn(A.shape[0])\n while True:\n gradient, hess = gradf(x, *args), hessf(x, *args)\n h = linsys_solver(hess, A, gradient)\n descent_dir = h[:n]\n decrement = descent_dir.dot(hessf(x, *args).dot(descent_dir))\n if decrement < tol:\n if disp_conv:\n print(\"Tolerance achieved! Decrement = {}\".format(decrement))\n break\n alpha = line_search(x, descent_dir, f, gradf, args, **kwargs)\n if alpha < 1e-16:\n if disp_conv:\n print(\"Step is too small!\")\n x = x + alpha * descent_dir\n if callback is not None:\n callback((descent_dir, x))\n iteration += 1\n if disp:\n print(\"Current function val = {}\".format(f(x, *args)))\n print(\"Newton decrement = {}\".format(decrement))\n if iteration >= max_iter:\n if disp_conv:\n print(\"Maxiter exceeds!\")\n break\n res = {\"x\": x, \"num_iter\": iteration, \"tol\": decrement}\n return res",
"_____no_output_____"
],
[
"def simple_solver(hess, A, gradient):\n n = hess.shape[0]\n n_lin_row, n_lin_col = A.shape\n modified_hess = np.zeros((n + n_lin_row, n + n_lin_row))\n modified_hess[:n, :n] = hess\n modified_hess[n:n + n_lin_row, :n_lin_col] = A\n modified_hess[:n_lin_col, n:n + n_lin_row] = A.T\n rhs = np.zeros(n + n_lin_row)\n rhs[:n] = -gradient\n h = np.linalg.solve(modified_hess, rhs)\n return h\n\ndef elimination_solver(hess, A, gradient):\n inv_hess_diag = np.divide(1.0, np.diag(hess))\n inv_hess_grad = np.multiply(-inv_hess_diag, gradient)\n rhs = A.dot(inv_hess_grad)\n L_inv_hess = np.sqrt(inv_hess_diag)\n AL_inv_hess = A * L_inv_hess\n # print(AL_inv_hess.shape)\n S = AL_inv_hess.dot(AL_inv_hess.T)\n# cho_S = sclin.cho_factor(S)\n# w = sclin.cho_solve(cho_S, rhs)\n w = np.linalg.solve(S, rhs)\n v = np.subtract(inv_hess_grad, np.multiply(inv_hess_diag, A.T.dot(w)))\n# h = np.zeros(hess.shape[1] + A.shape[0])\n# h[:hess.shape[1]] = v\n# h[hess.shape[1]:hess.shape[1] + A.shape[0]] = w\n return v",
"_____no_output_____"
],
[
"def backtracking(x, descent_dir, f, grad_f, args, **kwargs):\n beta1 = kwargs[\"beta1\"]\n rho = kwargs[\"rho\"]\n alpha = 1\n while f(x + alpha * descent_dir, *args) >= f(x, *args) + beta1 * alpha * grad_f(x, *args).dot(descent_dir) \\\n or np.isnan(f(x + alpha * descent_dir, *args)):\n alpha *= rho\n if alpha < 1e-16:\n break\n return alpha",
"_____no_output_____"
],
[
"def generate_KleeMinty_test_problem(n):\n c = np.array([2**i for i in range(n)])\n c = -c[::-1]\n bounds = [(0, None) for i in range(n)]\n b = np.array([5**(i+1) for i in range(n)])\n a = np.array([1] + [2**(i+1) for i in range(1, n)])\n A = np.zeros((n, n))\n for i in range(n):\n A[i:, i] = a[:n-i]\n return c, A, b, bounds",
"_____no_output_____"
],
[
"n = 7\nc, A, b, _ = generate_KleeMinty_test_problem(n)\neps = 1e-10\ndef f(x, c, mu):\n n = c.shape[0]\n return c.dot(x[:n]) - mu * np.sum(np.log(eps + x))\n\ndef gradf(x, c, mu):\n grad = np.zeros(len(x))\n n = c.shape[0]\n grad[:n] = c - mu / (eps + x[:n])\n grad[n:] = -mu / (eps + x[n:])\n return grad\n\ndef hessf(x, c, mu):\n return mu * np.diag(1. / (eps + x)**2)\n\nA_lin = np.zeros((n, n + A.shape[0]))\nA_lin[:n, :n] = A\nA_lin[:n, n:n + A.shape[0]] = np.eye(A.shape[0])\nmu = 0.1",
"_____no_output_____"
]
],
[
[
"#### Проверим верно ли вычисляется градиент",
"_____no_output_____"
]
],
[
[
"scopt.check_grad(f, gradf, np.random.rand(n), c, mu)",
"_____no_output_____"
]
],
[
[
"### Выбор начального приближения допустимого по ограничениям и области определения целевой функции",
"_____no_output_____"
]
],
[
[
"x0 = np.zeros(2*n)\nx0[:n] = np.random.rand(n)\nx0[n:2*n] = b - A.dot(x0[:n])\nprint(np.linalg.norm(A_lin.dot(x0) - b))\nprint(np.sum(x0 <= 1e-6))",
"1.1457157353758233e-13\n0\n"
]
],
[
[
"### Проверим сходимость",
"_____no_output_____"
]
],
[
[
"hist_conv = []\ndef cl(x):\n hist_conv.append(x)\nres = NewtonLinConstraintsFeasible(f, gradf, hessf, A_lin, x0, backtracking, elimination_solver, (c, mu), callback=cl,\n max_iter=2000, beta1=0.1, rho=0.7)\nprint(\"Decrement value = {}\".format(res[\"tol\"]))\nfstar = f(res[\"x\"], c, mu)\nhist_conv_f = [np.abs(fstar - f(descdir_x[1], c, mu)) for descdir_x in hist_conv]\nplt.figure(figsize=(12, 5))\nplt.subplot(1,2,1)\nplt.semilogy(hist_conv_f)\nplt.xlabel(\"Number of iteration, $k$\", fontsize=18)\nplt.ylabel(\"$f^* - f_k$\", fontsize=18)\nplt.xticks(fontsize=18)\n_ = plt.yticks(fontsize=18)\n\nhist_conv_x = [np.linalg.norm(res[\"x\"] - x[1]) for x in hist_conv]\nplt.subplot(1,2,2)\nplt.semilogy(hist_conv_x)\nplt.xlabel(\"Number of iteration, $k$\", fontsize=18)\nplt.ylabel(\"$\\| x_k - x^*\\|_2$\", fontsize=18)\nplt.xticks(fontsize=18)\n_ = plt.yticks(fontsize=18)\nplt.tight_layout()",
"/Users/alex/anaconda3/envs/cvxpy/lib/python3.6/site-packages/ipykernel_launcher.py:6: RuntimeWarning: invalid value encountered in log\n \n"
]
],
[
[
"### Реализация барьерного метода",
"_____no_output_____"
]
],
[
[
"def BarrierPrimalLinConstr(f, gradf, hessf, A, c, x0, mu0, rho_mu, linesearch, linsys_solver, \n tol=1e-8, max_iter=500, disp_conv=False, **kwargs):\n x = x0.copy()\n n = x0.shape[0]\n mu = mu0\n while True:\n res = NewtonLinConstraintsFeasible(f, gradf, hessf, A, x, linesearch, linsys_solver, (c, mu), \n disp_conv=disp_conv, max_iter=max_iter, beta1=0.01, rho=0.5)\n x = res[\"x\"].copy()\n if n * mu < tol:\n break\n mu *= rho_mu\n return x",
"_____no_output_____"
],
[
"mu0 = 5\nrho_mu = 0.5\nx = BarrierPrimalLinConstr(f, gradf, hessf, A_lin, c, x0, mu0, rho_mu, backtracking, elimination_solver, max_iter=100)\n%timeit BarrierPrimalLinConstr(f, gradf, hessf, A_lin, c, x0, mu0, rho_mu, backtracking, elimination_solver, max_iter=100)\n%timeit BarrierPrimalLinConstr(f, gradf, hessf, A_lin, c, x0, mu0, rho_mu, backtracking, simple_solver, max_iter=100)\nprint(x[:n])",
"/Users/alex/anaconda3/envs/cvxpy/lib/python3.6/site-packages/ipykernel_launcher.py:6: RuntimeWarning: invalid value encountered in log\n \n/Users/alex/anaconda3/envs/cvxpy/lib/python3.6/site-packages/ipykernel_launcher.py:6: RuntimeWarning: invalid value encountered in log\n \n"
]
],
[
[
"### Сравнение времени работы",
"_____no_output_____"
]
],
[
[
"mu0 = 2\nrho_mu = 0.5\nn_list = range(3, 10)\nn_iters = np.zeros(len(n_list))\ntimes_simplex = np.zeros(len(n_list))\ntimes_barrier_simple = np.zeros(len(n_list))\nfor i, n in enumerate(n_list):\n print(\"Current dimension = {}\".format(n))\n c, A, b, bounds = generate_KleeMinty_test_problem(n)\n time = %timeit -o -q scopt.linprog(c, A, b, bounds=bounds, options={\"maxiter\": 2**max(n_list) + 1}, method=\"simplex\")\n times_simplex[i] = time.best\n A_lin = np.zeros((n, n + A.shape[0]))\n A_lin[:n, :n] = A\n A_lin[:n, n:n + A.shape[0]] = np.eye(A.shape[0])\n x0 = np.zeros(2*n)\n x0[:n] = np.random.rand(n)\n x0[n:2*n] = b - A.dot(x0[:n])\n time = %timeit -o -q BarrierPrimalLinConstr(f, gradf, hessf, A_lin, c, x0, mu0, rho_mu, backtracking, simple_solver)\n times_barrier_simple[i] = time.best",
"Current dimension = 3\n"
],
[
"plt.figure(figsize=(8, 5))\nplt.semilogy(n_list, times_simplex, label=\"Simplex\")\nplt.semilogy(n_list, times_barrier_simple, label=\"Primal barrier\")\nplt.legend(fontsize=18)\nplt.xlabel(\"Dimension, $n$\", fontsize=18)\nplt.ylabel(\"Computation time, sec.\", fontsize=18)\nplt.xticks(fontsize=18)\n_ = plt.yticks(fontsize=18)",
"_____no_output_____"
]
],
[
[
"### Комментарии\n\n- Было показано, что прямой метод эквивалентен методу Кармаркара\n- Использует информацию только о прямой задаче\n- Начальное приближение должно лежать в допустимом множестве - отдельная задача",
"_____no_output_____"
],
[
"## Барьеры для других задач\n\n- Для задач полуопределённой опримизации (SDP) - $-\\log\\det X$\n- Для задач с конусом второго порядка (SOCP) - $-\\log (t^2 - \\|x\\|^2_2)$",
"_____no_output_____"
],
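[
"A minimal NumPy sketch of evaluating both barriers, assuming strictly feasible arguments ($X \\succ 0$ and $\\|x\\|_2 < t$); the Cholesky-based log-det is just one convenient way to compute it:\n\n```python\nimport numpy as np\n\ndef sdp_barrier(X):\n    # -log det X via Cholesky: log det X = 2 * sum(log(diag(L)))\n    L = np.linalg.cholesky(X)\n    return -2.0 * np.sum(np.log(np.diag(L)))\n\ndef socp_barrier(t, x):\n    # -log(t^2 - ||x||_2^2), defined for ||x||_2 < t\n    return -np.log(t**2 - x.dot(x))\n\nM = np.random.randn(4, 4)\nX = M @ M.T + 4 * np.eye(4)  # strictly positive definite\nprint(sdp_barrier(X), socp_barrier(2.0, np.array([0.5, 0.5, 1.0])))\n```",
"_____no_output_____"
],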
[
" ## Конические задачи\n \n - То, что барьеры находятся для конусов - это не совпадение.\n - В [работе](https://epubs.siam.org/doi/book/10.1137/1.9781611970791?mobileUi=0) Ю. Нестерова и А. Немировского показано, что наиболее важные для практики конусы имеют **самосогласованные** барьеры, которые приводят к полиномиальности соответствующих методов\n - Не все выпуклые конусы обладают такими барьерами.\n - Поэтому существуют NP-трудные задачи выпуклой оптимизации.",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
]
] |
e7fa116a84cb15479b1418ae869304b0b788d067 | 8,020 | ipynb | Jupyter Notebook | content/python/ml_algorithms/.ipynb_checkpoints/kNN-algorithm-checkpoint.ipynb | Palaniappan12345/mlnotes | 359c3b27629544604f7825ab45cd9438dc777753 | [
"MIT"
] | null | null | null | content/python/ml_algorithms/.ipynb_checkpoints/kNN-algorithm-checkpoint.ipynb | Palaniappan12345/mlnotes | 359c3b27629544604f7825ab45cd9438dc777753 | [
"MIT"
] | null | null | null | content/python/ml_algorithms/.ipynb_checkpoints/kNN-algorithm-checkpoint.ipynb | Palaniappan12345/mlnotes | 359c3b27629544604f7825ab45cd9438dc777753 | [
"MIT"
] | 1 | 2021-06-19T06:05:14.000Z | 2021-06-19T06:05:14.000Z | 33.140496 | 106 | 0.454239 | [
[
[
"---\ntitle: \"kNN-algorithm\"\nauthor: \"Palaniappan S\"\ndate: 2020-09-05\ndescription: \"-\"\ntype: technical_note\ndraft: false\n---",
"_____no_output_____"
]
],
[
[
"# importing required libraries\nimport pandas as pd\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.metrics import accuracy_score",
"_____no_output_____"
],
[
"# read the train and test dataset\ntrain_data = pd.read_csv('train-data.csv')\ntest_data = pd.read_csv('test-data.csv')\n\n\nprint(train_data.head())",
" Survived Age Fare Pclass_1 Pclass_2 Pclass_3 Sex_female \\\n0 0 28.500000 7.2292 0 0 1 0 \n1 1 27.000000 10.5000 0 1 0 1 \n2 1 29.699118 16.1000 0 0 1 1 \n3 0 29.699118 0.0000 1 0 0 0 \n4 0 17.000000 8.6625 0 0 1 0 \n\n Sex_male SibSp_0 SibSp_1 ... Parch_0 Parch_1 Parch_2 Parch_3 \\\n0 1 1 0 ... 1 0 0 0 \n1 0 1 0 ... 1 0 0 0 \n2 0 0 1 ... 1 0 0 0 \n3 1 1 0 ... 1 0 0 0 \n4 1 1 0 ... 1 0 0 0 \n\n Parch_4 Parch_5 Parch_6 Embarked_C Embarked_Q Embarked_S \n0 0 0 0 1 0 0 \n1 0 0 0 0 0 1 \n2 0 0 0 0 0 1 \n3 0 0 0 0 0 1 \n4 0 0 0 0 0 1 \n\n[5 rows x 25 columns]\n"
],
[
"# shape of the dataset\nprint('Shape of training data :',train_data.shape)\nprint('Shape of testing data :',test_data.shape)",
"Shape of training data : (712, 25)\nShape of testing data : (179, 25)\n"
],
[
"train_x = train_data.drop(columns=['Survived'],axis=1)\ntrain_y = train_data['Survived']\n\n# seperate the independent and target variable on testing data\ntest_x = test_data.drop(columns=['Survived'],axis=1)\ntest_y = test_data['Survived']",
"_____no_output_____"
],
[
"model = KNeighborsClassifier() \n\n# fit the model with the training data\nmodel.fit(train_x,train_y)",
"_____no_output_____"
],
[
"# Number of Neighbors used to predict the target\nprint('\\nThe number of neighbors used to predict the target : ',model.n_neighbors)",
"\nThe number of neighbors used to predict the target : 5\n"
],
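[
"# Hedged tuning sketch (not part of the original flow): the default k=5 is rarely\n# optimal, and kNN is distance based, so the features should also be scaled.\n# Assumes train_x / train_y from the cells above.\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.model_selection import GridSearchCV\n\npipe = make_pipeline(StandardScaler(), KNeighborsClassifier())\nparam_grid = {'kneighborsclassifier__n_neighbors': range(1, 31)}\ngrid = GridSearchCV(pipe, param_grid, cv=5)\ngrid.fit(train_x, train_y)\nprint('best params :', grid.best_params_)\nprint('cv accuracy :', grid.best_score_)",
"_____no_output_____"
],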
[
"# predict the target on the train dataset\npredict_train = model.predict(train_x)\nprint('Target on train data',predict_train) \n\n# Accuray Score on train dataset\naccuracy_train = accuracy_score(train_y,predict_train)\nprint('accuracy_score on train dataset : ', accuracy_train)",
"Target on train data [0 1 1 0 0 0 0 0 0 0 0 1 0 1 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 1 1 0 1 0 0 0 0\n 1 0 0 0 0 0 1 1 1 1 0 1 0 0 0 0 0 0 0 0 0 1 0 1 1 1 1 0 1 0 1 0 0 0 0 0 0\n 0 1 1 0 0 0 0 0 1 0 1 0 0 0 1 1 1 0 1 0 0 0 0 0 1 0 0 1 0 1 1 1 0 1 0 1 0\n 0 1 1 0 1 0 0 1 0 0 0 0 1 0 0 1 1 0 0 1 0 0 0 1 1 0 1 0 0 0 1 0 0 0 1 0 0\n 0 0 0 1 1 0 0 1 0 0 1 0 1 0 0 1 1 0 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 1\n 0 0 1 1 1 0 0 1 0 1 0 0 0 1 1 1 0 0 0 1 1 1 1 0 0 0 0 1 0 0 0 0 0 0 0 0 0\n 0 0 1 0 0 0 1 0 1 0 0 0 0 1 0 1 0 0 0 1 0 1 0 0 1 0 0 0 1 1 0 1 0 0 0 0 0\n 1 1 1 1 1 0 1 1 1 1 0 0 0 0 0 1 1 1 1 0 0 0 0 0 0 1 1 0 0 0 1 0 0 0 1 0 0\n 0 0 0 0 0 1 1 0 0 0 1 0 0 0 0 0 1 1 1 1 0 0 0 0 1 0 1 0 0 0 1 1 0 1 0 0 1\n 1 0 0 1 0 0 1 0 0 0 0 0 1 0 0 0 1 0 0 0 0 1 1 1 1 0 0 0 0 1 0 0 0 1 0 1 0\n 0 0 0 1 0 0 1 0 0 1 0 1 0 0 1 0 0 1 0 0 1 1 1 1 0 0 1 1 0 1 0 1 0 1 1 1 1\n 0 1 1 1 0 0 0 0 0 0 1 0 1 0 0 1 0 0 1 1 1 0 0 0 1 0 1 0 0 1 0 0 0 0 1 0 0\n 0 1 0 0 0 0 0 0 1 0 0 1 0 0 1 0 1 0 0 0 0 0 0 0 0 0 0 0 1 1 1 0 0 0 0 1 0\n 0 0 0 1 1 0 0 0 0 1 1 0 0 0 1 0 1 1 1 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 1 1 1\n 1 0 0 0 0 1 0 0 0 1 1 0 0 0 1 1 1 0 0 0 0 0 1 1 0 0 0 0 1 1 1 1 0 1 0 0 0\n 0 1 1 1 1 0 0 1 0 0 0 0 0 0 0 1 0 0 0 1 0 0 0 0 1 1 0 0 0 1 1 1 0 1 1 1 0\n 1 0 0 0 0 1 0 1 0 1 0 1 1 0 0 1 0 1 1 0 1 0 0 0 1 0 1 1 0 0 0 0 1 0 0 0 1\n 0 0 0 1 0 1 0 1 0 0 1 1 0 0 0 0 1 0 0 0 0 0 0 1 1 1 0 1 0 0 0 1 0 1 0 0 0\n 1 1 0 0 0 0 0 1 0 0 1 0 1 0 0 1 1 0 0 0 1 0 1 0 0 1 1 0 0 0 1 0 0 1 0 1 0\n 1 0 1 1 1 0 0 1 0]\naccuracy_score on train dataset : 0.8047752808988764\n"
],
[
"# predict the target on the test dataset\npredict_test = model.predict(test_x)\nprint('Target on test data',predict_test) \n\n# Accuracy Score on test dataset\naccuracy_test = accuracy_score(test_y,predict_test)\nprint('accuracy_score on test dataset : ', accuracy_test)",
"Target on test data [0 0 0 1 1 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 1 0 0 1 0 0 0 0 1 0 1 0 0\n 1 0 0 1 1 0 0 0 1 0 0 1 1 1 0 0 0 1 1 0 0 0 0 0 1 1 0 0 0 1 0 0 0 1 0 0 0\n 1 0 0 0 0 0 0 0 0 0 0 0 1 1 1 0 0 0 1 0 1 0 0 0 1 0 1 1 0 1 1 0 0 1 0 0 1\n 0 1 0 0 1 0 1 0 1 1 0 1 0 0 1 1 0 0 1 0 0 0 1 1 0 0 0 1 0 1 0 1 0 0 0 0 0\n 0 0 0 0 1 0 0 0 0 1 0 0 0 1 0 0 0 1 1 0 1 0 1 0 0 1 0 0 0 0 0]\naccuracy_score on test dataset : 0.7150837988826816\n"
]
]
] | [
"raw",
"code"
] | [
[
"raw"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e7fa11a3e962debcb4171974a8da4024800712ec | 16,315 | ipynb | Jupyter Notebook | examples/tutorials/02_Learning_MNIST_Digit_Classifiers.ipynb | martonlanga/deepchem | af2db874484603ade489fa513eac193b38ce6d56 | [
"MIT"
] | 1 | 2020-09-14T02:34:40.000Z | 2020-09-14T02:34:40.000Z | examples/tutorials/02_Learning_MNIST_Digit_Classifiers.ipynb | martonlanga/deepchem | af2db874484603ade489fa513eac193b38ce6d56 | [
"MIT"
] | 1 | 2020-07-13T18:59:49.000Z | 2020-07-13T18:59:49.000Z | examples/tutorials/02_Learning_MNIST_Digit_Classifiers.ipynb | martonlanga/deepchem | af2db874484603ade489fa513eac193b38ce6d56 | [
"MIT"
] | null | null | null | 44.944904 | 593 | 0.588845 | [
[
[
"# Tutorial Part 2: Learning MNIST Digit Classifiers\n\nIn the previous tutorial, we learned some basics of how to load data into DeepChem and how to use the basic DeepChem objects to load and manipulate this data. In this tutorial, you'll put the parts together and learn how to train a basic image classification model in DeepChem. You might ask, why are we bothering to learn this material in DeepChem? Part of the reason is that image processing is an increasingly important part of AI for the life sciences. So learning how to train image processing models will be very useful for using some of the more advanced DeepChem features.\n\nThe MNIST dataset contains handwritten digits along with their human annotated labels. The learning challenge for this dataset is to train a model that maps the digit image to its true label. MNIST has been a standard benchmark for machine learning for decades at this point. \n\n![MNIST](https://github.com/deepchem/deepchem/blob/master/examples/tutorials/mnist_examples.png?raw=1)\n\n## Colab\n\nThis tutorial and the rest in this sequence are designed to be done in Google colab. If you'd like to open this notebook in colab, you can use the following link.\n\n[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/deepchem/deepchem/blob/master/examples/tutorials/02_Learning_MNIST_Digit_Classifiers.ipynb)\n\n## Setup\n\nWe recommend running this tutorial on Google colab. You'll need to run the following cell of installation commands on Colab to get your environment set up. If you'd rather run the tutorial locally, make sure you don't run these commands (since they'll download and install a new Anaconda python setup)",
"_____no_output_____"
]
],
[
[
"%tensorflow_version 1.x\n!curl -Lo deepchem_installer.py https://raw.githubusercontent.com/deepchem/deepchem/master/scripts/colab_install.py\nimport deepchem_installer\n%time deepchem_installer.install(version='2.3.0')",
"TensorFlow 1.x selected.\n % Total % Received % Xferd Average Speed Time Time Time Current\n Dload Upload Total Spent Left Speed\n100 3477 100 3477 0 0 9934 0 --:--:-- --:--:-- --:--:-- 9934\n"
],
[
"from tensorflow.examples.tutorials.mnist import input_data",
"_____no_output_____"
],
[
"# TODO: This is deprecated. Let's replace with a DeepChem native loader for maintainability.\nmnist = input_data.read_data_sets(\"MNIST_data/\", one_hot=True)",
"WARNING:tensorflow:From <ipython-input-3-227956e9c9c1>:2: read_data_sets (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\nInstructions for updating:\nPlease use alternatives such as official/mnist/dataset.py from tensorflow/models.\nWARNING:tensorflow:From /tensorflow-1.15.2/python3.6/tensorflow_core/contrib/learn/python/learn/datasets/mnist.py:260: maybe_download (from tensorflow.contrib.learn.python.learn.datasets.base) is deprecated and will be removed in a future version.\nInstructions for updating:\nPlease write your own downloading logic.\nWARNING:tensorflow:From /tensorflow-1.15.2/python3.6/tensorflow_core/contrib/learn/python/learn/datasets/base.py:252: _internal_retry.<locals>.wrap.<locals>.wrapped_fn (from tensorflow.contrib.learn.python.learn.datasets.base) is deprecated and will be removed in a future version.\nInstructions for updating:\nPlease use urllib or similar directly.\nSuccessfully downloaded train-images-idx3-ubyte.gz 9912422 bytes.\nWARNING:tensorflow:From /tensorflow-1.15.2/python3.6/tensorflow_core/contrib/learn/python/learn/datasets/mnist.py:262: extract_images (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\nInstructions for updating:\nPlease use tf.data to implement this functionality.\nExtracting MNIST_data/train-images-idx3-ubyte.gz\nSuccessfully downloaded train-labels-idx1-ubyte.gz 28881 bytes.\nWARNING:tensorflow:From /tensorflow-1.15.2/python3.6/tensorflow_core/contrib/learn/python/learn/datasets/mnist.py:267: extract_labels (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\nInstructions for updating:\nPlease use tf.data to implement this functionality.\nExtracting MNIST_data/train-labels-idx1-ubyte.gz\nWARNING:tensorflow:From /tensorflow-1.15.2/python3.6/tensorflow_core/contrib/learn/python/learn/datasets/mnist.py:110: dense_to_one_hot (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\nInstructions for updating:\nPlease use tf.one_hot on tensors.\nSuccessfully downloaded t10k-images-idx3-ubyte.gz 1648877 bytes.\nExtracting MNIST_data/t10k-images-idx3-ubyte.gz\nSuccessfully downloaded t10k-labels-idx1-ubyte.gz 4542 bytes.\nExtracting MNIST_data/t10k-labels-idx1-ubyte.gz\nWARNING:tensorflow:From /tensorflow-1.15.2/python3.6/tensorflow_core/contrib/learn/python/learn/datasets/mnist.py:290: DataSet.__init__ (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\nInstructions for updating:\nPlease use alternatives such as official/mnist/dataset.py from tensorflow/models.\n"
],
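[
"# Hedged sketch for the TODO above: load MNIST via tf.keras.datasets (available in\n# TF 1.x) instead of the deprecated input_data helper. Note the split differs:\n# keras returns a 60k/10k train/test split, so a validation set would have to be\n# carved out of the training portion rather than being provided automatically.\nimport numpy as np\nimport tensorflow as tf\n\n(x_tr, y_tr), (x_te, y_te) = tf.keras.datasets.mnist.load_data()\n# flatten to (N, 784) floats in [0, 1] and one-hot encode the labels, matching\n# the arrays produced by input_data.read_data_sets(one_hot=True) above\nx_tr = x_tr.reshape(-1, 784).astype(np.float32) / 255.0\nx_te = x_te.reshape(-1, 784).astype(np.float32) / 255.0\ny_tr = np.eye(10)[y_tr]\ny_te = np.eye(10)[y_te]",
"_____no_output_____"
],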
[
"import deepchem as dc\nimport tensorflow as tf\nfrom tensorflow.keras.layers import Reshape, Conv2D, Flatten, Dense, Softmax",
"_____no_output_____"
],
[
"train = dc.data.NumpyDataset(mnist.train.images, mnist.train.labels)\nvalid = dc.data.NumpyDataset(mnist.validation.images, mnist.validation.labels)",
"_____no_output_____"
],
[
"keras_model = tf.keras.Sequential([\n Reshape((28, 28, 1)),\n Conv2D(filters=32, kernel_size=5, activation=tf.nn.relu),\n Conv2D(filters=64, kernel_size=5, activation=tf.nn.relu),\n Flatten(),\n Dense(1024, activation=tf.nn.relu),\n Dense(10),\n Softmax()\n])\nmodel = dc.models.KerasModel(keras_model, dc.models.losses.CategoricalCrossEntropy())",
"_____no_output_____"
],
[
"model.fit(train, nb_epoch=2)",
"WARNING:tensorflow:From /root/miniconda/lib/python3.6/site-packages/deepchem/models/keras_model.py:169: The name tf.Session is deprecated. Please use tf.compat.v1.Session instead.\n\nWARNING:tensorflow:From /root/miniconda/lib/python3.6/site-packages/deepchem/models/optimizers.py:76: The name tf.train.AdamOptimizer is deprecated. Please use tf.compat.v1.train.AdamOptimizer instead.\n\nWARNING:tensorflow:From /root/miniconda/lib/python3.6/site-packages/deepchem/models/keras_model.py:258: The name tf.global_variables is deprecated. Please use tf.compat.v1.global_variables instead.\n\nWARNING:tensorflow:From /root/miniconda/lib/python3.6/site-packages/deepchem/models/keras_model.py:260: The name tf.variables_initializer is deprecated. Please use tf.compat.v1.variables_initializer instead.\n\nWARNING:tensorflow:From /root/miniconda/lib/python3.6/site-packages/deepchem/models/keras_model.py:200: The name tf.placeholder is deprecated. Please use tf.compat.v1.placeholder instead.\n\nWARNING:tensorflow:From /tensorflow-1.15.2/python3.6/tensorflow_core/python/ops/resource_variable_ops.py:1630: calling BaseResourceVariable.__init__ (from tensorflow.python.ops.resource_variable_ops) with constraint is deprecated and will be removed in a future version.\nInstructions for updating:\nIf using Keras pass *_constraint arguments to layers.\n"
],
[
"from sklearn.metrics import roc_curve, auc\nimport numpy as np\n\nprint(\"Validation\")\nprediction = np.squeeze(model.predict_on_batch(valid.X))\n\nfpr = dict()\ntpr = dict()\nroc_auc = dict()\nfor i in range(10):\n fpr[i], tpr[i], thresh = roc_curve(valid.y[:, i], prediction[:, i])\n roc_auc[i] = auc(fpr[i], tpr[i])\n print(\"class %s:auc=%s\" % (i, roc_auc[i]))",
"Validation\nclass 0:auc=0.9999482812520925\nclass 1:auc=0.9999327470315621\nclass 2:auc=0.9999223382455529\nclass 3:auc=0.9999378924197698\nclass 4:auc=0.999804920932277\nclass 5:auc=0.9997608046652174\nclass 6:auc=0.9999347825797615\nclass 7:auc=0.9997099080694587\nclass 8:auc=0.999882187740275\nclass 9:auc=0.9996286953889618\n"
]
],
[
[
"# Congratulations! Time to join the Community!\n\nCongratulations on completing this tutorial notebook! If you enjoyed working through the tutorial, and want to continue working with DeepChem, we encourage you to finish the rest of the tutorials in this series. You can also help the DeepChem community in the following ways:\n\n## Star DeepChem on [GitHub](https://github.com/deepchem/deepchem)\nThis helps build awareness of the DeepChem project and the tools for open source drug discovery that we're trying to build.\n\n## Join the DeepChem Gitter\nThe DeepChem [Gitter](https://gitter.im/deepchem/Lobby) hosts a number of scientists, developers, and enthusiasts interested in deep learning for the life sciences. Join the conversation!",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
]
] |
e7fa18eb4f321df6e6a1ec9fb6f29f25d5195f37 | 1,469 | ipynb | Jupyter Notebook | src/tests/bsplines/PPUM Basis.ipynb | certik/hfsolver | b4c50c1979fb7e468b1852b144ba756f5a51788d | [
"BSD-2-Clause"
] | 20 | 2015-03-24T13:06:39.000Z | 2022-03-29T00:14:02.000Z | src/tests/bsplines/PPUM Basis.ipynb | certik/hfsolver | b4c50c1979fb7e468b1852b144ba756f5a51788d | [
"BSD-2-Clause"
] | 6 | 2015-03-25T04:59:43.000Z | 2017-06-06T23:00:09.000Z | src/tests/bsplines/PPUM Basis.ipynb | certik/hfsolver | b4c50c1979fb7e468b1852b144ba756f5a51788d | [
"BSD-2-Clause"
] | 5 | 2016-01-20T13:38:22.000Z | 2020-11-24T15:35:43.000Z | 19.851351 | 46 | 0.446562 | [
[
[
"%pylab inline",
"_____no_output_____"
],
[
"D = loadtxt(\"basis.txt\")\nNb = (size(D,0)-1)/2\nNq = size(D,1)\nB = empty((Nb,Nq), dtype=\"double\")\nBp = empty((Nb,Nq), dtype=\"double\")\nx = D[0,:]\nfor i in range(Nb):\n B[i,:] = D[2*i+1,:]\n Bp[i,:] = D[2*i+2,:]\n \nfigure(figsize(16, 6))\nsubplot(121)\ntitle(\"B(x)\")\nfor i in range(Nb):\n plot(x, B[i,:])\n \nsubplot(122)\ntitle(\"B'(x)\")\nfor i in range(Nb):\n plot(x, Bp[i,:])\n \nsavefig(\"ppum_basis.pdf\")",
"_____no_output_____"
],
[
"plot(x, x**2/2)",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code"
]
] |
e7fa24fdd336e0ace1c6510ce504a2b0d4ac1c19 | 100,120 | ipynb | Jupyter Notebook | community/aqua/chemistry/h2_particle_hole.ipynb | Chibikuri/qiskit-tutorials | 15c121b95249de17e311c869fbc455210b2fcf5e | [
"Apache-2.0"
] | 2 | 2017-11-09T16:33:14.000Z | 2018-02-26T00:42:17.000Z | community/aqua/chemistry/h2_particle_hole.ipynb | Chibikuri/qiskit-tutorials | 15c121b95249de17e311c869fbc455210b2fcf5e | [
"Apache-2.0"
] | 1 | 2019-04-12T07:43:25.000Z | 2020-02-07T13:32:18.000Z | community/aqua/chemistry/h2_particle_hole.ipynb | Chibikuri/qiskit-tutorials | 15c121b95249de17e311c869fbc455210b2fcf5e | [
"Apache-2.0"
] | 2 | 2019-03-24T21:00:25.000Z | 2019-03-24T21:57:10.000Z | 395.731225 | 38,276 | 0.931123 | [
[
[
"## _*H2 energy plot comparing full to particle hole transformations*_\n\nThis notebook demonstrates using Qiskit Chemistry to plot graphs of the ground state energy of the Hydrogen (H2) molecule over a range of inter-atomic distances using VQE and UCCSD with full and particle hole transformations. It is compared to the same energies as computed by the ExactEigensolver\n\nThis notebook populates a dictionary, that is a progammatic representation of an input file, in order to drive the Qiskit Chemistry stack. Such a dictionary can be manipulated programmatically and this is indeed the case here where we alter the molecule supplied to the driver in each loop.\n\nThis notebook has been written to use the PYQUANTE chemistry driver. See the PYQUANTE chemistry driver readme if you need to install the external PyQuante2 library that this driver requires.",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport pylab\nfrom qiskit_chemistry import QiskitChemistry\n\n# Input dictionary to configure Qiskit Chemistry for the chemistry problem.\nqiskit_chemistry_dict = {\n 'problem': {'random_seed': 50},\n 'driver': {'name': 'PYQUANTE'},\n 'PYQUANTE': {'atoms': '', 'basis': 'sto3g'},\n 'operator': {'name': 'hamiltonian', 'qubit_mapping': 'jordan_wigner',\n 'two_qubit_reduction': False},\n 'algorithm': {'name': ''},\n 'optimizer': {'name': 'COBYLA', 'maxiter': 10000 },\n 'variational_form': {'name': 'UCCSD'},\n 'initial_state': {'name': 'HartreeFock'}\n}\nmolecule = 'H .0 .0 -{0}; H .0 .0 {0}'\nalgorithms = ['VQE', 'ExactEigensolver']\ntransformations = ['full', 'particle_hole']\n\nstart = 0.5 # Start distance\nby = 0.5 # How much to increase distance by\nsteps = 20 # Number of steps to increase by\nenergies = np.empty([len(transformations), len(algorithms), steps+1])\nhf_energies = np.empty(steps+1)\ndistances = np.empty(steps+1)\neval_counts = np.empty([len(transformations), steps+1])\n\nprint('Processing step __', end='')\nfor i in range(steps+1):\n print('\\b\\b{:2d}'.format(i), end='', flush=True)\n d = start + i*by/steps\n qiskit_chemistry_dict['PYQUANTE']['atoms'] = molecule.format(d/2) \n for j in range(len(algorithms)):\n qiskit_chemistry_dict['algorithm']['name'] = algorithms[j] \n for k in range(len(transformations)):\n qiskit_chemistry_dict['operator']['transformation'] = transformations[k] \n solver = QiskitChemistry()\n result = solver.run(qiskit_chemistry_dict)\n energies[k][j][i] = result['energy']\n hf_energies[i] = result['hf_energy']\n if algorithms[j] == 'VQE':\n eval_counts[k][i] = result['algorithm_retvals']['eval_count']\n distances[i] = d\nprint(' --- complete')\n\nprint('Distances: ', distances)\nprint('Energies:', energies)\nprint('Hartree-Fock energies:', hf_energies)\nprint('VQE num evaluations:', eval_counts)\n",
"Processing step 20 --- complete\nDistances: [0.5 0.525 0.55 0.575 0.6 0.625 0.65 0.675 0.7 0.725 0.75 0.775\n 0.8 0.825 0.85 0.875 0.9 0.925 0.95 0.975 1. ]\nEnergies: [[[-1.05515972 -1.0759136 -1.09262986 -1.105918 -1.11628597\n -1.12416087 -1.12990475 -1.13382619 -1.13618942 -1.13722134\n -1.13711706 -1.13604434 -1.13414766 -1.13155119 -1.12836187\n -1.12467174 -1.12056027 -1.11609624 -1.11133942 -1.10634211\n -1.10115033]\n [-1.05515974 -1.07591361 -1.09262987 -1.10591802 -1.11628599\n -1.12416089 -1.12990476 -1.1338262 -1.13618944 -1.13722136\n -1.13711707 -1.13604436 -1.13414767 -1.13155121 -1.12836188\n -1.12467175 -1.12056028 -1.11609624 -1.11133943 -1.10634212\n -1.10115034]]\n\n [[-1.05515973 -1.07591359 -1.09262986 -1.105918 -1.11628597\n -1.12416089 -1.12990475 -1.13382616 -1.13618942 -1.13722135\n -1.13711706 -1.13604434 -1.13414766 -1.1315512 -1.12836188\n -1.12467174 -1.12056028 -1.11609624 -1.11133942 -1.10634211\n -1.10115033]\n [-1.05515974 -1.07591361 -1.09262987 -1.10591802 -1.11628599\n -1.12416089 -1.12990476 -1.1338262 -1.13618944 -1.13722136\n -1.13711707 -1.13604436 -1.13414767 -1.13155121 -1.12836188\n -1.12467175 -1.12056028 -1.11609624 -1.11133943 -1.10634212\n -1.10115034]]]\nHartree-Fock energies: [-1.04299622 -1.0630621 -1.0790507 -1.09157046 -1.10112822 -1.10814997\n -1.11299652 -1.11597525 -1.11734902 -1.11734325 -1.11615145 -1.11393966\n -1.1108504 -1.10700581 -1.10251056 -1.09745432 -1.09191405 -1.08595588\n -1.07963694 -1.07300677 -1.06610866]\nVQE num evaluations: [[50. 53. 56. 50. 43. 52. 51. 45. 51. 46. 42. 57. 45. 49. 48. 50. 50. 52.\n 51. 56. 60.]\n [49. 49. 56. 50. 43. 51. 49. 45. 61. 46. 43. 57. 45. 47. 44. 50. 53. 49.\n 54. 56. 55.]]\n"
],
[
"pylab.plot(distances, hf_energies, label='Hartree-Fock')\nfor j in range(len(algorithms)):\n for k in range(len(transformations)):\n pylab.plot(distances, energies[k][j], label=algorithms[j]+' + '+transformations[k])\npylab.xlabel('Interatomic distance')\npylab.ylabel('Energy')\npylab.title('H2 Ground State Energy')\npylab.legend(loc='upper right')",
"_____no_output_____"
],
[
"pylab.plot(distances, np.subtract(hf_energies, energies[0][1]), label='Hartree-Fock')\nfor k in range(len(transformations)):\n pylab.plot(distances, np.subtract(energies[k][0], energies[k][1]), label='VQE + '+transformations[k])\npylab.xlabel('Interatomic distance')\npylab.ylabel('Energy')\npylab.title('Energy difference from ExactEigensolver')\npylab.legend(loc='upper left')",
"_____no_output_____"
],
[
"for k in range(len(transformations)):\n pylab.plot(distances, eval_counts[k], '-o', label='VQE + ' + transformations[k])\npylab.xlabel('Interatomic distance')\npylab.ylabel('Evaluations')\npylab.title('VQE number of evaluations')\npylab.legend(loc='upper left')",
"_____no_output_____"
]
]
] | [
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code"
]
] |
e7fa27635f2253993edfc0d56f7f67a920d99460 | 632,090 | ipynb | Jupyter Notebook | Copy_of_Copy_of_Portfolio_Model_Base.ipynb | amritgos/MTP | 9a826cf4d8db9fd852a8fac4be2e62b8f304ba7c | [
"MIT"
] | null | null | null | Copy_of_Copy_of_Portfolio_Model_Base.ipynb | amritgos/MTP | 9a826cf4d8db9fd852a8fac4be2e62b8f304ba7c | [
"MIT"
] | null | null | null | Copy_of_Copy_of_Portfolio_Model_Base.ipynb | amritgos/MTP | 9a826cf4d8db9fd852a8fac4be2e62b8f304ba7c | [
"MIT"
] | null | null | null | 162.199128 | 433,178 | 0.767511 | [
[
[
"!pip install git+https://github.com/amritgos/FinRL.git",
"Collecting git+https://github.com/amritgos/FinRL.git\n Cloning https://github.com/amritgos/FinRL.git to /tmp/pip-req-build-blj6h990\n Running command git clone -q https://github.com/amritgos/FinRL.git /tmp/pip-req-build-blj6h990\nCollecting pyfolio@ git+https://github.com/quantopian/pyfolio.git#egg=pyfolio-0.9.2\n Cloning https://github.com/quantopian/pyfolio.git to /tmp/pip-install-uw9hqp2z/pyfolio_4e2e8f43031e4619a0f1097b9ca765a6\n Running command git clone -q https://github.com/quantopian/pyfolio.git /tmp/pip-install-uw9hqp2z/pyfolio_4e2e8f43031e4619a0f1097b9ca765a6\nRequirement already satisfied: numpy>=1.17.3 in /usr/local/lib/python3.7/dist-packages (from finrl==0.3.1) (1.19.5)\nRequirement already satisfied: pandas>=1.1.5 in /usr/local/lib/python3.7/dist-packages (from finrl==0.3.1) (1.1.5)\nRequirement already satisfied: stockstats in /usr/local/lib/python3.7/dist-packages (from finrl==0.3.1) (0.3.2)\nRequirement already satisfied: yfinance in /usr/local/lib/python3.7/dist-packages (from finrl==0.3.1) (0.1.66)\nRequirement already satisfied: matplotlib in /usr/local/lib/python3.7/dist-packages (from finrl==0.3.1) (3.2.2)\nRequirement already satisfied: scikit-learn>=0.21.0 in /usr/local/lib/python3.7/dist-packages (from finrl==0.3.1) (1.0.1)\nRequirement already satisfied: gym>=0.17 in /usr/local/lib/python3.7/dist-packages (from finrl==0.3.1) (0.17.3)\nRequirement already satisfied: stable-baselines3[extra] in /usr/local/lib/python3.7/dist-packages (from finrl==0.3.1) (1.3.0)\nRequirement already satisfied: ray[default] in /usr/local/lib/python3.7/dist-packages (from finrl==0.3.1) (1.8.0)\nRequirement already satisfied: lz4 in /usr/local/lib/python3.7/dist-packages (from finrl==0.3.1) (3.1.3)\nRequirement already satisfied: tensorboardX in /usr/local/lib/python3.7/dist-packages (from finrl==0.3.1) (2.4)\nRequirement already satisfied: gputil in /usr/local/lib/python3.7/dist-packages (from finrl==0.3.1) (1.4.0)\nRequirement already satisfied: trading_calendars in /usr/local/lib/python3.7/dist-packages (from finrl==0.3.1) (2.1.1)\nRequirement already satisfied: alpaca_trade_api in /usr/local/lib/python3.7/dist-packages (from finrl==0.3.1) (1.4.1)\nRequirement already satisfied: ccxt in /usr/local/lib/python3.7/dist-packages (from finrl==0.3.1) (1.61.51)\nRequirement already satisfied: jqdatasdk in /usr/local/lib/python3.7/dist-packages (from finrl==0.3.1) (1.8.10)\nRequirement already satisfied: wrds in /usr/local/lib/python3.7/dist-packages (from finrl==0.3.1) (3.1.1)\nRequirement already satisfied: pytest in /usr/local/lib/python3.7/dist-packages (from finrl==0.3.1) (3.6.4)\nRequirement already satisfied: setuptools>=41.4.0 in /usr/local/lib/python3.7/dist-packages (from finrl==0.3.1) (57.4.0)\nRequirement already satisfied: wheel>=0.33.6 in /usr/local/lib/python3.7/dist-packages (from finrl==0.3.1) (0.37.0)\nRequirement already satisfied: ipython>=3.2.3 in /usr/local/lib/python3.7/dist-packages (from pyfolio@ git+https://github.com/quantopian/pyfolio.git#egg=pyfolio-0.9.2->finrl==0.3.1) (5.5.0)\nRequirement already satisfied: pytz>=2014.10 in /usr/local/lib/python3.7/dist-packages (from pyfolio@ git+https://github.com/quantopian/pyfolio.git#egg=pyfolio-0.9.2->finrl==0.3.1) (2018.9)\nRequirement already satisfied: scipy>=0.14.0 in /usr/local/lib/python3.7/dist-packages (from pyfolio@ git+https://github.com/quantopian/pyfolio.git#egg=pyfolio-0.9.2->finrl==0.3.1) (1.4.1)\nRequirement already satisfied: seaborn>=0.7.1 in /usr/local/lib/python3.7/dist-packages (from pyfolio@ 
git+https://github.com/quantopian/pyfolio.git#egg=pyfolio-0.9.2->finrl==0.3.1) (0.11.2)\nRequirement already satisfied: empyrical>=0.5.0 in /usr/local/lib/python3.7/dist-packages (from pyfolio@ git+https://github.com/quantopian/pyfolio.git#egg=pyfolio-0.9.2->finrl==0.3.1) (0.5.5)\nRequirement already satisfied: pandas-datareader>=0.2 in /usr/local/lib/python3.7/dist-packages (from empyrical>=0.5.0->pyfolio@ git+https://github.com/quantopian/pyfolio.git#egg=pyfolio-0.9.2->finrl==0.3.1) (0.9.0)\nRequirement already satisfied: cloudpickle<1.7.0,>=1.2.0 in /usr/local/lib/python3.7/dist-packages (from gym>=0.17->finrl==0.3.1) (1.3.0)\nRequirement already satisfied: pyglet<=1.5.0,>=1.4.0 in /usr/local/lib/python3.7/dist-packages (from gym>=0.17->finrl==0.3.1) (1.5.0)\nRequirement already satisfied: traitlets>=4.2 in /usr/local/lib/python3.7/dist-packages (from ipython>=3.2.3->pyfolio@ git+https://github.com/quantopian/pyfolio.git#egg=pyfolio-0.9.2->finrl==0.3.1) (5.1.1)\nRequirement already satisfied: simplegeneric>0.8 in /usr/local/lib/python3.7/dist-packages (from ipython>=3.2.3->pyfolio@ git+https://github.com/quantopian/pyfolio.git#egg=pyfolio-0.9.2->finrl==0.3.1) (0.8.1)\nRequirement already satisfied: prompt-toolkit<2.0.0,>=1.0.4 in /usr/local/lib/python3.7/dist-packages (from ipython>=3.2.3->pyfolio@ git+https://github.com/quantopian/pyfolio.git#egg=pyfolio-0.9.2->finrl==0.3.1) (1.0.18)\nRequirement already satisfied: decorator in /usr/local/lib/python3.7/dist-packages (from ipython>=3.2.3->pyfolio@ git+https://github.com/quantopian/pyfolio.git#egg=pyfolio-0.9.2->finrl==0.3.1) (4.4.2)\nRequirement already satisfied: pickleshare in /usr/local/lib/python3.7/dist-packages (from ipython>=3.2.3->pyfolio@ git+https://github.com/quantopian/pyfolio.git#egg=pyfolio-0.9.2->finrl==0.3.1) (0.7.5)\nRequirement already satisfied: pexpect in /usr/local/lib/python3.7/dist-packages (from ipython>=3.2.3->pyfolio@ git+https://github.com/quantopian/pyfolio.git#egg=pyfolio-0.9.2->finrl==0.3.1) (4.8.0)\nRequirement already satisfied: pygments in /usr/local/lib/python3.7/dist-packages (from ipython>=3.2.3->pyfolio@ git+https://github.com/quantopian/pyfolio.git#egg=pyfolio-0.9.2->finrl==0.3.1) (2.6.1)\nRequirement already satisfied: pyparsing!=2.0.4,!=2.1.2,!=2.1.6,>=2.0.1 in /usr/local/lib/python3.7/dist-packages (from matplotlib->finrl==0.3.1) (3.0.6)\nRequirement already satisfied: kiwisolver>=1.0.1 in /usr/local/lib/python3.7/dist-packages (from matplotlib->finrl==0.3.1) (1.3.2)\nRequirement already satisfied: python-dateutil>=2.1 in /usr/local/lib/python3.7/dist-packages (from matplotlib->finrl==0.3.1) (2.8.2)\nRequirement already satisfied: cycler>=0.10 in /usr/local/lib/python3.7/dist-packages (from matplotlib->finrl==0.3.1) (0.11.0)\nRequirement already satisfied: lxml in /usr/local/lib/python3.7/dist-packages (from pandas-datareader>=0.2->empyrical>=0.5.0->pyfolio@ git+https://github.com/quantopian/pyfolio.git#egg=pyfolio-0.9.2->finrl==0.3.1) (4.6.4)\nRequirement already satisfied: requests>=2.19.0 in /usr/local/lib/python3.7/dist-packages (from pandas-datareader>=0.2->empyrical>=0.5.0->pyfolio@ git+https://github.com/quantopian/pyfolio.git#egg=pyfolio-0.9.2->finrl==0.3.1) (2.23.0)\nRequirement already satisfied: wcwidth in /usr/local/lib/python3.7/dist-packages (from prompt-toolkit<2.0.0,>=1.0.4->ipython>=3.2.3->pyfolio@ git+https://github.com/quantopian/pyfolio.git#egg=pyfolio-0.9.2->finrl==0.3.1) (0.2.5)\nRequirement already satisfied: six>=1.9.0 in /usr/local/lib/python3.7/dist-packages (from 
prompt-toolkit<2.0.0,>=1.0.4->ipython>=3.2.3->pyfolio@ git+https://github.com/quantopian/pyfolio.git#egg=pyfolio-0.9.2->finrl==0.3.1) (1.15.0)\nRequirement already satisfied: future in /usr/local/lib/python3.7/dist-packages (from pyglet<=1.5.0,>=1.4.0->gym>=0.17->finrl==0.3.1) (0.16.0)\nRequirement already satisfied: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in /usr/local/lib/python3.7/dist-packages (from requests>=2.19.0->pandas-datareader>=0.2->empyrical>=0.5.0->pyfolio@ git+https://github.com/quantopian/pyfolio.git#egg=pyfolio-0.9.2->finrl==0.3.1) (1.24.3)\nRequirement already satisfied: idna<3,>=2.5 in /usr/local/lib/python3.7/dist-packages (from requests>=2.19.0->pandas-datareader>=0.2->empyrical>=0.5.0->pyfolio@ git+https://github.com/quantopian/pyfolio.git#egg=pyfolio-0.9.2->finrl==0.3.1) (2.10)\nRequirement already satisfied: chardet<4,>=3.0.2 in /usr/local/lib/python3.7/dist-packages (from requests>=2.19.0->pandas-datareader>=0.2->empyrical>=0.5.0->pyfolio@ git+https://github.com/quantopian/pyfolio.git#egg=pyfolio-0.9.2->finrl==0.3.1) (3.0.4)\nRequirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.7/dist-packages (from requests>=2.19.0->pandas-datareader>=0.2->empyrical>=0.5.0->pyfolio@ git+https://github.com/quantopian/pyfolio.git#egg=pyfolio-0.9.2->finrl==0.3.1) (2021.10.8)\nRequirement already satisfied: threadpoolctl>=2.0.0 in /usr/local/lib/python3.7/dist-packages (from scikit-learn>=0.21.0->finrl==0.3.1) (3.0.0)\nRequirement already satisfied: joblib>=0.11 in /usr/local/lib/python3.7/dist-packages (from scikit-learn>=0.21.0->finrl==0.3.1) (1.1.0)\nRequirement already satisfied: aiohttp==3.7.4 in /usr/local/lib/python3.7/dist-packages (from alpaca_trade_api->finrl==0.3.1) (3.7.4)\nRequirement already satisfied: PyYAML==5.4.1 in /usr/local/lib/python3.7/dist-packages (from alpaca_trade_api->finrl==0.3.1) (5.4.1)\nRequirement already satisfied: msgpack==1.0.2 in /usr/local/lib/python3.7/dist-packages (from alpaca_trade_api->finrl==0.3.1) (1.0.2)\nRequirement already satisfied: websocket-client<2,>=0.56.0 in /usr/local/lib/python3.7/dist-packages (from alpaca_trade_api->finrl==0.3.1) (1.2.1)\nRequirement already satisfied: websockets<10,>=8.0 in /usr/local/lib/python3.7/dist-packages (from alpaca_trade_api->finrl==0.3.1) (9.1)\nRequirement already satisfied: multidict<7.0,>=4.5 in /usr/local/lib/python3.7/dist-packages (from aiohttp==3.7.4->alpaca_trade_api->finrl==0.3.1) (5.2.0)\nRequirement already satisfied: async-timeout<4.0,>=3.0 in /usr/local/lib/python3.7/dist-packages (from aiohttp==3.7.4->alpaca_trade_api->finrl==0.3.1) (3.0.1)\nRequirement already satisfied: attrs>=17.3.0 in /usr/local/lib/python3.7/dist-packages (from aiohttp==3.7.4->alpaca_trade_api->finrl==0.3.1) (21.2.0)\nRequirement already satisfied: typing-extensions>=3.6.5 in /usr/local/lib/python3.7/dist-packages (from aiohttp==3.7.4->alpaca_trade_api->finrl==0.3.1) (3.10.0.2)\nRequirement already satisfied: yarl<2.0,>=1.0 in /usr/local/lib/python3.7/dist-packages (from aiohttp==3.7.4->alpaca_trade_api->finrl==0.3.1) (1.6.3)\nRequirement already satisfied: cryptography>=2.6.1 in /usr/local/lib/python3.7/dist-packages (from ccxt->finrl==0.3.1) (35.0.0)\nRequirement already satisfied: aiodns>=1.1.1 in /usr/local/lib/python3.7/dist-packages (from ccxt->finrl==0.3.1) (3.0.0)\nRequirement already satisfied: pycares>=4.0.0 in /usr/local/lib/python3.7/dist-packages (from aiodns>=1.1.1->ccxt->finrl==0.3.1) (4.1.2)\nRequirement already satisfied: cffi>=1.12 in /usr/local/lib/python3.7/dist-packages 
(from cryptography>=2.6.1->ccxt->finrl==0.3.1) (1.15.0)\nRequirement already satisfied: pycparser in /usr/local/lib/python3.7/dist-packages (from cffi>=1.12->cryptography>=2.6.1->ccxt->finrl==0.3.1) (2.21)\nRequirement already satisfied: SQLAlchemy>=1.2.8 in /usr/local/lib/python3.7/dist-packages (from jqdatasdk->finrl==0.3.1) (1.4.27)\nRequirement already satisfied: thriftpy2>=0.3.9 in /usr/local/lib/python3.7/dist-packages (from jqdatasdk->finrl==0.3.1) (0.4.14)\nRequirement already satisfied: pymysql>=0.7.6 in /usr/local/lib/python3.7/dist-packages (from jqdatasdk->finrl==0.3.1) (1.0.2)\nRequirement already satisfied: importlib-metadata in /usr/local/lib/python3.7/dist-packages (from SQLAlchemy>=1.2.8->jqdatasdk->finrl==0.3.1) (4.8.2)\nRequirement already satisfied: greenlet!=0.4.17 in /usr/local/lib/python3.7/dist-packages (from SQLAlchemy>=1.2.8->jqdatasdk->finrl==0.3.1) (1.1.2)\nRequirement already satisfied: ply<4.0,>=3.4 in /usr/local/lib/python3.7/dist-packages (from thriftpy2>=0.3.9->jqdatasdk->finrl==0.3.1) (3.11)\nRequirement already satisfied: zipp>=0.5 in /usr/local/lib/python3.7/dist-packages (from importlib-metadata->SQLAlchemy>=1.2.8->jqdatasdk->finrl==0.3.1) (3.6.0)\nRequirement already satisfied: ptyprocess>=0.5 in /usr/local/lib/python3.7/dist-packages (from pexpect->ipython>=3.2.3->pyfolio@ git+https://github.com/quantopian/pyfolio.git#egg=pyfolio-0.9.2->finrl==0.3.1) (0.7.0)\nRequirement already satisfied: py>=1.5.0 in /usr/local/lib/python3.7/dist-packages (from pytest->finrl==0.3.1) (1.11.0)\nRequirement already satisfied: pluggy<0.8,>=0.5 in /usr/local/lib/python3.7/dist-packages (from pytest->finrl==0.3.1) (0.7.1)\nRequirement already satisfied: more-itertools>=4.0.0 in /usr/local/lib/python3.7/dist-packages (from pytest->finrl==0.3.1) (8.11.0)\nRequirement already satisfied: atomicwrites>=1.0 in /usr/local/lib/python3.7/dist-packages (from pytest->finrl==0.3.1) (1.4.0)\nRequirement already satisfied: protobuf>=3.15.3 in /usr/local/lib/python3.7/dist-packages (from ray[default]->finrl==0.3.1) (3.17.3)\nRequirement already satisfied: jsonschema in /usr/local/lib/python3.7/dist-packages (from ray[default]->finrl==0.3.1) (2.6.0)\nRequirement already satisfied: grpcio>=1.28.1 in /usr/local/lib/python3.7/dist-packages (from ray[default]->finrl==0.3.1) (1.42.0)\nRequirement already satisfied: filelock in /usr/local/lib/python3.7/dist-packages (from ray[default]->finrl==0.3.1) (3.4.0)\nRequirement already satisfied: click>=7.0 in /usr/local/lib/python3.7/dist-packages (from ray[default]->finrl==0.3.1) (7.1.2)\nRequirement already satisfied: redis>=3.5.0 in /usr/local/lib/python3.7/dist-packages (from ray[default]->finrl==0.3.1) (4.0.1)\nRequirement already satisfied: gpustat>=1.0.0b1 in /usr/local/lib/python3.7/dist-packages (from ray[default]->finrl==0.3.1) (1.0.0b1)\nRequirement already satisfied: aiohttp-cors in /usr/local/lib/python3.7/dist-packages (from ray[default]->finrl==0.3.1) (0.7.0)\nRequirement already satisfied: colorful in /usr/local/lib/python3.7/dist-packages (from ray[default]->finrl==0.3.1) (0.5.4)\nRequirement already satisfied: py-spy>=0.2.0 in /usr/local/lib/python3.7/dist-packages (from ray[default]->finrl==0.3.1) (0.3.11)\nRequirement already satisfied: aioredis<2 in /usr/local/lib/python3.7/dist-packages (from ray[default]->finrl==0.3.1) (1.3.1)\nRequirement already satisfied: prometheus-client>=0.7.1 in /usr/local/lib/python3.7/dist-packages (from ray[default]->finrl==0.3.1) (0.12.0)\nRequirement already satisfied: opencensus in 
/usr/local/lib/python3.7/dist-packages (from ray[default]->finrl==0.3.1) (0.8.0)\nRequirement already satisfied: hiredis in /usr/local/lib/python3.7/dist-packages (from aioredis<2->ray[default]->finrl==0.3.1) (2.0.0)\nRequirement already satisfied: blessed>=1.17.1 in /usr/local/lib/python3.7/dist-packages (from gpustat>=1.0.0b1->ray[default]->finrl==0.3.1) (1.19.0)\nRequirement already satisfied: psutil in /usr/local/lib/python3.7/dist-packages (from gpustat>=1.0.0b1->ray[default]->finrl==0.3.1) (5.4.8)\nRequirement already satisfied: nvidia-ml-py3>=7.352.0 in /usr/local/lib/python3.7/dist-packages (from gpustat>=1.0.0b1->ray[default]->finrl==0.3.1) (7.352.0)\nRequirement already satisfied: deprecated in /usr/local/lib/python3.7/dist-packages (from redis>=3.5.0->ray[default]->finrl==0.3.1) (1.2.13)\nRequirement already satisfied: wrapt<2,>=1.10 in /usr/local/lib/python3.7/dist-packages (from deprecated->redis>=3.5.0->ray[default]->finrl==0.3.1) (1.13.3)\nRequirement already satisfied: opencensus-context==0.1.2 in /usr/local/lib/python3.7/dist-packages (from opencensus->ray[default]->finrl==0.3.1) (0.1.2)\nRequirement already satisfied: google-api-core<3.0.0,>=1.0.0 in /usr/local/lib/python3.7/dist-packages (from opencensus->ray[default]->finrl==0.3.1) (1.26.3)\nRequirement already satisfied: googleapis-common-protos<2.0dev,>=1.6.0 in /usr/local/lib/python3.7/dist-packages (from google-api-core<3.0.0,>=1.0.0->opencensus->ray[default]->finrl==0.3.1) (1.53.0)\nRequirement already satisfied: google-auth<2.0dev,>=1.21.1 in /usr/local/lib/python3.7/dist-packages (from google-api-core<3.0.0,>=1.0.0->opencensus->ray[default]->finrl==0.3.1) (1.35.0)\nRequirement already satisfied: packaging>=14.3 in /usr/local/lib/python3.7/dist-packages (from google-api-core<3.0.0,>=1.0.0->opencensus->ray[default]->finrl==0.3.1) (21.3)\nRequirement already satisfied: rsa<5,>=3.1.4 in /usr/local/lib/python3.7/dist-packages (from google-auth<2.0dev,>=1.21.1->google-api-core<3.0.0,>=1.0.0->opencensus->ray[default]->finrl==0.3.1) (4.7.2)\nRequirement already satisfied: cachetools<5.0,>=2.0.0 in /usr/local/lib/python3.7/dist-packages (from google-auth<2.0dev,>=1.21.1->google-api-core<3.0.0,>=1.0.0->opencensus->ray[default]->finrl==0.3.1) (4.2.4)\nRequirement already satisfied: pyasn1-modules>=0.2.1 in /usr/local/lib/python3.7/dist-packages (from google-auth<2.0dev,>=1.21.1->google-api-core<3.0.0,>=1.0.0->opencensus->ray[default]->finrl==0.3.1) (0.2.8)\nRequirement already satisfied: pyasn1<0.5.0,>=0.4.6 in /usr/local/lib/python3.7/dist-packages (from pyasn1-modules>=0.2.1->google-auth<2.0dev,>=1.21.1->google-api-core<3.0.0,>=1.0.0->opencensus->ray[default]->finrl==0.3.1) (0.4.8)\nRequirement already satisfied: tabulate in /usr/local/lib/python3.7/dist-packages (from ray[default]->finrl==0.3.1) (0.8.9)\nRequirement already satisfied: torch>=1.8.1 in /usr/local/lib/python3.7/dist-packages (from stable-baselines3[extra]->finrl==0.3.1) (1.10.0+cu111)\nRequirement already satisfied: opencv-python in /usr/local/lib/python3.7/dist-packages (from stable-baselines3[extra]->finrl==0.3.1) (4.1.2.30)\nRequirement already satisfied: atari-py~=0.2.0 in /usr/local/lib/python3.7/dist-packages (from stable-baselines3[extra]->finrl==0.3.1) (0.2.9)\nRequirement already satisfied: tensorboard>=2.2.0 in /usr/local/lib/python3.7/dist-packages (from stable-baselines3[extra]->finrl==0.3.1) (2.7.0)\nRequirement already satisfied: pillow in /usr/local/lib/python3.7/dist-packages (from stable-baselines3[extra]->finrl==0.3.1) (7.1.2)\nRequirement 
already satisfied: absl-py>=0.4 in /usr/local/lib/python3.7/dist-packages (from tensorboard>=2.2.0->stable-baselines3[extra]->finrl==0.3.1) (0.12.0)\nRequirement already satisfied: google-auth-oauthlib<0.5,>=0.4.1 in /usr/local/lib/python3.7/dist-packages (from tensorboard>=2.2.0->stable-baselines3[extra]->finrl==0.3.1) (0.4.6)\nRequirement already satisfied: markdown>=2.6.8 in /usr/local/lib/python3.7/dist-packages (from tensorboard>=2.2.0->stable-baselines3[extra]->finrl==0.3.1) (3.3.6)\nRequirement already satisfied: tensorboard-data-server<0.7.0,>=0.6.0 in /usr/local/lib/python3.7/dist-packages (from tensorboard>=2.2.0->stable-baselines3[extra]->finrl==0.3.1) (0.6.1)\nRequirement already satisfied: tensorboard-plugin-wit>=1.6.0 in /usr/local/lib/python3.7/dist-packages (from tensorboard>=2.2.0->stable-baselines3[extra]->finrl==0.3.1) (1.8.0)\nRequirement already satisfied: werkzeug>=0.11.15 in /usr/local/lib/python3.7/dist-packages (from tensorboard>=2.2.0->stable-baselines3[extra]->finrl==0.3.1) (1.0.1)\nRequirement already satisfied: requests-oauthlib>=0.7.0 in /usr/local/lib/python3.7/dist-packages (from google-auth-oauthlib<0.5,>=0.4.1->tensorboard>=2.2.0->stable-baselines3[extra]->finrl==0.3.1) (1.3.0)\nRequirement already satisfied: oauthlib>=3.0.0 in /usr/local/lib/python3.7/dist-packages (from requests-oauthlib>=0.7.0->google-auth-oauthlib<0.5,>=0.4.1->tensorboard>=2.2.0->stable-baselines3[extra]->finrl==0.3.1) (3.1.1)\nRequirement already satisfied: int-date>=0.1.7 in /usr/local/lib/python3.7/dist-packages (from stockstats->finrl==0.3.1) (0.1.8)\nRequirement already satisfied: toolz in /usr/local/lib/python3.7/dist-packages (from trading_calendars->finrl==0.3.1) (0.11.2)\nRequirement already satisfied: psycopg2-binary in /usr/local/lib/python3.7/dist-packages (from wrds->finrl==0.3.1) (2.9.2)\nRequirement already satisfied: mock in /usr/local/lib/python3.7/dist-packages (from wrds->finrl==0.3.1) (4.0.3)\nRequirement already satisfied: multitasking>=0.0.7 in /usr/local/lib/python3.7/dist-packages (from yfinance->finrl==0.3.1) (0.0.10)\n"
],
[
"!git clone https://github.com/amritgos/FinRL.git",
"fatal: destination path 'FinRL' already exists and is not an empty directory.\n"
],
[
"cd FinRL",
"/content/FinRL\n"
],
[
"!pip install tsmoothie",
"Requirement already satisfied: tsmoothie in /usr/local/lib/python3.7/dist-packages (1.0.4)\nRequirement already satisfied: simdkalman in /usr/local/lib/python3.7/dist-packages (from tsmoothie) (1.0.2)\nRequirement already satisfied: scipy in /usr/local/lib/python3.7/dist-packages (from tsmoothie) (1.4.1)\nRequirement already satisfied: numpy in /usr/local/lib/python3.7/dist-packages (from tsmoothie) (1.19.5)\n"
],
[
"import warnings\nwarnings.filterwarnings(\"ignore\")",
"_____no_output_____"
],
[
"import pandas as pd\nimport numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt\n# matplotlib.use('Agg')\nimport datetime\n\n%matplotlib inline\nfrom finrl.apps import config\nfrom finrl.neo_finrl.preprocessor.yahoodownloader import YahooDownloader\nfrom finrl.neo_finrl.preprocessor.preprocessors import FeatureEngineer, data_split\nfrom finrl.neo_finrl.env_stock_trading.env_stocktrading import StockTradingEnv\nfrom finrl.drl_agents.stablebaselines3.models import DRLAgent,DRLEnsembleAgent\nfrom finrl.plot import backtest_stats, backtest_plot, get_daily_return, get_baseline, convert_daily_return_to_pyfolio_ts\nfrom tsmoothie.smoother import *\nfrom tsmoothie.utils_func import create_windows\n\nfrom pprint import pprint\n\nimport sys\nsys.path.append(\"../FinRL-Library\")\n\nimport itertools",
"_____no_output_____"
],
[
"import os\nif not os.path.exists(\"./\" + config.DATA_SAVE_DIR):\n os.makedirs(\"./\" + config.DATA_SAVE_DIR)\nif not os.path.exists(\"./\" + config.TRAINED_MODEL_DIR):\n os.makedirs(\"./\" + config.TRAINED_MODEL_DIR)\nif not os.path.exists(\"./\" + config.TENSORBOARD_LOG_DIR):\n os.makedirs(\"./\" + config.TENSORBOARD_LOG_DIR)\nif not os.path.exists(\"./\" + config.RESULTS_DIR):\n os.makedirs(\"./\" + config.RESULTS_DIR)",
"_____no_output_____"
],
[
"df = YahooDownloader(start_date = '2008-01-01',\n end_date = '2021-01-01',\n ticker_list = config.SENSEX_30_TICKER).fetch_data()",
"[*********************100%***********************] 1 of 1 completed\n[*********************100%***********************] 1 of 1 completed\n[*********************100%***********************] 1 of 1 completed\n[*********************100%***********************] 1 of 1 completed\n[*********************100%***********************] 1 of 1 completed\n[*********************100%***********************] 1 of 1 completed\n[*********************100%***********************] 1 of 1 completed\n[*********************100%***********************] 1 of 1 completed\n[*********************100%***********************] 1 of 1 completed\n[*********************100%***********************] 1 of 1 completed\n[*********************100%***********************] 1 of 1 completed\n[*********************100%***********************] 1 of 1 completed\n[*********************100%***********************] 1 of 1 completed\n[*********************100%***********************] 1 of 1 completed\n[*********************100%***********************] 1 of 1 completed\n[*********************100%***********************] 1 of 1 completed\n[*********************100%***********************] 1 of 1 completed\n[*********************100%***********************] 1 of 1 completed\n[*********************100%***********************] 1 of 1 completed\n[*********************100%***********************] 1 of 1 completed\n[*********************100%***********************] 1 of 1 completed\n[*********************100%***********************] 1 of 1 completed\n[*********************100%***********************] 1 of 1 completed\n[*********************100%***********************] 1 of 1 completed\n[*********************100%***********************] 1 of 1 completed\n[*********************100%***********************] 1 of 1 completed\n[*********************100%***********************] 1 of 1 completed\n[*********************100%***********************] 1 of 1 completed\n[*********************100%***********************] 1 of 1 completed\n[*********************100%***********************] 1 of 1 completed\nShape of DataFrame: (95750, 8)\n"
],
[
"df.head()",
"_____no_output_____"
],
[
"def add_covariance_matrix(df):\n # add covariance matrix as states\n df=df.sort_values(['date','tic'],ignore_index=True)\n df.index = df.date.factorize()[0]\n\n cov_list = []\n return_list = []\n\n # look back is one year\n lookback=252\n for i in range(lookback,len(df.index.unique())):\n data_lookback = df.loc[i-lookback:i,:]\n price_lookback=data_lookback.pivot_table(index = 'date',columns = 'tic', values = 'close')\n return_lookback = price_lookback.pct_change().dropna()\n return_list.append(return_lookback)\n\n covs = return_lookback.cov().values \n cov_list.append(covs)\n\n \n df_cov = pd.DataFrame({'date':df.date.unique()[lookback:],'cov_list':cov_list,'return_list':return_list})\n df = df.merge(df_cov, on='date')\n df = df.sort_values(['date','tic']).reset_index(drop=True)\n\n return df\n ",
"_____no_output_____"
],
[
"df_org = df.copy()\nfor ticker in df['tic'].unique():\n df_tic = df.loc[df.tic==ticker]\n smoother = KalmanSmoother(component='level_longseason', \n component_noise={'level':0.1, 'longseason':0.1}, \n n_longseasons=252)\n smoother.smooth(df_tic['close'].T)\n close_smooth = smoother.smooth_data\n df.loc[df.tic==ticker,'close'] = close_smooth[0]\n",
"_____no_output_____"
],
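[
"# Optional visual check (not part of the original notebook): compare the raw\n# close series (kept in df_org) with the Kalman-smoothed series now stored in\n# df for one ticker, to see how much high-frequency noise the smoother removed.\nimport matplotlib.pyplot as plt\n\nticker = df['tic'].unique()[0]\nplt.figure(figsize=(10, 4))\nplt.plot(df_org.loc[df_org.tic == ticker, 'close'].values, label='raw close')\nplt.plot(df.loc[df.tic == ticker, 'close'].values, label='smoothed close')\nplt.title(ticker)\nplt.legend()\nplt.show()",
"_____no_output_____"
],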
[
"processed = add_covariance_matrix(df)\nprocessed = processed.drop('return_list', axis=1)\nprocessed.head()",
"_____no_output_____"
],
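[
"# Optional sanity check (not part of the original notebook): every entry of\n# 'cov_list' should be an N x N covariance matrix of daily returns, where N is\n# the number of tickers; annualising its diagonal gives a rough per-stock\n# volatility estimate (252 trading days assumed).\nimport numpy as np\n\nsample_cov = np.asarray(processed['cov_list'].iloc[0])\nprint('covariance matrix shape:', sample_cov.shape)\nprint('annualised volatilities:', np.sqrt(np.diag(sample_cov) * 252))",
"_____no_output_____"
],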
[
"# vix_df = pd.read_csv('datasets/IndiaVIX_data.csv')\nsentiment_df = pd.read_csv('datasets/Sentiment_Compiled.csv')\nsentiment_df",
"_____no_output_____"
],
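[
"# Optional check (not part of the original notebook; assumes the compiled\n# sentiment file has a 'date' column): inspect its columns and date coverage\n# before FeatureEngineer merges it in below.\nprint(sentiment_df.columns.tolist())\nif 'date' in sentiment_df.columns:\n    print('date range:', sentiment_df['date'].min(), '->', sentiment_df['date'].max())",
"_____no_output_____"
],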
[
"fe = FeatureEngineer(\n use_technical_indicator=True,\n use_turbulence=False,\n use_vix=False,\n use_sentiment=True,\n sentiment_df=sentiment_df,\n user_defined_feature = True)\n\nprocessed = fe.preprocess_data(processed)\n\nprocessed_close = pd.merge(processed[['date','tic']],df_org[['date','tic','close']], on=['date','tic'], how='inner') \n\nprocessed['close'] = processed_close['close']",
"Successfully added technical indicators\nSuccessfully added Sentiment Features\nSuccessfully added user defined features\n"
],
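[
"# Optional sanity check (not part of the original notebook): list the columns\n# produced by FeatureEngineer and confirm no NaNs were introduced when the\n# unsmoothed close prices were merged back into 'processed'.\nprint(processed.columns.tolist())\nprint('rows containing NaNs:', processed.drop(columns=['cov_list']).isna().any(axis=1).sum())",
"_____no_output_____"
],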
[
"train = data_split(processed, '2011-01-01','2019-01-01')\ntrain",
"_____no_output_____"
],
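[
"# Optional check (not part of the original notebook): confirm the training\n# split covers the requested window and count the trading days it contains.\nprint('train window:', train.date.min(), '->', train.date.max())\nprint('trading days:', len(train.index.unique()))",
"_____no_output_____"
],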
[
"import numpy as np\nimport pandas as pd\nfrom gym.utils import seeding\nimport gym\nfrom gym import spaces\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nfrom stable_baselines3.common.vec_env import DummyVecEnv\n\n\nclass StockPortfolioEnv(gym.Env):\n\n metadata = {'render.modes': ['human']}\n\n def __init__(self, \n df,\n stock_dim,\n hmax,\n initial_amount,\n transaction_cost_pct,\n reward_scaling,\n state_space,\n action_space,\n tech_indicator_list=[],\n sentiment_indicator_list=[],\n user_indicator_list=[],\n turbulence_threshold=None,\n lookback=252,\n day = 0):\n #super(StockEnv, self).__init__()\n #money = 10 , scope = 1\n self.day = day\n self.lookback=lookback\n self.df = df\n self.stock_dim = stock_dim\n self.hmax = hmax\n self.initial_amount = initial_amount\n self.transaction_cost_pct =transaction_cost_pct\n self.reward_scaling = reward_scaling\n self.state_space = state_space\n self.action_space = action_space\n self.tech_indicator_list = tech_indicator_list\n self.sentiment_indicator_list = sentiment_indicator_list\n self.user_indicator_list = user_indicator_list\n\n # action_space normalization and shape is self.stock_dim\n self.action_space = spaces.Box(low = 0, high = 1,shape = (self.action_space,)) \n # Shape = (34, 30)\n # covariance matrix + technical indicators\n # self.observation_space = spaces.Box(low=-np.inf, high=np.inf, shape = (self.state_space+len(self.tech_indicator_list)+len(self.sentiment_indicator_list)+len(self.user_indicator_list),self.state_space))\n self.observation_space = spaces.Box(low=-np.inf, high=np.inf, shape = (1+len(self.tech_indicator_list)+len(self.sentiment_indicator_list)+len(self.user_indicator_list),self.state_space))\n\n # load data from a pandas dataframe\n self.data = self.df.loc[self.day,:]\n self.covs = self.data['cov_list'].values[0]\n # state_array = np.append(np.array(self.covs), [self.data[tech].values.tolist() for tech in self.tech_indicator_list ], axis=0)\n # state_array = np.append(state_array, [self.data[sent].values.tolist() for sent in self.sentiment_indicator_list ], axis=0)\n # state_array = np.append(state_array, [self.data[user].values.tolist() for user in self.user_indicator_list ], axis=0)\n # self.state = state_array\n\n state_array = np.zeros([1,self.state_space])\n if len(self.tech_indicator_list) > 0:\n state_array = np.append(state_array, [self.data[tech].values.tolist() for tech in self.tech_indicator_list], axis=0)\n state_array = np.append(state_array, [self.data[sent].values.tolist() for sent in self.sentiment_indicator_list ], axis=0)\n state_array = np.append(state_array, [self.data[user].values.tolist() for user in self.user_indicator_list ], axis=0)\n self.state = state_array\n\n self.terminal = False \n self.turbulence_threshold = turbulence_threshold \n # initalize state: inital portfolio return + individual stock return + individual weights\n self.portfolio_value = self.initial_amount\n\n # memorize portfolio value each step\n self.asset_memory = [self.initial_amount]\n # memorize portfolio return each step\n self.portfolio_return_memory = [0]\n self.actions_memory=[[1/self.stock_dim]*self.stock_dim]\n self.date_memory=[self.data.date.unique()[0]]\n\n \n def step(self, actions):\n # print(self.day)\n self.terminal = self.day >= len(self.df.index.unique())-1\n # print(actions)\n\n if self.terminal:\n df = pd.DataFrame(self.portfolio_return_memory)\n df.columns = ['daily_return']\n plt.plot(df.daily_return.cumsum(),'r')\n plt.savefig('results/cumulative_reward.png')\n 
plt.close()\n \n plt.plot(self.portfolio_return_memory,'r')\n plt.savefig('results/rewards.png')\n plt.close()\n\n print(\"=================================\")\n print(\"begin_total_asset:{}\".format(self.asset_memory[0])) \n print(\"end_total_asset:{}\".format(self.portfolio_value))\n\n df_daily_return = pd.DataFrame(self.portfolio_return_memory)\n df_daily_return.columns = ['daily_return']\n if df_daily_return['daily_return'].std() !=0:\n sharpe = (252**0.5)*df_daily_return['daily_return'].mean()/ \\\n df_daily_return['daily_return'].std()\n print(\"Sharpe: \",sharpe)\n print(\"=================================\")\n \n return self.state, self.reward, self.terminal,{}\n\n else:\n #print(\"Model actions: \",actions)\n # actions are the portfolio weight\n # normalize to sum of 1\n #if (np.array(actions) - np.array(actions).min()).sum() != 0:\n # norm_actions = (np.array(actions) - np.array(actions).min()) / (np.array(actions) - np.array(actions).min()).sum()\n #else:\n # norm_actions = actions\n weights = self.softmax_normalization(actions) \n #print(\"Normalized actions: \", weights)\n self.actions_memory.append(weights)\n last_day_memory = self.data\n\n #load next state\n self.day += 1\n self.data = self.df.loc[self.day,:]\n self.covs = self.data['cov_list'].values[0]\n\n # state_array = np.append(np.array(self.covs), [self.data[tech].values.tolist() for tech in self.tech_indicator_list ], axis=0)\n # state_array = np.append(state_array, [self.data[sent].values.tolist() for sent in self.sentiment_indicator_list ], axis=0)\n # state_array = np.append(state_array, [self.data[user].values.tolist() for user in self.user_indicator_list ], axis=0)\n # self.state = state_array\n\n state_array = [weights]\n if len(self.tech_indicator_list) > 0:\n state_array = np.append(state_array, [self.data[tech].values.tolist() for tech in self.tech_indicator_list], axis=0)\n state_array = np.append(state_array, [self.data[sent].values.tolist() for sent in self.sentiment_indicator_list ], axis=0)\n state_array = np.append(state_array, [self.data[user].values.tolist() for user in self.user_indicator_list ], axis=0)\n self.state = state_array\n\n # calcualte portfolio return\n # individual stocks' return * weight\n portfolio_return = sum(((self.data.close.values / last_day_memory.close.values)-1)*weights)\n\n # update portfolio value\n new_portfolio_value = self.portfolio_value*(1+portfolio_return)\n self.portfolio_value = new_portfolio_value\n\n # update portfolio risk\n port_variance = np.matmul(weights.T,np.matmul(self.covs,weights))\n\n # print('portfolio variance', port_variance)\n # print('weights', sum(weights))\n # print('portfolio returns', portfolio_return)\n\n # save into memory\n self.portfolio_return_memory.append(portfolio_return)\n self.date_memory.append(self.data.date.unique()[0]) \n self.asset_memory.append(new_portfolio_value)\n\n # the reward is the new portfolio value or end portfolo value\n # self.reward = portfolio_return/np.sqrt(port_variance)\n self.reward = self.portfolio_value\n\n # self.reward = 1/np.sqrt(port_variance)\n\n self.reward = self.reward*self.reward_scaling\n\n return self.state, self.reward, self.terminal, {}\n\n def reset(self):\n self.asset_memory = [self.initial_amount]\n self.day = 0\n self.data = self.df.loc[self.day,:]\n # load states\n self.covs = self.data['cov_list'].values[0]\n\n # state_array = np.append(np.array(self.covs), [self.data[tech].values.tolist() for tech in self.tech_indicator_list ], axis=0)\n # state_array = np.append(state_array, 
[self.data[sent].values.tolist() for sent in self.sentiment_indicator_list ], axis=0)\n # state_array = np.append(state_array, [self.data[user].values.tolist() for user in self.user_indicator_list ], axis=0)\n # self.state = state_array\n\n state_array = np.zeros([1,self.state_space])\n if len(self.tech_indicator_list) > 0:\n state_array = np.append(state_array, [self.data[tech].values.tolist() for tech in self.tech_indicator_list], axis=0)\n state_array = np.append(state_array, [self.data[sent].values.tolist() for sent in self.sentiment_indicator_list ], axis=0)\n state_array = np.append(state_array, [self.data[user].values.tolist() for user in self.user_indicator_list ], axis=0)\n self.state = state_array\n\n self.portfolio_value = self.initial_amount\n #self.cost = 0\n #self.trades = 0\n self.terminal = False \n self.portfolio_return_memory = [0]\n self.actions_memory=[[1/self.stock_dim]*self.stock_dim]\n self.date_memory=[self.data.date.unique()[0]] \n return self.state\n \n def render(self, mode='human'):\n return self.state\n \n def softmax_normalization(self, actions):\n numerator = np.exp(actions)\n denominator = np.sum(np.exp(actions))\n softmax_output = numerator/denominator\n return softmax_output\n\n \n def save_asset_memory(self):\n date_list = self.date_memory\n portfolio_return = self.portfolio_return_memory\n #print(len(date_list))\n #print(len(asset_list))\n df_account_value = pd.DataFrame({'date':date_list,'daily_return':portfolio_return})\n return df_account_value\n\n def save_action_memory(self):\n # date and close price length must match actions length\n date_list = self.date_memory\n df_date = pd.DataFrame(date_list)\n df_date.columns = ['date']\n \n action_list = self.actions_memory\n df_actions = pd.DataFrame(action_list)\n df_actions.columns = self.data.tic.values\n df_actions.index = df_date.date\n #df_actions = pd.DataFrame({'date':date_list,'actions':action_list})\n return df_actions\n\n def _seed(self, seed=None):\n self.np_random, seed = seeding.np_random(seed)\n return [seed]\n\n def get_sb_env(self):\n e = DummyVecEnv([lambda: self])\n obs = e.reset()\n return e, obs",
"_____no_output_____"
],
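[
"# Minimal illustration (not part of the original notebook) of the env's\n# softmax_normalization: raw, unbounded action scores are mapped to positive\n# portfolio weights that sum to 1 (a long-only, fully-invested portfolio).\nimport numpy as np\n\nraw_actions = np.array([0.2, -1.0, 3.0])\nweights = np.exp(raw_actions) / np.sum(np.exp(raw_actions))\nprint(weights, weights.sum())",
"_____no_output_____"
],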
[
"stock_dimension = len(train.tic.unique())\nstate_space = stock_dimension\nprint(f\"Stock Dimension: {stock_dimension}, State Space: {state_space}\")\n\nenv_kwargs = {\n \"hmax\": 1000, \n \"initial_amount\": 1000000, \n \"transaction_cost_pct\": 0.001, \n \"state_space\": state_space, \n \"stock_dim\": stock_dimension, \n # \"tech_indicator_list\": config.TECHNICAL_INDICATORS_LIST, \n \"sentiment_indicator_list\": config.SENTIMENT_INDICATORS_LIST, \n \"user_indicator_list\": config.USER_INDICATORS_LIST,\n \"action_space\": stock_dimension, \n \"reward_scaling\": 100\n \n}\n\ne_train_gym = StockPortfolioEnv(df = train, **env_kwargs)",
"Stock Dimension: 30, State Space: 30\n"
],
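[
"# Optional smoke test (not part of the original notebook): reset the raw gym\n# environment, take one random step, and confirm the observation shape matches\n# (1 + #indicators, stock_dim) and the softmax-normalised weights sum to 1.\nimport numpy as np\n\nobs = e_train_gym.reset()\nprint('observation shape:', np.asarray(obs).shape)\nobs, reward, done, info = e_train_gym.step(np.random.uniform(0, 1, size=stock_dimension))\nprint('one-step reward:', reward)\nprint('weights sum:', float(np.sum(e_train_gym.actions_memory[-1])))\n\n# reset again so training below starts from day 0\n_ = e_train_gym.reset()",
"_____no_output_____"
],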
[
"env_train, _ = e_train_gym.get_sb_env()\nprint(type(env_train))",
"<class 'stable_baselines3.common.vec_env.dummy_vec_env.DummyVecEnv'>\n"
],
[
"# initialize\n\n# from torch.nn import Softsign, ReLU\n# ppo_params ={'n_steps': 128, \n# 'ent_coef': 0.01, \n# 'learning_rate': 0.0025, \n# 'batch_size': 64, \n# 'gamma': 0.99}\n\n# policy_kwargs = {\n# # \"activation_fn\": ReLU,\n# \"net_arch\": [1024, 1024, 1024, 256, 64], \n# # \"squash_output\": True\n# }\n\n\nagent = DRLAgent(env = env_train)\n\nA2C_PARAMS = {\"n_steps\": 5, \"ent_coef\": 0.005, \"learning_rate\": 0.0025}\nmodel_a2c = agent.get_model(model_name=\"a2c\",model_kwargs = A2C_PARAMS)\n\ntrained_a2c = agent.train_model(model=model_a2c, \n tb_log_name='a2c',\n total_timesteps=60000)",
"{'n_steps': 5, 'ent_coef': 0.005, 'learning_rate': 0.0025}\nUsing cpu device\nLogging to tensorboard_log/a2c/a2c_4\n------------------------------------\n| time/ | |\n| fps | 276 |\n| iterations | 100 |\n| time_elapsed | 1 |\n| total_timesteps | 500 |\n| train/ | |\n| entropy_loss | -42 |\n| explained_variance | 1.19e-07 |\n| learning_rate | 0.0025 |\n| n_updates | 99 |\n| policy_loss | 1.67e+10 |\n| std | 0.984 |\n| value_loss | 1.74e+17 |\n------------------------------------\n-------------------------------------\n| time/ | |\n| fps | 288 |\n| iterations | 200 |\n| time_elapsed | 3 |\n| total_timesteps | 1000 |\n| train/ | |\n| entropy_loss | -41.7 |\n| explained_variance | -1.19e-07 |\n| learning_rate | 0.0025 |\n| n_updates | 199 |\n| policy_loss | 2.92e+10 |\n| std | 0.974 |\n| value_loss | 5.88e+17 |\n-------------------------------------\n------------------------------------\n| time/ | |\n| fps | 291 |\n| iterations | 300 |\n| time_elapsed | 5 |\n| total_timesteps | 1500 |\n| train/ | |\n| entropy_loss | -41.6 |\n| explained_variance | 2.38e-07 |\n| learning_rate | 0.0025 |\n| n_updates | 299 |\n| policy_loss | 3.28e+10 |\n| std | 0.97 |\n| value_loss | 8.34e+17 |\n------------------------------------\n=================================\nbegin_total_asset:1000000\nend_total_asset:4043135.497793255\nSharpe: 1.312930194873917\n=================================\n------------------------------------\n| time/ | |\n| fps | 280 |\n| iterations | 400 |\n| time_elapsed | 7 |\n| total_timesteps | 2000 |\n| train/ | |\n| entropy_loss | -41.5 |\n| explained_variance | 0 |\n| learning_rate | 0.0025 |\n| n_updates | 399 |\n| policy_loss | 1.07e+10 |\n| std | 0.968 |\n| value_loss | 8.42e+16 |\n------------------------------------\n------------------------------------\n| time/ | |\n| fps | 274 |\n| iterations | 500 |\n| time_elapsed | 9 |\n| total_timesteps | 2500 |\n| train/ | |\n| entropy_loss | -41.4 |\n| explained_variance | 0 |\n| learning_rate | 0.0025 |\n| n_updates | 499 |\n| policy_loss | 1.73e+10 |\n| std | 0.964 |\n| value_loss | 1.9e+17 |\n------------------------------------\n------------------------------------\n| time/ | |\n| fps | 275 |\n| iterations | 600 |\n| time_elapsed | 10 |\n| total_timesteps | 3000 |\n| train/ | |\n| entropy_loss | -41.3 |\n| explained_variance | 0 |\n| learning_rate | 0.0025 |\n| n_updates | 599 |\n| policy_loss | 3.1e+10 |\n| std | 0.96 |\n| value_loss | 6.46e+17 |\n------------------------------------\n------------------------------------\n| time/ | |\n| fps | 277 |\n| iterations | 700 |\n| time_elapsed | 12 |\n| total_timesteps | 3500 |\n| train/ | |\n| entropy_loss | -41.2 |\n| explained_variance | 0 |\n| learning_rate | 0.0025 |\n| n_updates | 699 |\n| policy_loss | 3.76e+10 |\n| std | 0.959 |\n| value_loss | 9.24e+17 |\n------------------------------------\n=================================\nbegin_total_asset:1000000\nend_total_asset:4196858.192622206\nSharpe: 1.3225855447348187\n=================================\n------------------------------------\n| time/ | |\n| fps | 272 |\n| iterations | 800 |\n| time_elapsed | 14 |\n| total_timesteps | 4000 |\n| train/ | |\n| entropy_loss | -41.1 |\n| explained_variance | 1.19e-07 |\n| learning_rate | 0.0025 |\n| n_updates | 799 |\n| policy_loss | 1.28e+10 |\n| std | 0.955 |\n| value_loss | 1.02e+17 |\n------------------------------------\n------------------------------------\n| time/ | |\n| fps | 272 |\n| iterations | 900 |\n| time_elapsed | 16 |\n| total_timesteps | 4500 |\n| train/ | |\n| entropy_loss | 
-41.1 |\n| explained_variance | 1.19e-07 |\n| learning_rate | 0.0025 |\n| n_updates | 899 |\n| policy_loss | 1.52e+10 |\n| std | 0.955 |\n| value_loss | 1.79e+17 |\n------------------------------------\n------------------------------------\n| time/ | |\n| fps | 273 |\n| iterations | 1000 |\n| time_elapsed | 18 |\n| total_timesteps | 5000 |\n| train/ | |\n| entropy_loss | -41 |\n| explained_variance | 0 |\n| learning_rate | 0.0025 |\n| n_updates | 999 |\n| policy_loss | 3e+10 |\n| std | 0.953 |\n| value_loss | 6.34e+17 |\n------------------------------------\n------------------------------------\n| time/ | |\n| fps | 273 |\n| iterations | 1100 |\n| time_elapsed | 20 |\n| total_timesteps | 5500 |\n| train/ | |\n| entropy_loss | -40.9 |\n| explained_variance | 0 |\n| learning_rate | 0.0025 |\n| n_updates | 1099 |\n| policy_loss | 3.92e+10 |\n| std | 0.949 |\n| value_loss | 9.99e+17 |\n------------------------------------\n=================================\nbegin_total_asset:1000000\nend_total_asset:4241993.507947351\nSharpe: 1.332351100010289\n=================================\n------------------------------------\n| time/ | |\n| fps | 263 |\n| iterations | 1200 |\n| time_elapsed | 22 |\n| total_timesteps | 6000 |\n| train/ | |\n| entropy_loss | -40.8 |\n| explained_variance | 5.96e-08 |\n| learning_rate | 0.0025 |\n| n_updates | 1199 |\n| policy_loss | 1.17e+10 |\n| std | 0.946 |\n| value_loss | 9.68e+16 |\n------------------------------------\n------------------------------------\n| time/ | |\n| fps | 265 |\n| iterations | 1300 |\n| time_elapsed | 24 |\n| total_timesteps | 6500 |\n| train/ | |\n| entropy_loss | -40.6 |\n| explained_variance | 0 |\n| learning_rate | 0.0025 |\n| n_updates | 1299 |\n| policy_loss | 1.9e+10 |\n| std | 0.94 |\n| value_loss | 2.15e+17 |\n------------------------------------\n------------------------------------\n| time/ | |\n| fps | 266 |\n| iterations | 1400 |\n| time_elapsed | 26 |\n| total_timesteps | 7000 |\n| train/ | |\n| entropy_loss | -40.6 |\n| explained_variance | 0 |\n| learning_rate | 0.0025 |\n| n_updates | 1399 |\n| policy_loss | 2.92e+10 |\n| std | 0.939 |\n| value_loss | 5.79e+17 |\n------------------------------------\n------------------------------------\n| time/ | |\n| fps | 267 |\n| iterations | 1500 |\n| time_elapsed | 28 |\n| total_timesteps | 7500 |\n| train/ | |\n| entropy_loss | -40.4 |\n| explained_variance | 0 |\n| learning_rate | 0.0025 |\n| n_updates | 1499 |\n| policy_loss | 3.72e+10 |\n| std | 0.934 |\n| value_loss | 1.01e+18 |\n------------------------------------\n=================================\nbegin_total_asset:1000000\nend_total_asset:4189665.7951989216\nSharpe: 1.3202894746027989\n=================================\n------------------------------------\n| time/ | |\n| fps | 267 |\n| iterations | 1600 |\n| time_elapsed | 29 |\n| total_timesteps | 8000 |\n| train/ | |\n| entropy_loss | -40.2 |\n| explained_variance | 1.19e-07 |\n| learning_rate | 0.0025 |\n| n_updates | 1599 |\n| policy_loss | 1.14e+10 |\n| std | 0.927 |\n| value_loss | 1.05e+17 |\n------------------------------------\n------------------------------------\n| time/ | |\n| fps | 268 |\n| iterations | 1700 |\n| time_elapsed | 31 |\n| total_timesteps | 8500 |\n| train/ | |\n| entropy_loss | -40.2 |\n| explained_variance | 5.96e-08 |\n| learning_rate | 0.0025 |\n| n_updates | 1699 |\n| policy_loss | 1.67e+10 |\n| std | 0.928 |\n| value_loss | 2e+17 |\n------------------------------------\n-------------------------------------\n| time/ | |\n| fps | 269 |\n| 
iterations | 1800 |\n| time_elapsed | 33 |\n| total_timesteps | 9000 |\n| train/ | |\n| entropy_loss | -40 |\n| explained_variance | -1.19e-07 |\n| learning_rate | 0.0025 |\n| n_updates | 1799 |\n| policy_loss | 2.98e+10 |\n| std | 0.922 |\n| value_loss | 6.97e+17 |\n-------------------------------------\n------------------------------------\n| time/ | |\n| fps | 270 |\n| iterations | 1900 |\n| time_elapsed | 35 |\n| total_timesteps | 9500 |\n| train/ | |\n| entropy_loss | -39.8 |\n| explained_variance | 5.36e-07 |\n| learning_rate | 0.0025 |\n| n_updates | 1899 |\n| policy_loss | 3.77e+10 |\n| std | 0.916 |\n| value_loss | 1.14e+18 |\n------------------------------------\n=================================\nbegin_total_asset:1000000\nend_total_asset:4060769.1714057405\nSharpe: 1.2956323015146667\n=================================\n------------------------------------\n| time/ | |\n| fps | 270 |\n| iterations | 2000 |\n| time_elapsed | 36 |\n| total_timesteps | 10000 |\n| train/ | |\n| entropy_loss | -39.8 |\n| explained_variance | 1.19e-07 |\n| learning_rate | 0.0025 |\n| n_updates | 1999 |\n| policy_loss | 1.07e+10 |\n| std | 0.914 |\n| value_loss | 9.91e+16 |\n------------------------------------\n------------------------------------\n| time/ | |\n| fps | 272 |\n| iterations | 2100 |\n| time_elapsed | 38 |\n| total_timesteps | 10500 |\n| train/ | |\n| entropy_loss | -39.8 |\n| explained_variance | 0 |\n| learning_rate | 0.0025 |\n| n_updates | 2099 |\n| policy_loss | 1.43e+10 |\n| std | 0.915 |\n| value_loss | 1.63e+17 |\n------------------------------------\n------------------------------------\n| time/ | |\n| fps | 272 |\n| iterations | 2200 |\n| time_elapsed | 40 |\n| total_timesteps | 11000 |\n| train/ | |\n| entropy_loss | -39.8 |\n| explained_variance | 0 |\n| learning_rate | 0.0025 |\n| n_updates | 2199 |\n| policy_loss | 2.36e+10 |\n| std | 0.917 |\n| value_loss | 5.07e+17 |\n------------------------------------\n------------------------------------\n| time/ | |\n| fps | 274 |\n| iterations | 2300 |\n| time_elapsed | 41 |\n| total_timesteps | 11500 |\n| train/ | |\n| entropy_loss | -39.6 |\n| explained_variance | 0 |\n| learning_rate | 0.0025 |\n| n_updates | 2299 |\n| policy_loss | 3.73e+10 |\n| std | 0.91 |\n| value_loss | 9.95e+17 |\n------------------------------------\n=================================\nbegin_total_asset:1000000\nend_total_asset:3831022.656906473\nSharpe: 1.2293848380955916\n=================================\n------------------------------------\n| time/ | |\n| fps | 274 |\n| iterations | 2400 |\n| time_elapsed | 43 |\n| total_timesteps | 12000 |\n| train/ | |\n| entropy_loss | -39.6 |\n| explained_variance | 5.96e-08 |\n| learning_rate | 0.0025 |\n| n_updates | 2399 |\n| policy_loss | 1.08e+10 |\n| std | 0.909 |\n| value_loss | 9.47e+16 |\n------------------------------------\n------------------------------------\n| time/ | |\n| fps | 274 |\n| iterations | 2500 |\n| time_elapsed | 45 |\n| total_timesteps | 12500 |\n| train/ | |\n| entropy_loss | -39.2 |\n| explained_variance | 1.79e-07 |\n| learning_rate | 0.0025 |\n| n_updates | 2499 |\n| policy_loss | 1.61e+10 |\n| std | 0.9 |\n| value_loss | 1.99e+17 |\n------------------------------------\n------------------------------------\n| time/ | |\n| fps | 275 |\n| iterations | 2600 |\n| time_elapsed | 47 |\n| total_timesteps | 13000 |\n| train/ | |\n| entropy_loss | -39 |\n| explained_variance | 0 |\n| learning_rate | 0.0025 |\n| n_updates | 2599 |\n| policy_loss | 2.82e+10 |\n| std | 0.894 |\n| value_loss | 
5.62e+17 |\n------------------------------------\n-------------------------------------\n| time/ | |\n| fps | 276 |\n| iterations | 2700 |\n| time_elapsed | 48 |\n| total_timesteps | 13500 |\n| train/ | |\n| entropy_loss | -38.9 |\n| explained_variance | -2.38e-07 |\n| learning_rate | 0.0025 |\n| n_updates | 2699 |\n| policy_loss | 3.65e+10 |\n| std | 0.891 |\n| value_loss | 1.08e+18 |\n-------------------------------------\n=================================\nbegin_total_asset:1000000\nend_total_asset:3693360.0686548604\nSharpe: 1.2111940241142194\n=================================\n------------------------------------\n| time/ | |\n| fps | 276 |\n| iterations | 2800 |\n| time_elapsed | 50 |\n| total_timesteps | 14000 |\n| train/ | |\n| entropy_loss | -38.7 |\n| explained_variance | 0 |\n| learning_rate | 0.0025 |\n| n_updates | 2799 |\n| policy_loss | 1.04e+10 |\n| std | 0.886 |\n| value_loss | 9.86e+16 |\n------------------------------------\n-------------------------------------\n| time/ | |\n| fps | 277 |\n| iterations | 2900 |\n| time_elapsed | 52 |\n| total_timesteps | 14500 |\n| train/ | |\n| entropy_loss | -38.5 |\n| explained_variance | -1.19e-07 |\n| learning_rate | 0.0025 |\n| n_updates | 2899 |\n| policy_loss | 1.6e+10 |\n| std | 0.878 |\n| value_loss | 2.17e+17 |\n-------------------------------------\n------------------------------------\n| time/ | |\n| fps | 278 |\n| iterations | 3000 |\n| time_elapsed | 53 |\n| total_timesteps | 15000 |\n| train/ | |\n| entropy_loss | -38.5 |\n| explained_variance | 0 |\n| learning_rate | 0.0025 |\n| n_updates | 2999 |\n| policy_loss | 2.45e+10 |\n| std | 0.882 |\n| value_loss | 5.19e+17 |\n------------------------------------\n------------------------------------\n| time/ | |\n| fps | 278 |\n| iterations | 3100 |\n| time_elapsed | 55 |\n| total_timesteps | 15500 |\n| train/ | |\n| entropy_loss | -38.5 |\n| explained_variance | 0 |\n| learning_rate | 0.0025 |\n| n_updates | 3099 |\n| policy_loss | 3.7e+10 |\n| std | 0.882 |\n| value_loss | 1.27e+18 |\n------------------------------------\n=================================\nbegin_total_asset:1000000\nend_total_asset:3914260.327426779\nSharpe: 1.2595104512333415\n=================================\n------------------------------------\n| time/ | |\n| fps | 277 |\n| iterations | 3200 |\n| time_elapsed | 57 |\n| total_timesteps | 16000 |\n| train/ | |\n| entropy_loss | -38.1 |\n| explained_variance | 0 |\n| learning_rate | 0.0025 |\n| n_updates | 3199 |\n| policy_loss | 1.07e+10 |\n| std | 0.871 |\n| value_loss | 8.55e+16 |\n------------------------------------\n-------------------------------------\n| time/ | |\n| fps | 276 |\n| iterations | 3300 |\n| time_elapsed | 59 |\n| total_timesteps | 16500 |\n| train/ | |\n| entropy_loss | -38 |\n| explained_variance | -1.19e-07 |\n| learning_rate | 0.0025 |\n| n_updates | 3299 |\n| policy_loss | 1.64e+10 |\n| std | 0.869 |\n| value_loss | 2.38e+17 |\n-------------------------------------\n------------------------------------\n| time/ | |\n| fps | 276 |\n| iterations | 3400 |\n| time_elapsed | 61 |\n| total_timesteps | 17000 |\n| train/ | |\n| entropy_loss | -37.7 |\n| explained_variance | 0 |\n| learning_rate | 0.0025 |\n| n_updates | 3399 |\n| policy_loss | 2.65e+10 |\n| std | 0.86 |\n| value_loss | 5.48e+17 |\n------------------------------------\n-------------------------------------\n| time/ | |\n| fps | 275 |\n| iterations | 3500 |\n| time_elapsed | 63 |\n| total_timesteps | 17500 |\n| train/ | |\n| entropy_loss | -37.7 |\n| explained_variance | 
-1.19e-07 |\n| learning_rate | 0.0025 |\n| n_updates | 3499 |\n| policy_loss | 4.27e+10 |\n| std | 0.86 |\n| value_loss | 1.63e+18 |\n-------------------------------------\n=================================\nbegin_total_asset:1000000\nend_total_asset:4192364.623585301\nSharpe: 1.3423936544880872\n=================================\n-------------------------------------\n| time/ | |\n| fps | 274 |\n| iterations | 3600 |\n| time_elapsed | 65 |\n| total_timesteps | 18000 |\n| train/ | |\n| entropy_loss | -37.6 |\n| explained_variance | -2.38e-07 |\n| learning_rate | 0.0025 |\n| n_updates | 3599 |\n| policy_loss | 1.27e+10 |\n| std | 0.858 |\n| value_loss | 1.27e+17 |\n-------------------------------------\n-------------------------------------\n| time/ | |\n| fps | 274 |\n| iterations | 3700 |\n| time_elapsed | 67 |\n| total_timesteps | 18500 |\n| train/ | |\n| entropy_loss | -37.4 |\n| explained_variance | -2.38e-07 |\n| learning_rate | 0.0025 |\n| n_updates | 3699 |\n| policy_loss | 1.8e+10 |\n| std | 0.852 |\n| value_loss | 2.53e+17 |\n-------------------------------------\n------------------------------------\n| time/ | |\n| fps | 273 |\n| iterations | 3800 |\n| time_elapsed | 69 |\n| total_timesteps | 19000 |\n| train/ | |\n| entropy_loss | -37.2 |\n| explained_variance | 5.96e-08 |\n| learning_rate | 0.0025 |\n| n_updates | 3799 |\n| policy_loss | 2.59e+10 |\n| std | 0.847 |\n| value_loss | 5.93e+17 |\n------------------------------------\n------------------------------------\n| time/ | |\n| fps | 274 |\n| iterations | 3900 |\n| time_elapsed | 71 |\n| total_timesteps | 19500 |\n| train/ | |\n| entropy_loss | -37 |\n| explained_variance | 0 |\n| learning_rate | 0.0025 |\n| n_updates | 3899 |\n| policy_loss | 4.05e+10 |\n| std | 0.84 |\n| value_loss | 1.49e+18 |\n------------------------------------\n=================================\nbegin_total_asset:1000000\nend_total_asset:4137058.6169590103\nSharpe: 1.3505351134387877\n=================================\n------------------------------------\n| time/ | |\n| fps | 273 |\n| iterations | 4000 |\n| time_elapsed | 73 |\n| total_timesteps | 20000 |\n| train/ | |\n| entropy_loss | -37 |\n| explained_variance | 0 |\n| learning_rate | 0.0025 |\n| n_updates | 3999 |\n| policy_loss | 1.3e+10 |\n| std | 0.844 |\n| value_loss | 1.27e+17 |\n------------------------------------\n------------------------------------\n| time/ | |\n| fps | 273 |\n| iterations | 4100 |\n| time_elapsed | 75 |\n| total_timesteps | 20500 |\n| train/ | |\n| entropy_loss | -36.9 |\n| explained_variance | 0 |\n| learning_rate | 0.0025 |\n| n_updates | 4099 |\n| policy_loss | 1.97e+10 |\n| std | 0.841 |\n| value_loss | 2.98e+17 |\n------------------------------------\n------------------------------------\n| time/ | |\n| fps | 273 |\n| iterations | 4200 |\n| time_elapsed | 76 |\n| total_timesteps | 21000 |\n| train/ | |\n| entropy_loss | -36.8 |\n| explained_variance | 0 |\n| learning_rate | 0.0025 |\n| n_updates | 4199 |\n| policy_loss | 2.79e+10 |\n| std | 0.838 |\n| value_loss | 6.19e+17 |\n------------------------------------\n-------------------------------------\n| time/ | |\n| fps | 273 |\n| iterations | 4300 |\n| time_elapsed | 78 |\n| total_timesteps | 21500 |\n| train/ | |\n| entropy_loss | -36.7 |\n| explained_variance | -3.58e-07 |\n| learning_rate | 0.0025 |\n| n_updates | 4299 |\n| policy_loss | 4.56e+10 |\n| std | 0.836 |\n| value_loss | 1.73e+18 
|\n-------------------------------------\n=================================\nbegin_total_asset:1000000\nend_total_asset:4277159.909416507\nSharpe: 1.3577367200108739\n=================================\n-------------------------------------\n| time/ | |\n| fps | 272 |\n| iterations | 4400 |\n| time_elapsed | 80 |\n| total_timesteps | 22000 |\n| train/ | |\n| entropy_loss | -36.7 |\n| explained_variance | -1.19e-07 |\n| learning_rate | 0.0025 |\n| n_updates | 4399 |\n| policy_loss | 1.22e+10 |\n| std | 0.836 |\n| value_loss | 1.3e+17 |\n-------------------------------------\n-------------------------------------\n| time/ | |\n| fps | 272 |\n| iterations | 4500 |\n| time_elapsed | 82 |\n| total_timesteps | 22500 |\n| train/ | |\n| entropy_loss | -36.8 |\n| explained_variance | -2.38e-07 |\n| learning_rate | 0.0025 |\n| n_updates | 4499 |\n| policy_loss | 2.1e+10 |\n| std | 0.838 |\n| value_loss | 3.87e+17 |\n-------------------------------------\n------------------------------------\n| time/ | |\n| fps | 271 |\n| iterations | 4600 |\n| time_elapsed | 84 |\n| total_timesteps | 23000 |\n| train/ | |\n| entropy_loss | -36.5 |\n| explained_variance | 0 |\n| learning_rate | 0.0025 |\n| n_updates | 4599 |\n| policy_loss | 2.96e+10 |\n| std | 0.83 |\n| value_loss | 7.68e+17 |\n------------------------------------\n------------------------------------\n| time/ | |\n| fps | 271 |\n| iterations | 4700 |\n| time_elapsed | 86 |\n| total_timesteps | 23500 |\n| train/ | |\n| entropy_loss | -36.2 |\n| explained_variance | 0 |\n| learning_rate | 0.0025 |\n| n_updates | 4699 |\n| policy_loss | 4.53e+10 |\n| std | 0.822 |\n| value_loss | 2.03e+18 |\n------------------------------------\n=================================\nbegin_total_asset:1000000\nend_total_asset:4470898.677245838\nSharpe: 1.3927368376490192\n=================================\n------------------------------------\n| time/ | |\n| fps | 270 |\n| iterations | 4800 |\n| time_elapsed | 88 |\n| total_timesteps | 24000 |\n| train/ | |\n| entropy_loss | -36.2 |\n| explained_variance | 0 |\n| learning_rate | 0.0025 |\n| n_updates | 4799 |\n| policy_loss | 1.21e+10 |\n| std | 0.822 |\n| value_loss | 1.34e+17 |\n------------------------------------\n------------------------------------\n| time/ | |\n| fps | 270 |\n| iterations | 4900 |\n| time_elapsed | 90 |\n| total_timesteps | 24500 |\n| train/ | |\n| entropy_loss | -36 |\n| explained_variance | 0 |\n| learning_rate | 0.0025 |\n| n_updates | 4899 |\n| policy_loss | 2.19e+10 |\n| std | 0.816 |\n| value_loss | 4.08e+17 |\n------------------------------------\n-------------------------------------\n| time/ | |\n| fps | 270 |\n| iterations | 5000 |\n| time_elapsed | 92 |\n| total_timesteps | 25000 |\n| train/ | |\n| entropy_loss | -35.9 |\n| explained_variance | -1.19e-07 |\n| learning_rate | 0.0025 |\n| n_updates | 4999 |\n| policy_loss | 2.92e+10 |\n| std | 0.812 |\n| value_loss | 8.27e+17 |\n-------------------------------------\n------------------------------------\n| time/ | |\n| fps | 270 |\n| iterations | 5100 |\n| time_elapsed | 94 |\n| total_timesteps | 25500 |\n| train/ | |\n| entropy_loss | -35.6 |\n| explained_variance | 0 |\n| learning_rate | 0.0025 |\n| n_updates | 5099 |\n| policy_loss | 4.65e+10 |\n| std | 0.806 |\n| value_loss | 2.07e+18 |\n------------------------------------\n=================================\nbegin_total_asset:1000000\nend_total_asset:4296842.304094036\nSharpe: 1.3597613555884178\n=================================\n------------------------------------\n| time/ | |\n| 
fps | 269 |\n| iterations | 5200 |\n| time_elapsed | 96 |\n| total_timesteps | 26000 |\n| train/ | |\n| entropy_loss | -35.5 |\n| explained_variance | 0 |\n| learning_rate | 0.0025 |\n| n_updates | 5199 |\n| policy_loss | 1.51e+10 |\n| std | 0.804 |\n| value_loss | 1.58e+17 |\n------------------------------------\n-------------------------------------\n| time/ | |\n| fps | 269 |\n| iterations | 5300 |\n| time_elapsed | 98 |\n| total_timesteps | 26500 |\n| train/ | |\n| entropy_loss | -35.2 |\n| explained_variance | -1.19e-07 |\n| learning_rate | 0.0025 |\n| n_updates | 5299 |\n| policy_loss | 2.2e+10 |\n| std | 0.794 |\n| value_loss | 4.89e+17 |\n-------------------------------------\n------------------------------------\n| time/ | |\n| fps | 268 |\n| iterations | 5400 |\n| time_elapsed | 100 |\n| total_timesteps | 27000 |\n| train/ | |\n| entropy_loss | -35.2 |\n| explained_variance | 0 |\n| learning_rate | 0.0025 |\n| n_updates | 5399 |\n| policy_loss | 3.51e+10 |\n| std | 0.794 |\n| value_loss | 9.6e+17 |\n------------------------------------\n-------------------------------------\n| time/ | |\n| fps | 268 |\n| iterations | 5500 |\n| time_elapsed | 102 |\n| total_timesteps | 27500 |\n| train/ | |\n| entropy_loss | -35.1 |\n| explained_variance | -1.19e-07 |\n| learning_rate | 0.0025 |\n| n_updates | 5499 |\n| policy_loss | 4.53e+10 |\n| std | 0.792 |\n| value_loss | 2.2e+18 |\n-------------------------------------\n=================================\nbegin_total_asset:1000000\nend_total_asset:4382524.460103042\nSharpe: 1.4196015536476876\n=================================\n------------------------------------\n| time/ | |\n| fps | 267 |\n| iterations | 5600 |\n| time_elapsed | 104 |\n| total_timesteps | 28000 |\n| train/ | |\n| entropy_loss | -35 |\n| explained_variance | 0 |\n| learning_rate | 0.0025 |\n| n_updates | 5599 |\n| policy_loss | 1.25e+10 |\n| std | 0.79 |\n| value_loss | 1.78e+17 |\n------------------------------------\n------------------------------------\n| time/ | |\n| fps | 267 |\n| iterations | 5700 |\n| time_elapsed | 106 |\n| total_timesteps | 28500 |\n| train/ | |\n| entropy_loss | -34.9 |\n| explained_variance | 0 |\n| learning_rate | 0.0025 |\n| n_updates | 5699 |\n| policy_loss | 2.37e+10 |\n| std | 0.788 |\n| value_loss | 4.93e+17 |\n------------------------------------\n-------------------------------------\n| time/ | |\n| fps | 267 |\n| iterations | 5800 |\n| time_elapsed | 108 |\n| total_timesteps | 29000 |\n| train/ | |\n| entropy_loss | -34.8 |\n| explained_variance | -1.19e-07 |\n| learning_rate | 0.0025 |\n| n_updates | 5799 |\n| policy_loss | 2.98e+10 |\n| std | 0.786 |\n| value_loss | 8.7e+17 |\n-------------------------------------\n-------------------------------------\n| time/ | |\n| fps | 267 |\n| iterations | 5900 |\n| time_elapsed | 110 |\n| total_timesteps | 29500 |\n| train/ | |\n| entropy_loss | -34.7 |\n| explained_variance | -1.19e-07 |\n| learning_rate | 0.0025 |\n| n_updates | 5899 |\n| policy_loss | 4.01e+10 |\n| std | 0.784 |\n| value_loss | 1.78e+18 |\n-------------------------------------\n=================================\nbegin_total_asset:1000000\nend_total_asset:4197090.0885834135\nSharpe: 1.3780442866941907\n=================================\n------------------------------------\n| time/ | |\n| fps | 266 |\n| iterations | 6000 |\n| time_elapsed | 112 |\n| total_timesteps | 30000 |\n| train/ | |\n| entropy_loss | -34.6 |\n| explained_variance | 0 |\n| learning_rate | 0.0025 |\n| n_updates | 5999 |\n| policy_loss | 1.35e+10 |\n| std | 
0.783 |\n| value_loss | 1.89e+17 |\n------------------------------------\n------------------------------------\n| time/ | |\n| fps | 266 |\n| iterations | 6100 |\n| time_elapsed | 114 |\n| total_timesteps | 30500 |\n| train/ | |\n| entropy_loss | -34.6 |\n| explained_variance | 0 |\n| learning_rate | 0.0025 |\n| n_updates | 6099 |\n| policy_loss | 2.29e+10 |\n| std | 0.782 |\n| value_loss | 5.74e+17 |\n------------------------------------\n------------------------------------\n| time/ | |\n| fps | 265 |\n| iterations | 6200 |\n| time_elapsed | 116 |\n| total_timesteps | 31000 |\n| train/ | |\n| entropy_loss | -34.3 |\n| explained_variance | 1.79e-07 |\n| learning_rate | 0.0025 |\n| n_updates | 6199 |\n| policy_loss | 2.6e+10 |\n| std | 0.776 |\n| value_loss | 7.6e+17 |\n------------------------------------\n------------------------------------\n| time/ | |\n| fps | 265 |\n| iterations | 6300 |\n| time_elapsed | 118 |\n| total_timesteps | 31500 |\n| train/ | |\n| entropy_loss | -34 |\n| explained_variance | 0 |\n| learning_rate | 0.0025 |\n| n_updates | 6299 |\n| policy_loss | 4.44e+10 |\n| std | 0.771 |\n| value_loss | 1.89e+18 |\n------------------------------------\n=================================\nbegin_total_asset:1000000\nend_total_asset:4143469.11676245\nSharpe: 1.3512167972745868\n=================================\n------------------------------------\n| time/ | |\n| fps | 265 |\n| iterations | 6400 |\n| time_elapsed | 120 |\n| total_timesteps | 32000 |\n| train/ | |\n| entropy_loss | -33.8 |\n| explained_variance | 0 |\n| learning_rate | 0.0025 |\n| n_updates | 6399 |\n| policy_loss | 1.39e+10 |\n| std | 0.765 |\n| value_loss | 2.19e+17 |\n------------------------------------\n-------------------------------------\n| time/ | |\n| fps | 265 |\n| iterations | 6500 |\n| time_elapsed | 122 |\n| total_timesteps | 32500 |\n| train/ | |\n| entropy_loss | -33.7 |\n| explained_variance | -1.19e-07 |\n| learning_rate | 0.0025 |\n| n_updates | 6499 |\n| policy_loss | 2.49e+10 |\n| std | 0.766 |\n| value_loss | 6.48e+17 |\n-------------------------------------\n-------------------------------------\n| time/ | |\n| fps | 265 |\n| iterations | 6600 |\n| time_elapsed | 124 |\n| total_timesteps | 33000 |\n| train/ | |\n| entropy_loss | -33.8 |\n| explained_variance | -1.19e-07 |\n| learning_rate | 0.0025 |\n| n_updates | 6599 |\n| policy_loss | 2.5e+10 |\n| std | 0.767 |\n| value_loss | 9.08e+17 |\n-------------------------------------\n=================================\nbegin_total_asset:1000000\nend_total_asset:4281019.515049888\nSharpe: 1.3958026772144092\n=================================\n------------------------------------\n| time/ | |\n| fps | 264 |\n| iterations | 6700 |\n| time_elapsed | 126 |\n| total_timesteps | 33500 |\n| train/ | |\n| entropy_loss | -33.7 |\n| explained_variance | 1.19e-07 |\n| learning_rate | 0.0025 |\n| n_updates | 6699 |\n| policy_loss | 8.98e+09 |\n| std | 0.767 |\n| value_loss | 8.8e+16 |\n------------------------------------\n------------------------------------\n| time/ | |\n| fps | 264 |\n| iterations | 6800 |\n| time_elapsed | 128 |\n| total_timesteps | 34000 |\n| train/ | |\n| entropy_loss | -33.6 |\n| explained_variance | 0 |\n| learning_rate | 0.0025 |\n| n_updates | 6799 |\n| policy_loss | 1.4e+10 |\n| std | 0.765 |\n| value_loss | 2.19e+17 |\n------------------------------------\n------------------------------------\n| time/ | |\n| fps | 264 |\n| iterations | 6900 |\n| time_elapsed | 130 |\n| total_timesteps | 34500 |\n| train/ | |\n| entropy_loss | 
-33.2 |\n| explained_variance | 1.79e-07 |\n| learning_rate | 0.0025 |\n| n_updates | 6899 |\n| policy_loss | 2.87e+10 |\n| std | 0.756 |\n| value_loss | 7.98e+17 |\n------------------------------------\n------------------------------------\n| time/ | |\n| fps | 264 |\n| iterations | 7000 |\n| time_elapsed | 132 |\n| total_timesteps | 35000 |\n| train/ | |\n| entropy_loss | -33.2 |\n| explained_variance | 1.79e-07 |\n| learning_rate | 0.0025 |\n| n_updates | 6999 |\n| policy_loss | 3.21e+10 |\n| std | 0.757 |\n| value_loss | 1.21e+18 |\n------------------------------------\n=================================\nbegin_total_asset:1000000\nend_total_asset:4755272.128580953\nSharpe: 1.484040317053098\n=================================\n------------------------------------\n| time/ | |\n| fps | 263 |\n| iterations | 7100 |\n| time_elapsed | 134 |\n| total_timesteps | 35500 |\n| train/ | |\n| entropy_loss | -33.1 |\n| explained_variance | 5.96e-08 |\n| learning_rate | 0.0025 |\n| n_updates | 7099 |\n| policy_loss | 1.1e+10 |\n| std | 0.754 |\n| value_loss | 9.64e+16 |\n------------------------------------\n------------------------------------\n| time/ | |\n| fps | 263 |\n| iterations | 7200 |\n| time_elapsed | 136 |\n| total_timesteps | 36000 |\n| train/ | |\n| entropy_loss | -32.9 |\n| explained_variance | 0 |\n| learning_rate | 0.0025 |\n| n_updates | 7199 |\n| policy_loss | 1.23e+10 |\n| std | 0.748 |\n| value_loss | 2.05e+17 |\n------------------------------------\n-------------------------------------\n| time/ | |\n| fps | 263 |\n| iterations | 7300 |\n| time_elapsed | 138 |\n| total_timesteps | 36500 |\n| train/ | |\n| entropy_loss | -32.7 |\n| explained_variance | -1.19e-07 |\n| learning_rate | 0.0025 |\n| n_updates | 7299 |\n| policy_loss | 2.44e+10 |\n| std | 0.745 |\n| value_loss | 7.21e+17 |\n-------------------------------------\n-------------------------------------\n| time/ | |\n| fps | 263 |\n| iterations | 7400 |\n| time_elapsed | 140 |\n| total_timesteps | 37000 |\n| train/ | |\n| entropy_loss | -32.4 |\n| explained_variance | -1.19e-07 |\n| learning_rate | 0.0025 |\n| n_updates | 7399 |\n| policy_loss | 3.3e+10 |\n| std | 0.737 |\n| value_loss | 1.27e+18 |\n-------------------------------------\n=================================\nbegin_total_asset:1000000\nend_total_asset:4728048.330437017\nSharpe: 1.4507259207464278\n=================================\n------------------------------------\n| time/ | |\n| fps | 262 |\n| iterations | 7500 |\n| time_elapsed | 142 |\n| total_timesteps | 37500 |\n| train/ | |\n| entropy_loss | -32.3 |\n| explained_variance | 0 |\n| learning_rate | 0.0025 |\n| n_updates | 7499 |\n| policy_loss | 8.58e+09 |\n| std | 0.733 |\n| value_loss | 1.03e+17 |\n------------------------------------\n------------------------------------\n| time/ | |\n| fps | 262 |\n| iterations | 7600 |\n| time_elapsed | 144 |\n| total_timesteps | 38000 |\n| train/ | |\n| entropy_loss | -32.2 |\n| explained_variance | 0 |\n| learning_rate | 0.0025 |\n| n_updates | 7599 |\n| policy_loss | 1.43e+10 |\n| std | 0.732 |\n| value_loss | 2.43e+17 |\n------------------------------------\n------------------------------------\n| time/ | |\n| fps | 262 |\n| iterations | 7700 |\n| time_elapsed | 146 |\n| total_timesteps | 38500 |\n| train/ | |\n| entropy_loss | -31.9 |\n| explained_variance | 5.96e-08 |\n| learning_rate | 0.0025 |\n| n_updates | 7699 |\n| policy_loss | 2.19e+10 |\n| std | 0.725 |\n| value_loss | 6.87e+17 
|\n------------------------------------\n------------------------------------\n| time/ | |\n| fps | 262 |\n| iterations | 7800 |\n| time_elapsed | 148 |\n| total_timesteps | 39000 |\n| train/ | |\n| entropy_loss | -31.9 |\n| explained_variance | 1.79e-07 |\n| learning_rate | 0.0025 |\n| n_updates | 7799 |\n| policy_loss | 3e+10 |\n| std | 0.728 |\n| value_loss | 1.27e+18 |\n------------------------------------\n=================================\nbegin_total_asset:1000000\nend_total_asset:4447220.288681249\nSharpe: 1.4028155528429929\n=================================\n-------------------------------------\n| time/ | |\n| fps | 261 |\n| iterations | 7900 |\n| time_elapsed | 151 |\n| total_timesteps | 39500 |\n| train/ | |\n| entropy_loss | -31.7 |\n| explained_variance | -1.19e-07 |\n| learning_rate | 0.0025 |\n| n_updates | 7899 |\n| policy_loss | 8.86e+09 |\n| std | 0.724 |\n| value_loss | 1e+17 |\n-------------------------------------\n------------------------------------\n| time/ | |\n| fps | 262 |\n| iterations | 8000 |\n| time_elapsed | 152 |\n| total_timesteps | 40000 |\n| train/ | |\n| entropy_loss | -31.6 |\n| explained_variance | 0 |\n| learning_rate | 0.0025 |\n| n_updates | 7999 |\n| policy_loss | 1.45e+10 |\n| std | 0.722 |\n| value_loss | 2.28e+17 |\n------------------------------------\n-------------------------------------\n| time/ | |\n| fps | 262 |\n| iterations | 8100 |\n| time_elapsed | 154 |\n| total_timesteps | 40500 |\n| train/ | |\n| entropy_loss | -31.3 |\n| explained_variance | -1.19e-07 |\n| learning_rate | 0.0025 |\n| n_updates | 8099 |\n| policy_loss | 2.6e+10 |\n| std | 0.717 |\n| value_loss | 7.72e+17 |\n-------------------------------------\n-------------------------------------\n| time/ | |\n| fps | 262 |\n| iterations | 8200 |\n| time_elapsed | 155 |\n| total_timesteps | 41000 |\n| train/ | |\n| entropy_loss | -31.3 |\n| explained_variance | -1.19e-07 |\n| learning_rate | 0.0025 |\n| n_updates | 8199 |\n| policy_loss | 3.39e+10 |\n| std | 0.717 |\n| value_loss | 1.32e+18 |\n-------------------------------------\n=================================\nbegin_total_asset:1000000\nend_total_asset:4235922.795545344\nSharpe: 1.3746686881300678\n=================================\n------------------------------------\n| time/ | |\n| fps | 262 |\n| iterations | 8300 |\n| time_elapsed | 157 |\n| total_timesteps | 41500 |\n| train/ | |\n| entropy_loss | -31.1 |\n| explained_variance | 0 |\n| learning_rate | 0.0025 |\n| n_updates | 8299 |\n| policy_loss | 1e+10 |\n| std | 0.711 |\n| value_loss | 1.12e+17 |\n------------------------------------\n------------------------------------\n| time/ | |\n| fps | 263 |\n| iterations | 8400 |\n| time_elapsed | 159 |\n| total_timesteps | 42000 |\n| train/ | |\n| entropy_loss | -31.1 |\n| explained_variance | 0 |\n| learning_rate | 0.0025 |\n| n_updates | 8399 |\n| policy_loss | 1.27e+10 |\n| std | 0.71 |\n| value_loss | 1.92e+17 |\n------------------------------------\n------------------------------------\n| time/ | |\n| fps | 263 |\n| iterations | 8500 |\n| time_elapsed | 161 |\n| total_timesteps | 42500 |\n| train/ | |\n| entropy_loss | -30.7 |\n| explained_variance | 5.96e-08 |\n| learning_rate | 0.0025 |\n| n_updates | 8499 |\n| policy_loss | 2.03e+10 |\n| std | 0.701 |\n| value_loss | 6.03e+17 |\n------------------------------------\n------------------------------------\n| time/ | |\n| fps | 263 |\n| iterations | 8600 |\n| time_elapsed | 162 |\n| total_timesteps | 43000 |\n| train/ | |\n| entropy_loss | -30.4 |\n| 
explained_variance | 5.96e-08 |\n| learning_rate | 0.0025 |\n| n_updates | 8599 |\n| policy_loss | 2.61e+10 |\n| std | 0.694 |\n| value_loss | 1.17e+18 |\n------------------------------------\n=================================\nbegin_total_asset:1000000\nend_total_asset:4012838.04546442\nSharpe: 1.3309561023135357\n=================================\n-------------------------------------\n| time/ | |\n| fps | 263 |\n| iterations | 8700 |\n| time_elapsed | 164 |\n| total_timesteps | 43500 |\n| train/ | |\n| entropy_loss | -30.2 |\n| explained_variance | -2.38e-07 |\n| learning_rate | 0.0025 |\n| n_updates | 8699 |\n| policy_loss | 8.54e+09 |\n| std | 0.691 |\n| value_loss | 1.05e+17 |\n-------------------------------------\n------------------------------------\n| time/ | |\n| fps | 264 |\n| iterations | 8800 |\n| time_elapsed | 166 |\n| total_timesteps | 44000 |\n| train/ | |\n| entropy_loss | -30 |\n| explained_variance | 0 |\n| learning_rate | 0.0025 |\n| n_updates | 8799 |\n| policy_loss | 1.3e+10 |\n| std | 0.689 |\n| value_loss | 2.18e+17 |\n------------------------------------\n------------------------------------\n| time/ | |\n| fps | 264 |\n| iterations | 8900 |\n| time_elapsed | 168 |\n| total_timesteps | 44500 |\n| train/ | |\n| entropy_loss | -29.8 |\n| explained_variance | 5.96e-08 |\n| learning_rate | 0.0025 |\n| n_updates | 8899 |\n| policy_loss | 2e+10 |\n| std | 0.686 |\n| value_loss | 6.44e+17 |\n------------------------------------\n------------------------------------\n| time/ | |\n| fps | 265 |\n| iterations | 9000 |\n| time_elapsed | 169 |\n| total_timesteps | 45000 |\n| train/ | |\n| entropy_loss | -29.5 |\n| explained_variance | 0 |\n| learning_rate | 0.0025 |\n| n_updates | 8999 |\n| policy_loss | 3.11e+10 |\n| std | 0.68 |\n| value_loss | 1.33e+18 |\n------------------------------------\n=================================\nbegin_total_asset:1000000\nend_total_asset:4177082.843864525\nSharpe: 1.3606287982215146\n=================================\n-------------------------------------\n| time/ | |\n| fps | 265 |\n| iterations | 9100 |\n| time_elapsed | 171 |\n| total_timesteps | 45500 |\n| train/ | |\n| entropy_loss | -29.4 |\n| explained_variance | -1.19e-07 |\n| learning_rate | 0.0025 |\n| n_updates | 9099 |\n| policy_loss | 8.66e+09 |\n| std | 0.678 |\n| value_loss | 1.11e+17 |\n-------------------------------------\n------------------------------------\n| time/ | |\n| fps | 265 |\n| iterations | 9200 |\n| time_elapsed | 173 |\n| total_timesteps | 46000 |\n| train/ | |\n| entropy_loss | -29 |\n| explained_variance | 0 |\n| learning_rate | 0.0025 |\n| n_updates | 9199 |\n| policy_loss | 1.41e+10 |\n| std | 0.671 |\n| value_loss | 2.36e+17 |\n------------------------------------\n------------------------------------\n| time/ | |\n| fps | 265 |\n| iterations | 9300 |\n| time_elapsed | 174 |\n| total_timesteps | 46500 |\n| train/ | |\n| entropy_loss | -29 |\n| explained_variance | 5.96e-08 |\n| learning_rate | 0.0025 |\n| n_updates | 9299 |\n| policy_loss | 1.93e+10 |\n| std | 0.671 |\n| value_loss | 5.9e+17 |\n------------------------------------\n------------------------------------\n| time/ | |\n| fps | 265 |\n| iterations | 9400 |\n| time_elapsed | 176 |\n| total_timesteps | 47000 |\n| train/ | |\n| entropy_loss | -28.9 |\n| explained_variance | 0 |\n| learning_rate | 0.0025 |\n| n_updates | 9399 |\n| policy_loss | 3.02e+10 |\n| std | 0.667 |\n| value_loss | 1.34e+18 
|\n[A2C training log, iterations 9500-12000 (total_timesteps 47500-60000, ~265-270 fps, entropy_loss -28.9 to -24, std 0.67 to 0.585); per-100-iteration progress tables omitted, periodic backtest summaries:]\n=================================\nbegin_total_asset:1000000 end_total_asset:3955956.7293339428 Sharpe: 1.3227434143932202\nbegin_total_asset:1000000 end_total_asset:3664543.976893935 Sharpe: 1.244869609790809\nbegin_total_asset:1000000 end_total_asset:3650206.454383789 Sharpe: 1.2363504211358363\nbegin_total_asset:1000000 end_total_asset:3698471.4714966253 Sharpe: 1.2467360881694427\nbegin_total_asset:1000000 end_total_asset:3641409.865902348 Sharpe: 1.2354237913350283\nbegin_total_asset:1000000 end_total_asset:3639765.138494633 Sharpe: 1.2296605419011317\nbegin_total_asset:1000000 end_total_asset:3675920.506127791 Sharpe: 1.2461091476653254\n=================================\n"
],
[
"# agent = DRLAgent(env = env_train)\n# DDPG_PARAMS = {\"learning_rate\": 0.0025}\n# model_ddpg = agent.get_model(model_name=\"ddpg\",model_kwargs = DDPG_PARAMS)\n\n# trained_ddpg = agent.train_model(model=model_ddpg, \n# tb_log_name='ddpg',\n# total_timesteps=50000)",
"_____no_output_____"
],
[
"trade = data_split(processed,'2019-01-01', '2021-01-01')\ne_trade_gym = StockPortfolioEnv(df = trade, **env_kwargs)",
"_____no_output_____"
],
[
"df_daily_return, df_actions = DRLAgent.DRL_prediction(model=trained_a2c,\n environment = e_trade_gym)",
"=================================\nbegin_total_asset:1000000\nend_total_asset:nan\nSharpe: 0.9192189397261374\n=================================\nhit end!\n"
],
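[
"# The run above ends with end_total_asset:nan, so it is worth checking the\n# returned daily returns for missing values before computing statistics.\n# Hypothetical diagnostic cell added for illustration, not part of the original run.\nprint(df_daily_return.isna().sum())\nprint(df_daily_return.tail())",
"_____no_output_____"
],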
[
"from pyfolio import timeseries\nDRL_strat = convert_daily_return_to_pyfolio_ts(df_daily_return)\nperf_func = timeseries.perf_stats \nperf_stats_all = perf_func( returns=DRL_strat, \n factor_returns=DRL_strat, \n positions=None, transactions=None, turnover_denom=\"AGB\")",
"_____no_output_____"
],
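[
"# perf_stats_all is computed above but never displayed; print the pyfolio\n# statistics (minimal sketch, assuming perf_stats_all is the pandas Series\n# returned by timeseries.perf_stats).\nprint(perf_stats_all)",
"_____no_output_____"
],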
[
"#baseline stats\nprint(\"==============Get Baseline Stats===========\")\nbaseline_df = get_baseline(\n ticker=\"^BSESN\", \n start = df_daily_return.loc[0,'date'],\n end = df_daily_return.loc[len(df_daily_return)-1,'date'])\n\nstats = backtest_stats(baseline_df, value_col_name = 'close')",
"==============Get Baseline Stats===========\n[*********************100%***********************] 1 of 1 completed\nShape of DataFrame: (489, 8)\nAnnual return 0.158443\nCumulative returns 0.330292\nAnnual volatility 0.248750\nSharpe ratio 0.718724\nCalmar ratio 0.416188\nStability 0.019371\nMax drawdown -0.380701\nOmega ratio 1.160023\nSortino ratio 0.966249\nSkew NaN\nKurtosis NaN\nTail ratio 0.947229\nDaily value at risk -0.030630\ndtype: float64\n"
],
[
"import pyfolio\nimport datetime as dt\n%matplotlib inline\n\nbaseline_df = get_baseline(\n ticker='^BSESN', start='2019-01-01', end='2021-01-01')\n\n\nbaseline_returns = get_daily_return(baseline_df, value_col_name=\"close\")\n\nwith pyfolio.plotting.plotting_context(font_scale=1.1):\n pyfolio.create_full_tear_sheet(returns = DRL_strat,\n benchmark_rets=baseline_returns, set_context=False)",
"\r[*********************100%***********************] 1 of 1 completed\nShape of DataFrame: (490, 8)\n"
],
[
" df_daily_return.to_csv('df_returns_ret1.csv')",
"_____no_output_____"
],
[
"",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e7fa2973fa3b0b47587f5e00b2ae084875eff264 | 7,410 | ipynb | Jupyter Notebook | _downloads/b1ca754f39005f3188ba9b4423f688b0/plot_otda_d2.ipynb | FlopsKa/pythonot.github.io | 3d5baca1e48e09bb076036d99a835e34af9fce80 | [
"MIT"
] | 5 | 2020-06-12T10:53:15.000Z | 2021-11-06T13:21:56.000Z | _downloads/b1ca754f39005f3188ba9b4423f688b0/plot_otda_d2.ipynb | FlopsKa/pythonot.github.io | 3d5baca1e48e09bb076036d99a835e34af9fce80 | [
"MIT"
] | 1 | 2020-08-28T08:15:56.000Z | 2020-08-28T08:15:56.000Z | _downloads/b1ca754f39005f3188ba9b4423f688b0/plot_otda_d2.ipynb | FlopsKa/pythonot.github.io | 3d5baca1e48e09bb076036d99a835e34af9fce80 | [
"MIT"
] | 1 | 2020-08-28T08:08:09.000Z | 2020-08-28T08:08:09.000Z | 51.458333 | 1,529 | 0.578677 | [
[
[
"%matplotlib inline",
"_____no_output_____"
]
],
[
[
"\n# OT for domain adaptation on empirical distributions\n\n\nThis example introduces a domain adaptation in a 2D setting. It explicits\nthe problem of domain adaptation and introduces some optimal transport\napproaches to solve it.\n\nQuantities such as optimal couplings, greater coupling coefficients and\ntransported samples are represented in order to give a visual understanding\nof what the transport methods are doing.\n",
"_____no_output_____"
]
],
[
[
"# Authors: Remi Flamary <[email protected]>\n# Stanislas Chambon <[email protected]>\n#\n# License: MIT License\n\nimport matplotlib.pylab as pl\nimport ot\nimport ot.plot",
"_____no_output_____"
]
],
[
[
"generate data\n-------------\n\n",
"_____no_output_____"
]
],
[
[
"n_samples_source = 150\nn_samples_target = 150\n\nXs, ys = ot.datasets.make_data_classif('3gauss', n_samples_source)\nXt, yt = ot.datasets.make_data_classif('3gauss2', n_samples_target)\n\n# Cost matrix\nM = ot.dist(Xs, Xt, metric='sqeuclidean')",
"_____no_output_____"
]
],
[
[
"Instantiate the different transport algorithms and fit them\n-----------------------------------------------------------\n\n",
"_____no_output_____"
]
],
[
[
"# EMD Transport\not_emd = ot.da.EMDTransport()\not_emd.fit(Xs=Xs, Xt=Xt)\n\n# Sinkhorn Transport\not_sinkhorn = ot.da.SinkhornTransport(reg_e=1e-1)\not_sinkhorn.fit(Xs=Xs, Xt=Xt)\n\n# Sinkhorn Transport with Group lasso regularization\not_lpl1 = ot.da.SinkhornLpl1Transport(reg_e=1e-1, reg_cl=1e0)\not_lpl1.fit(Xs=Xs, ys=ys, Xt=Xt)\n\n# transport source samples onto target samples\ntransp_Xs_emd = ot_emd.transform(Xs=Xs)\ntransp_Xs_sinkhorn = ot_sinkhorn.transform(Xs=Xs)\ntransp_Xs_lpl1 = ot_lpl1.transform(Xs=Xs)",
"_____no_output_____"
]
],
[
[
"Fig 1 : plots source and target samples + matrix of pairwise distance\n---------------------------------------------------------------------\n\n",
"_____no_output_____"
]
],
[
[
"pl.figure(1, figsize=(10, 10))\npl.subplot(2, 2, 1)\npl.scatter(Xs[:, 0], Xs[:, 1], c=ys, marker='+', label='Source samples')\npl.xticks([])\npl.yticks([])\npl.legend(loc=0)\npl.title('Source samples')\n\npl.subplot(2, 2, 2)\npl.scatter(Xt[:, 0], Xt[:, 1], c=yt, marker='o', label='Target samples')\npl.xticks([])\npl.yticks([])\npl.legend(loc=0)\npl.title('Target samples')\n\npl.subplot(2, 2, 3)\npl.imshow(M, interpolation='nearest')\npl.xticks([])\npl.yticks([])\npl.title('Matrix of pairwise distances')\npl.tight_layout()",
"_____no_output_____"
]
],
[
[
"Fig 2 : plots optimal couplings for the different methods\n---------------------------------------------------------\n\n",
"_____no_output_____"
]
],
[
[
"pl.figure(2, figsize=(10, 6))\n\npl.subplot(2, 3, 1)\npl.imshow(ot_emd.coupling_, interpolation='nearest')\npl.xticks([])\npl.yticks([])\npl.title('Optimal coupling\\nEMDTransport')\n\npl.subplot(2, 3, 2)\npl.imshow(ot_sinkhorn.coupling_, interpolation='nearest')\npl.xticks([])\npl.yticks([])\npl.title('Optimal coupling\\nSinkhornTransport')\n\npl.subplot(2, 3, 3)\npl.imshow(ot_lpl1.coupling_, interpolation='nearest')\npl.xticks([])\npl.yticks([])\npl.title('Optimal coupling\\nSinkhornLpl1Transport')\n\npl.subplot(2, 3, 4)\not.plot.plot2D_samples_mat(Xs, Xt, ot_emd.coupling_, c=[.5, .5, 1])\npl.scatter(Xs[:, 0], Xs[:, 1], c=ys, marker='+', label='Source samples')\npl.scatter(Xt[:, 0], Xt[:, 1], c=yt, marker='o', label='Target samples')\npl.xticks([])\npl.yticks([])\npl.title('Main coupling coefficients\\nEMDTransport')\n\npl.subplot(2, 3, 5)\not.plot.plot2D_samples_mat(Xs, Xt, ot_sinkhorn.coupling_, c=[.5, .5, 1])\npl.scatter(Xs[:, 0], Xs[:, 1], c=ys, marker='+', label='Source samples')\npl.scatter(Xt[:, 0], Xt[:, 1], c=yt, marker='o', label='Target samples')\npl.xticks([])\npl.yticks([])\npl.title('Main coupling coefficients\\nSinkhornTransport')\n\npl.subplot(2, 3, 6)\not.plot.plot2D_samples_mat(Xs, Xt, ot_lpl1.coupling_, c=[.5, .5, 1])\npl.scatter(Xs[:, 0], Xs[:, 1], c=ys, marker='+', label='Source samples')\npl.scatter(Xt[:, 0], Xt[:, 1], c=yt, marker='o', label='Target samples')\npl.xticks([])\npl.yticks([])\npl.title('Main coupling coefficients\\nSinkhornLpl1Transport')\npl.tight_layout()",
"_____no_output_____"
]
],
[
[
"Fig 3 : plot transported samples\n--------------------------------\n\n",
"_____no_output_____"
]
],
[
[
"# display transported samples\npl.figure(4, figsize=(10, 4))\npl.subplot(1, 3, 1)\npl.scatter(Xt[:, 0], Xt[:, 1], c=yt, marker='o',\n label='Target samples', alpha=0.5)\npl.scatter(transp_Xs_emd[:, 0], transp_Xs_emd[:, 1], c=ys,\n marker='+', label='Transp samples', s=30)\npl.title('Transported samples\\nEmdTransport')\npl.legend(loc=0)\npl.xticks([])\npl.yticks([])\n\npl.subplot(1, 3, 2)\npl.scatter(Xt[:, 0], Xt[:, 1], c=yt, marker='o',\n label='Target samples', alpha=0.5)\npl.scatter(transp_Xs_sinkhorn[:, 0], transp_Xs_sinkhorn[:, 1], c=ys,\n marker='+', label='Transp samples', s=30)\npl.title('Transported samples\\nSinkhornTransport')\npl.xticks([])\npl.yticks([])\n\npl.subplot(1, 3, 3)\npl.scatter(Xt[:, 0], Xt[:, 1], c=yt, marker='o',\n label='Target samples', alpha=0.5)\npl.scatter(transp_Xs_lpl1[:, 0], transp_Xs_lpl1[:, 1], c=ys,\n marker='+', label='Transp samples', s=30)\npl.title('Transported samples\\nSinkhornLpl1Transport')\npl.xticks([])\npl.yticks([])\n\npl.tight_layout()\npl.show()",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
e7fa355472daf463067b094765f2009b0f9b461b | 16,529 | ipynb | Jupyter Notebook | neptune-sagemaker/notebooks/Let-Me-Graph-That-For-You/01-Air-Routes.ipynb | JanuaryThomas/amazon-neptune-samples | a33f481707014025b41857966144b4a59d6c553b | [
"MIT-0"
] | 298 | 2018-04-16T17:34:01.000Z | 2022-03-27T06:53:21.000Z | neptune-sagemaker/notebooks/Let-Me-Graph-That-For-You/01-Air-Routes.ipynb | JanuaryThomas/amazon-neptune-samples | a33f481707014025b41857966144b4a59d6c553b | [
"MIT-0"
] | 24 | 2018-06-07T12:48:56.000Z | 2022-03-29T14:28:06.000Z | neptune-sagemaker/notebooks/Let-Me-Graph-That-For-You/01-Air-Routes.ipynb | JanuaryThomas/amazon-neptune-samples | a33f481707014025b41857966144b4a59d6c553b | [
"MIT-0"
] | 132 | 2018-05-31T02:58:04.000Z | 2022-03-29T21:02:05.000Z | 38.619159 | 824 | 0.621695 | [
[
[
"# Air Routes\n\nThe examples in this notebook demonstrate using the GremlinPython library to connect to and work with a Neptune instance. Using a Jupyter notebook in this way provides a nice way to interact with your Neptune graph database in a familiar and instantly productive environment.",
"_____no_output_____"
],
[
"## Load the Air Routes dataset\n\nWhen the SageMaker notebook instance was created the appropriate Python libraries for working with a Tinkerpop enabled graph were installed. We now need to `import` some classes from those libraries before connecting to our Neptune instance, loading some sample data, and running queries. \n\nThe `neptune.py` helper module that was installed in the _util_ directory does all the necessary heavy lifting with regard to importing classes and loading the air routes dataset. You can reuse this module in your own notebooks, or consult its source code to see how to configure GremlinPython.",
"_____no_output_____"
]
],
[
[
"%run '../util/neptune.py'",
"_____no_output_____"
]
],
[
[
"Using the neptune module, we can clear any existing data from the database, and load the air routes graph:",
"_____no_output_____"
]
],
[
[
"neptune.clear()\nneptune.bulkLoad('s3://aws-neptune-customer-samples-${AWS_REGION}/neptune-sagemaker/data/let-me-graph-that-for-you/01-air-routes/', interval=5)",
"_____no_output_____"
]
],
[
[
"## Establish access to our Neptune instance\n\nBefore we can work with our graph we need to establish a connection to it. This is done using the `DriverRemoteConnection` capability as defined by Apache TinkerPop and supported by GremlinPython. The `neptune.py` helper module facilitates creating this connection.\n\nOnce this cell has been run we will be able to use the variable `g` to refer to our graph in Gremlin queries in subsequent cells. By default Neptune uses port 8182 and that is what we connect to below. When you configure your own Neptune instance you can you choose a different endpoint and port number by specifiying the `neptune_endpoint` and `neptune_port` parameters to the `graphTraversal()` method.",
"_____no_output_____"
]
],
[
[
"g = neptune.graphTraversal()",
"_____no_output_____"
]
],
[
[
"## Let's find out a bit about the graph\n\nLet's start off with a simple query just to make sure our connection to Neptune is working. The queries below look at all of the vertices and edges in the graph and create two maps that show the demographic of the graph. As we are using the air routes data set, not surprisingly, the values returned are related to airports and routes.",
"_____no_output_____"
]
],
[
[
"vertices = g.V().groupCount().by(T.label).toList()\nedges = g.E().groupCount().by(T.label).toList()\nprint(vertices)\nprint(edges)",
"_____no_output_____"
]
],
[
[
"## Find routes longer than 8,400 miles\n\nThe query below finds routes in the graph that are longer than 8,400 miles. This is done by examining the `dist` property of the `routes` edges in the graph. Having found some edges that meet our criteria we sort them in descending order by distance. The `where` step filters out the reverse direction routes for the ones that we have already found beacuse we do not, in this case, want two results for each route. As an experiment, try removing the `where` line and observe the additional results that are returned. Lastly we generate some `path` results using the airport codes and route distances. Notice how we have laid the Gremlin query out over multiple lines to make it easier to read. To avoid errors, when you lay out a query in this way using Python, each line must end with a backslash character \"\\\".\n\nThe results from running the query will be placed into the variable `paths`. Notice how we ended the Gremlin query with a call to `toList`. This tells Gremlin that we want our results back in a list. We can then use a Python `for` loop to print those results. Each entry in the list will itself be a list containing the starting airport code, the length of the route and the destination airport code.",
"_____no_output_____"
]
],
[
[
"paths = g.V().hasLabel('airport').as_('a') \\\n .outE('route').has('dist',gt(8400)) \\\n .order().by('dist',Order.decr) \\\n .inV() \\\n .where(P.lt('a')).by('code') \\\n .path().by('code').by('dist').by('code').toList()\n\nfor p in paths:\n print(p)",
"_____no_output_____"
]
],
[
[
"## Draw a Bar Chart that represents the routes we just found.\n\nOne of the nice things about using Python to work with our graph is that we can take advantage of the larger Python ecosystem of libraries such as `matplotlib`, `numpy` and `pandas` to further analyze our data and represent it pictorially. So, now that we have found some long airline routes we can build a bar chart that represents them graphically.",
"_____no_output_____"
]
],
[
[
"import matplotlib.pyplot as plt; plt.rcdefaults()\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n \nroutes = list()\ndist = list()\n\n# Construct the x-axis labels by combining the airport pairs we found\n# into strings with with a \"-\" between them. We also build a list containing\n# the distance values that will be used to construct and label the bars.\nfor i in range(len(paths)):\n routes.append(paths[i][0] + '-' + paths[i][2])\n dist.append(paths[i][1])\n\n# Setup everything we need to draw the chart\ny_pos = np.arange(len(routes))\ny_labels = (0,1000,2000,3000,4000,5000,6000,7000,8000,9000)\nfreq_series = pd.Series(dist) \nplt.figure(figsize=(11,6))\nfs = freq_series.plot(kind='bar')\nfs.set_xticks(y_pos, routes)\nfs.set_ylabel('Miles')\nfs.set_title('Longest routes')\nfs.set_yticklabels(y_labels)\nfs.set_xticklabels(routes)\nfs.yaxis.set_ticks(np.arange(0, 10000, 1000))\nfs.yaxis.set_ticklabels(y_labels)\n\n# Annotate each bar with the distance value\nfor i in range(len(paths)):\n fs.annotate(dist[i],xy=(i,dist[i]+60),xycoords='data',ha='center')\n\n# We are finally ready to draw the bar chart\nplt.show()",
"_____no_output_____"
]
],
[
[
"## Explore the distribution of airports by continent\n\nThe next example queries the graph to find out how many airports are in each continent. The query starts by finding all vertices that are continents. Next, those vertices are grouped, which creates a map (or dict) whose keys are the continent descriptions and whose values represent the counts of the outgoing edges with a 'contains' label. Finally the resulting map is sorted using the keys in ascending order. That result is then returned to our Python code as the variable `m`. Finally we can print the map nicely using regular Python concepts.",
"_____no_output_____"
]
],
[
[
"# Return a map where the keys are the continent names and the values are the\n# number of airports in that continent.\nm = g.V().hasLabel('continent') \\\n .group().by('desc').by(__.out('contains').count()) \\\n .order(Scope.local).by(Column.keys) \\\n .next()\n\nfor c,n in m.items():\n print('%4d %s' %(n,c))",
"_____no_output_____"
]
],
[
[
"## Draw a pie chart representing the distribution by continent\n\nRather than return the results as text like we did above, it might be nicer to display them as percentages on a pie chart. That is what the code in the next cell does. Rather than return the descriptions of the continents (their names) this time our Gremlin query simply retrieves the two digit character code representing each continent.",
"_____no_output_____"
]
],
[
[
"import matplotlib.pyplot as plt; plt.rcdefaults()\nimport numpy as np\n\n# Return a map where the keys are the continent codes and the values are the\n# number of airports in that continent.\nm = g.V().hasLabel('continent').group().by('code').by(__.out().count()).next()\n\nfig,pie1 = plt.subplots()\n\npie1.pie(m.values() \\\n ,labels=m.keys() \\\n ,autopct='%1.1f%%'\\\n ,shadow=True \\\n ,startangle=90 \\\n ,explode=(0,0,0.1,0,0,0,0))\n\npie1.axis('equal') \n\nplt.show()",
"_____no_output_____"
]
],
[
[
"## Find some routes from London to San Jose and draw them\n\nOne of the nice things about connected graph data is that it lends itself nicely to visualization that people can get value from looking at. The Python `networkx` library makes it fairly easy to draw a graph. The next example takes advantage of this capability to draw a directed graph (DiGraph) of a few airline routes.\n\nThe query below starts by finding the vertex that represents London Heathrow (LHR). It then finds 15 routes from LHR that end up in San Jose California (SJC) with one stop on the way. Those routes are returned as a list of paths. Each path will contain the three character IATA codes representing the airports found.\n\nThe main purpose of this example is to show that we can easily extract part of a larger graph and render it graphically in a way that is easy for an end user to comprehend.",
"_____no_output_____"
]
],
[
[
"import matplotlib.pyplot as plt; plt.rcdefaults()\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport networkx as nx\n\n# Find up to 15 routes from LHR to SJC that make one stop.\npaths = g.V().has('airport','code','LHR') \\\n .out().out().has('code','SJC').limit(15) \\\n .path().by('code').toList()\n\n# Create a new empty DiGraph\nG=nx.DiGraph()\n\n# Add the routes we found to DiGraph we just created\nfor p in paths:\n G.add_edge(p[0],p[1])\n G.add_edge(p[1],p[2])\n\n# Give the starting and ending airports a different color\ncolors = []\n\nfor label in G:\n if label in['LHR','SJC']:\n colors.append('yellow')\n else:\n colors.append('#11cc77')\n\n# Now draw the graph \nplt.figure(figsize=(5,5))\nnx.draw(G, node_color=colors, node_size=1200, with_labels=True)\nplt.show()",
"_____no_output_____"
]
],
[
[
"# PART 2 - Examples that use iPython Gremlin\n\nThis part of the notebook contains examples that use the iPython Gremlin Jupyter extension to work with a Neptune instance using Gremlin.",
"_____no_output_____"
],
[
"## Configuring iPython Gremlin to work with Neptune\n\nBefore we can start to use iPython Gremlin we need to load the Jupyter Kernel extension and configure access to our Neptune endpoint.",
"_____no_output_____"
]
],
[
[
"# Create a string containing the full Web Socket path to the endpoint\n# Replace <neptune-instance-name> with the name of your Neptune instance.\n# which will be of the form myinstance.us-east-1.neptune.amazonaws.com\n\n#neptune_endpoint = '<neptune-instance-name>'\nimport os\nneptune_endpoint = os.environ['NEPTUNE_CLUSTER_ENDPOINT']\nneptune_port = os.environ['NEPTUNE_CLUSTER_PORT']\nneptune_gremlin_endpoint = 'wss://' + neptune_endpoint + ':' + neptune_port + '/gremlin'\n\n# Load the iPython Gremlin extension and setup access to Neptune.\n%load_ext gremlin\n%gremlin.connection.set_current $neptune_gremlin_endpoint",
"_____no_output_____"
]
],
[
[
"## Run this cell if you need to reload the Gremlin extension.\nOccaisionally it becomes necessary to reload the iPython Gremlin extension to make things work. Running this cell will do that for you.",
"_____no_output_____"
]
],
[
[
"# Re-load the iPython Gremlin Jupyter Kernel extension.\n%reload_ext gremlin",
"_____no_output_____"
]
],
[
[
"## A simple query to make sure we can connect to the graph. \n\nFind all the airports in England that are in London. Notice that when using iPython Gremlin you do not need to use a terminal step such as `next` or `toList` at the end of the query in order to get it to return results. As mentioned earlier in this post, the `%reset -f` is to work around a known issue with iPython Gremlin.",
"_____no_output_____"
]
],
[
[
"%reset -f\n%gremlin g.V().has('airport','region','GB-ENG') \\\n .has('city','London').values('desc')",
"_____no_output_____"
]
],
[
[
"### You can store the results of a query in a variable just as when using Gremlin Python.\nThe query below is the same as the previous one except that the results of running the query are stored in the variable 'places'. We can then work with that variable in our code.",
"_____no_output_____"
]
],
[
[
"%reset -f\nplaces = %gremlin g.V().has('airport','region','GB-ENG') \\\n .has('city','London').values('desc')\nfor p in places:\n print(p)",
"_____no_output_____"
]
],
[
[
"### Treating entire cells as Gremlin\nAny cell that begins with `%%gremlin` tells iPython Gremlin to treat the entire cell as Gremlin. You cannot mix Python code into these cells.",
"_____no_output_____"
]
],
[
[
"%%gremlin\ng.V().has('city','London').has('region','GB-ENG').count()\n",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
e7fa3d2562e08035d5ccd6fd81027779889cabc7 | 883,741 | ipynb | Jupyter Notebook | Project4_NN/test_model_unseen_data_reverse.ipynb | Rendsnack/Thesis-SMSL | 17ae162401df8e8666ad2252be26148a9d18a47a | [
"MIT"
] | null | null | null | Project4_NN/test_model_unseen_data_reverse.ipynb | Rendsnack/Thesis-SMSL | 17ae162401df8e8666ad2252be26148a9d18a47a | [
"MIT"
] | null | null | null | Project4_NN/test_model_unseen_data_reverse.ipynb | Rendsnack/Thesis-SMSL | 17ae162401df8e8666ad2252be26148a9d18a47a | [
"MIT"
] | null | null | null | 417.055687 | 50,404 | 0.934294 | [
[
[
"import librosa\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n%matplotlib inline\nimport os\nimport csv\nimport natsort\nfrom openpyxl import load_workbook\nimport random\nfrom random import randrange\nfrom sklearn.metrics import confusion_matrix, cohen_kappa_score\nimport seaborn as sn\nimport pandas as pd\nimport matplotlib.pyplot as plt\n# Preprocessing\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import LabelEncoder, StandardScaler\n#Keras\nimport keras\n\nfrom keras import models\nfrom keras import layers\nfrom tensorflow.keras.models import load_model\nfrom tensorflow.keras.utils import plot_model",
"_____no_output_____"
]
],
[
[
"LOAD DESIRED MODEL",
"_____no_output_____"
]
],
[
[
"#load certain model\nmodel = load_model('./for_old22/reverse_MFCC_Dense_Classifier_l-3_u-512_e-1000_1588062326.h5')\n# plot_model(model, to_file='reverse_MFCC_Dense_Classifier_model.png', show_shapes=True,show_layer_names=True)",
"_____no_output_____"
]
],
[
[
"LOAD TEST DATA",
"_____no_output_____"
]
],
[
[
"#read test dataset from csv\n# librispeech\ndata5_unseen_10 = pd.read_csv('D:/Users/MC/Documents/UNI/MASTER/thesis/MFCC_FEATURES2/reverse_Mel_scale/data5_unseen_10ms_R.csv')\ndata5_unseen_50 = pd.read_csv('D:/Users/MC/Documents/UNI/MASTER/thesis/MFCC_FEATURES2/reverse_Mel_scale/data5_unseen_50ms_R.csv')\ndata5_unseen_100 = pd.read_csv('D:/Users/MC/Documents/UNI/MASTER/thesis/MFCC_FEATURES2/reverse_Mel_scale/data5_unseen_100ms_R.csv')\ndata5_unseen_500 = pd.read_csv('D:/Users/MC/Documents/UNI/MASTER/thesis/MFCC_FEATURES2/reverse_Mel_scale/data5_unseen_500ms_R.csv')\ndata5_unseen_1000 = pd.read_csv('D:/Users/MC/Documents/UNI/MASTER/thesis/MFCC_FEATURES2/reverse_Mel_scale/data5_unseen_1000ms_R.csv')\n\n# musan\n#music\ndata6_unseen_10 = pd.read_csv('D:/Users/MC/Documents/UNI/MASTER/thesis/MFCC_FEATURES2/reverse_Mel_scale/data6_unseen_10ms_R.csv')\ndata6_unseen_50 = pd.read_csv('D:/Users/MC/Documents/UNI/MASTER/thesis/MFCC_FEATURES2/reverse_Mel_scale/data6_unseen_50ms_R.csv')\ndata6_unseen_100 = pd.read_csv('D:/Users/MC/Documents/UNI/MASTER/thesis/MFCC_FEATURES2/reverse_Mel_scale/data6_unseen_100ms_R.csv')\ndata6_unseen_500 = pd.read_csv('D:/Users/MC/Documents/UNI/MASTER/thesis/MFCC_FEATURES2/reverse_Mel_scale/data6_unseen_500ms_R.csv')\ndata6_unseen_1000 = pd.read_csv('D:/Users/MC/Documents/UNI/MASTER/thesis/MFCC_FEATURES2/reverse_Mel_scale/data6_unseen_1000ms_R.csv')\n#speech\ndata7_10 = pd.read_csv('D:/Users/MC/Documents/UNI/MASTER/thesis/MFCC_FEATURES2/reverse_Mel_scale/data7_unseen_10ms_R.csv')\ndata7_50 = pd.read_csv('D:/Users/MC/Documents/UNI/MASTER/thesis/MFCC_FEATURES2/reverse_Mel_scale/data7_unseen_50ms_R.csv')\ndata7_100 = pd.read_csv('D:/Users/MC/Documents/UNI/MASTER/thesis/MFCC_FEATURES2/reverse_Mel_scale/data7_unseen_100ms_R.csv')\ndata7_500 = pd.read_csv('D:/Users/MC/Documents/UNI/MASTER/thesis/MFCC_FEATURES2/reverse_Mel_scale/data7_unseen_500ms_R.csv')\ndata7_1000 = pd.read_csv('D:/Users/MC/Documents/UNI/MASTER/thesis/MFCC_FEATURES2/reverse_Mel_scale/data7_unseen_1000ms_R.csv')",
"_____no_output_____"
]
],
[
[
"GET TOTAL NUMBER OF FILES PER TYPE <br>\ni.e. get number of entries per dataset (5,6) OR number of entries per IR-length (10,50,100,500,1000)",
"_____no_output_____"
]
],
[
[
"investigate_differencess_between_datasets = 1\n# else investigate between IR lenght",
"_____no_output_____"
],
[
"#aggregate all data\nif investigate_differencess_between_datasets:\n L5 = len(data5_unseen_10) + len(data5_unseen_50) + len(data5_unseen_100) + len(data5_unseen_500) + len(data5_unseen_1000)\n L6 = len(data6_unseen_10) + len(data6_unseen_50) + len(data6_unseen_100) + len(data6_unseen_500) + len(data6_unseen_1000)\n L7 = len(data7_10) + len(data7_50) + len(data7_100) + len(data7_500) + len(data7_1000)\n print(f'number of music samples: {L6}')\n print(f'number of speech samples: {L5+L7} \\tof which {L5} are from Librispeech and {L7} are from Musan')\n data = pd.concat([data5_unseen_10, data5_unseen_50, data5_unseen_100, data5_unseen_500, data5_unseen_1000, data6_unseen_10, data6_unseen_50, data6_unseen_100, data6_unseen_500, data6_unseen_1000, data7_10, data7_50, data7_100, data7_500, data7_1000])\n\nelse:\n L_10 = len(data5_unseen_10) + len(data6_unseen_10) + len(data7_10)\n L_50 = len(data5_unseen_50) + len(data6_unseen_50) + len(data7_50)\n L_100 = len(data5_unseen_100) + len(data6_unseen_100) + len(data7_100)\n L_500 = len(data5_unseen_500) + len(data6_unseen_500) + len(data7_500)\n L_1000 = len(data5_unseen_1000) + len(data6_unseen_1000) + len(data7_1000)\n\n print(f'number of IR_10ms samples: {L_10}')\n print(f'number of IR_50ms samples: {L_50}')\n print(f'number of IR_100ms samples: {L_100}')\n print(f'number of IR_500ms samples: {L_500}')\n print(f'number of IR_1000ms samples: {L_1000}')\n data = pd.concat([data5_unseen_10, data6_unseen_10, data7_10, data5_unseen_50, data6_unseen_50, data7_50, data5_unseen_100, data6_unseen_100, data7_100, data5_unseen_500, data6_unseen_500, data7_500, data5_unseen_1000, data6_unseen_1000, data7_1000])\n\nprint()\nprint(f'number of rows: {data.shape[0]}')\n#randomly display some of the data\nprint('random selection of rows:')\ndata_subset = data.sample(n=5)\ndata_subset.head()",
"number of music samples: 15800\nnumber of speech samples: 16000 \tof which 10000 are from Librispeech and 6000 are from Musan\n\nnumber of rows: 31800\nrandom selection of rows:\n"
]
],
[
[
"PREPARING DATA",
"_____no_output_____"
]
],
[
[
"#dropping unneccesary columns and storing filenames elsewhere\nfileNames = data['filename']\ndata = data.drop(['filename'],axis=1)",
"_____no_output_____"
],
[
"# function to reduce label resolution from every 9° to 4 quadrants\ndef reduce_Resolution(old_data):\n new_data = old_data.iloc[:, -1]\n new_label_list = pd.DataFrame(new_data)\n for i in range(len(new_data)):\n if 0 <= new_data.iloc[i] < 90:\n new_label_list.iloc[i] = 0\n if 90 <= new_data.iloc[i] < 180:\n new_label_list.iloc[i] = 1\n if 180 <= new_data.iloc[i] < 270:\n new_label_list.iloc[i] = 2\n if 270 <= new_data.iloc[i] < 360:\n new_label_list.iloc[i] = 3\n return new_label_list",
"_____no_output_____"
],
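[
"# Quick check of reduce_Resolution on a toy frame (hypothetical values; the\n# function only inspects the last column, which holds the angle in degrees):\ndemo = pd.DataFrame({'feat': [0.1, 0.2, 0.3, 0.4], 'label': [9, 99, 189, 279]})\nprint(reduce_Resolution(demo)) # expected quadrants: 0, 1, 2, 3",
"_____no_output_____"
],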
[
"#making labels\nlabels_list = data.iloc[:, -1]\n# labels_list = reduce_Resolution(data)\nencoder = LabelEncoder()\ny = encoder.fit_transform(labels_list)\nprint(f'labels are: {y}')",
"_____no_output_____"
],
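[
"# Sanity check on the encoding (assumption: the labels are multiples of 9°\n# from 0 to 351, so LabelEncoder should map them onto the integers 0..39):\nprint(encoder.classes_)\nprint(f'number of classes: {len(encoder.classes_)}')",
"_____no_output_____"
],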
[
"# normalizing\nscaler = StandardScaler()\nX = scaler.fit_transform(np.array(data.iloc[:, :-1], dtype = float))",
"_____no_output_____"
]
],
[
[
"MAKE PREDICTIONS AND EVALUATE",
"_____no_output_____"
]
],
[
[
"#make prediction for each sample in X and evaluate entire model to get an idea of accuracy\npredictions = model.predict(X)\nfinal_predictions = np.argmax(predictions,axis=1)\ntest_loss, test_acc = model.evaluate(X,y)",
"_____no_output_____"
]
],
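[
[
"# Report the evaluation above in readable form.\nprint(f'test loss: {test_loss:.4f}')\nprint(f'test accuracy: {test_acc:.4f}')",
"_____no_output_____"
]
],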
[
[
"COMPUTE SOME GENERAL STATISTICS",
"_____no_output_____"
]
],
[
[
"#method to get difference between elements on circular scale\ndef absolute_diff(int1,int2):\n m_min = min(int1,int2)\n m_max = max(int1,int2)\n diff1 = m_max-m_min\n diff2 = m_min + 40 - m_max\n return diff1 if diff1 <=20 else diff2",
"_____no_output_____"
],
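[
"# Sanity check for absolute_diff: the scale is circular with 40 bins of 9°,\n# so bins 39 and 1 are 2 bins (18°) apart rather than 38.\nprint(absolute_diff(39, 1)) # -> 2\nprint(absolute_diff(0, 20)) # -> 20 (opposite directions, 180°)",
"_____no_output_____"
],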
[
"##COMPUTE STATISTICS\nlabels = y\npredictions = predictions\n#check which errors occur\noccuring_errors = np.zeros(21)\n#check which direction are misclassified most often\nhardest_to_predict = np.zeros(40)\n#check what type of files make misclassification\nindexes_of_misclassifications = []\nmisclassifications = []\n#check what type of files make the worst misclassifications\nindexes_of_grave_misclassifications = []\ngrave_misclassifications = []\n#check which datasets produces what type of errors\nall_errors_5 = np.zeros(21)\nall_errors_6 = np.zeros(21)\nall_errors_7 = np.zeros(21)\nall_errors_10 = np.zeros(21)\nall_errors_50 = np.zeros(21)\nall_errors_100 = np.zeros(21)\nall_errors_500 = np.zeros(21)\nall_errors_1000 = np.zeros(21)\n#correct direction\nall_correct = np.zeros(40)\nsum_correct = 0\nfor i in range(final_predictions.shape[0]):\n label = labels[i]\n predicted = final_predictions[i]\n error = absolute_diff(predicted,label)\n occuring_errors[error] = occuring_errors[error] + 1\n if error != 0:\n hardest_to_predict[label] += 1 \n indexes_of_misclassifications.append(i)\n misclassifications.append(fileNames.iloc[i])\n else :\n all_correct[label] += 1\n sum_correct += 1\n if error > 5:\n indexes_of_grave_misclassifications.append(i)\n grave_misclassifications.append(fileNames.iloc[i])\n if investigate_differencess_between_datasets:\n if 0 <= i < L5:\n all_errors_5[error] += 1\n elif L5 <= i < L5 + L6:\n all_errors_6[error] += 1\n elif L5 + L6 <= i < L5 + L6 + L7:\n all_errors_7[error] += 1\n else:\n if 0 <= i < L_10:\n all_errors_10[error] += 1\n elif L_10 <= i < L_10 + L_50:\n all_errors_50[error] += 1\n elif L_10 + L_50 <= i < L_10 + L_50 + L_100:\n all_errors_100[error] += 1\n elif L_10 + L_50 + L_100 <= i < L_10 + L_50 + L_100 + L_500:\n all_errors_500[error] += 1\n elif L_10 + L_50 + L_100 + L_500 <= i < L_10 + L_50 + L_100 + L_500 + L_1000:\n all_errors_1000[error] += 1\n \navg_occuring_errors = occuring_errors/(labels.shape[0])\n# avg_hardest_to_predict = hardest_to_predict/(labels.shape[0]) \navg_hardest_to_predict = hardest_to_predict/(labels.shape[0]-sum_correct) \nif investigate_differencess_between_datasets:\n avg_errors_5 = all_errors_5/L5\n avg_errors_6 = all_errors_6/L6\n avg_errors_7 = all_errors_7/L7\n AVG_errors_5 = all_errors_5/(labels.shape[0])\n AVG_errors_6 = all_errors_6/(labels.shape[0])\n AVG_errors_7 = all_errors_7/(labels.shape[0])\nelse :\n avg_errors_10 = all_errors_10/L_10\n avg_errors_50 = all_errors_50/L_50\n avg_errors_100 = all_errors_100/L_100\n avg_errors_500 = all_errors_500/L_500\n avg_errors_1000 = all_errors_1000/L_1000\n AVG_errors_10 = all_errors_10/(labels.shape[0])\n AVG_errors_50 = all_errors_50/(labels.shape[0])\n AVG_errors_100 = all_errors_100/(labels.shape[0])\n AVG_errors_500 = all_errors_500/(labels.shape[0])\n AVG_errors_1000 = all_errors_1000/(labels.shape[0])\n \nhardest_direction = np.argmax(avg_hardest_to_predict)\nindexes_of_hardes_direction = np.where(labels==hardest_direction)\nhardest_direction_confusion = np.zeros(40)\nhardest_direction_start_index = indexes_of_hardes_direction[0][0]\nhardest_direction_end_index = indexes_of_hardes_direction[0][-1]\n\n#iterate over all predictions that should have predicted 'hardest_direction' and store what they actually predicted\nfor i in range(indexes_of_hardes_direction[0][0],indexes_of_hardes_direction[0][-1]):\n predicted = np.argmax(predictions[i])\n hardest_direction_confusion[predicted] += 1 \navg_hardest_direction_confusion = hardest_direction_confusion / 
(hardest_direction_end_index-hardest_direction_start_index)\n\n#compute confusion matrix\nconfusion_array = confusion_matrix(y,final_predictions) #true,#predicted\n\n#compute confusion matrix if labels can be off by 27°\ntolerated_error_d = 27#degrees\nprint(f'tolerated error is {tolerated_error_d}°')\ntolerated_error = int(tolerated_error_d/9)\ntolerated_final_predictions = final_predictions\nfor i in range(final_predictions.shape[0]):\n predicition = final_predictions[i]\n label = y[i]\n error = absolute_diff(predicition,label)\n if error < tolerated_error:\n tolerated_final_predictions[i] = label\ntolerated_confusion_array = confusion_matrix(y, tolerated_final_predictions)",
"tolerated error is 27°\n"
]
],
[
[
"PLOT STATISTICS",
"_____no_output_____"
]
],
[
[
"#ERROR OCCURENCE\nx_as = np.array(range(21))\nplt.bar(x_as,avg_occuring_errors)\nplt.title('reverse model: average error occurrence on unseen data')\nplt.ylabel('%')\nplt.ylim([0,0.5])\nplt.xlabel('error [°]')\nplt.xticks([ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20], [ 0, 18, 36, 54, 72, 90, 108, 126, 144, 162, 180])\nsave_fig_file_path = 'D:/Users/MC/Documents/UNI/MASTER/thesis/SCRIPTURE_FIGURES/H6/rev_model_error_unseen_data'\nplt.savefig(f'{save_fig_file_path}.png')\nplt.show();\nerror_27 = np.sum(avg_occuring_errors[0:3])\nprint(f'{int(error_27*100)}% of predictions are correct within 27°')\nerror_45 = np.sum(avg_occuring_errors[0:5])\nprint(f'{int(error_45*100)}% of predictions are correct within 45°')\nerror_90 = np.sum(avg_occuring_errors[0:10])\nprint(f'{int(error_90*100)}% of predictions are correct within 90°')",
"_____no_output_____"
],
[
"#HARDEST TO PREDICT\nx_as = np.array(range(40))\nplt.bar(x_as,avg_hardest_to_predict)\nplt.title('reverse model: hardest directions to predict, unseen data')\nplt.ylabel('%')\nplt.ylim([0,0.05])\nplt.xlabel('angle [°]')\nplt.xticks([0,5,10,15,20,25,30,35,40], [ 0,45,90,135,180,225,270,315,360])\nsave_fig_file_path = 'D:/Users/MC/Documents/UNI/MASTER/thesis/SCRIPTURE_FIGURES/H6/rev_model_hardest_dir'\n# plt.savefig(f'{save_fig_file_path}.png')\nplt.show();",
"_____no_output_____"
],
[
"#CONFUSION CORRESPONDING TO HARDEST DIRECTION\nx_as = np.array(range(40))\nplt.title(f'reverse model: confusion for hardest direction to predict ({hardest_direction*9}°), unseen data')\nplt.ylabel('%')\nplt.xlabel('angle [°]')\nplt.xticks([0,5,10,15,20,25,30,35,40], [ 0,45,90,135,180,225,270,315,360])\nplt.bar(x_as,avg_hardest_direction_confusion);",
"_____no_output_____"
],
[
"#CONFUSION MATRIX\ndf_cm = pd.DataFrame(confusion_array, range(40), range(40))\nnorm_cm = df_cm.astype('float') / df_cm.sum(axis=1)[:, np.newaxis]\ndf_cm = norm_cm\nplt.figure(figsize=(22,18),dpi=120)\nsn.set(font_scale=2) # for label size\nsn.heatmap(df_cm,vmin=0,vmax=1) # font size\nplt.yticks([ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20,22,24,26,28,30,32,34,36,38], [ 0, 18, 36, 54, 72, 90, 108, 126, 144, 162, 180,198,216,234,252,270,288,306,324,342])\nplt.xlabel('predicted angle[°]')\nplt.xticks([ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20,22,24,26,28,30,32,34,36,38], [ 0, 18, 36, 54, 72, 90, 108, 126, 144, 162, 180,198,216,234,252,270,288,306,324,342])\nplt.ylabel('actual angle[°]')\nplt.title('reverse model: normalized confusion matrix',fontsize=40)\nsave_fig_file_path = 'D:/Users/MC/Documents/UNI/MASTER/thesis/SCRIPTURE_FIGURES/H6/rev_model_confusion'\n# plt.savefig(f'{save_fig_file_path}.png')\nplt.show()\nsn.set(font_scale=1)",
"_____no_output_____"
],
[
"#CONFUSION MATRIX\ndf_cm = pd.DataFrame(tolerated_confusion_array, range(40), range(40))\nnorm_cm = df_cm.astype('float') / df_cm.sum(axis=1)[:, np.newaxis]\ndf_cm = norm_cm\nplt.figure(figsize=(22,18),dpi=120)\nsn.set(font_scale=2) # for label size\nsn.heatmap(df_cm,vmin=0,vmax=1) # font size\nplt.yticks([ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20,22,24,26,28,30,32,34,36,38], [ 0, 18, 36, 54, 72, 90, 108, 126, 144, 162, 180,198,216,234,252,270,288,306,234,342])\nplt.xlabel('predicted angle[°]')\nplt.xticks([ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20,22,24,26,28,30,32,34,36,38], [ 0, 18, 36, 54, 72, 90, 108, 126, 144, 162, 180,198,216,234,252,270,288,306,234,342])\nplt.ylabel('actual angle[°]')\nplt.title(f'reverse model: normalized confusion matrix with toleration of {tolerated_error_d}',fontsize=40)\nsave_fig_file_path = 'D:/Users/MC/Documents/UNI/MASTER/thesis/SCRIPTURE_FIGURES/H6/rev_model_confusion_tol'\n# plt.savefig(f'{save_fig_file_path}.png')\nplt.show()\nsn.set(font_scale=1)",
"_____no_output_____"
],
[
"#RANDOMLY SELECT 1 INDEX AND COMPARE THE LABEL VS THE PREDICTION\nindex = randrange(0,X.shape[0])\nlabel = y[index]\nprint(\"label:\")\nprint(label)\nprint(\"predicted:\")\nprint(np.argmax(predictions[index]))\n#linear bar plot\nplt.bar(np.arange(len(predictions[index,:])),predictions[index,:], align='center', alpha=1)\nlabels = np.zeros((40,))\nlabels[label] = np.amax(predictions[index])\nplt.bar(np.arange(len(predictions[index,:])),labels[:], align='center', alpha=1)\nplt.ylabel('%')\nplt.xlabel('label')\nplt.title('direction')\nplt.show()\n\n#polar bar plot\nN = 40\ntheta = np.linspace(0.0, 2 * np.pi, N, endpoint=False)\nwidth = np.pi / 40 \nax = plt.subplot(111, projection='polar')\nax.bar(theta, predictions[index,:], width=width, color='b', bottom=0.0, alpha=1)\nax.bar(theta, labels[:], width=width, color='g', bottom=0.0, alpha=0.5)\nr_max = np.amax(predictions[index])\nr = np.linspace(0.1*r_max, 0.8*r_max, 3)\nr = np.round(r,2)\nax.set_rticks(r)\nplt.tight_layout()\nplt.show()",
"label:\n36\npredicted:\n7\n"
],
[
"#RANDOMLY SELECT A HUNDRED SAMPLES AND PLOT THOSE WHO ARE OF BY MORE THAN 45° AND SAVE THOSE\nsave_fig_location = 'D:/Users/MC/Documents/UNI/MASTER/thesis/SCRIPTURE_FIGURES/H6/misclassifications'\ncounter = 0\nrandomIndexes = random.sample(range(0,X.shape[0]),100)\nallErrors = []\nfor index in randomIndexes:\n label = y[index]\n predicted = np.argmax(predictions[index])\n output = f'label: {label} \\t predict: {predicted}'\n error = absolute_diff(predicted,label)\n error = absolute_diff(predicted,label)\n if error != 0:\n output += f'\\t error: {error}'\n allErrors.append(error)\n print(output)\n if error >5:\n labels = np.zeros((40,))\n labels[label] = np.amax(predictions[index])\n ax = plt.subplot(111, projection='polar')\n ax.bar(theta, predictions[index,:], width=width, color='b', bottom=0.0, alpha=1)\n ax.bar(theta, labels[:], width=width, color='g', bottom=0.0, alpha=0.5)\n r_max = np.amax(predictions[index])\n r = np.linspace(0.1*r_max, 0.8*r_max, 3)\n r = np.round(r,2)\n ax.set_rticks(r)\n plt.tight_layout()\n# plt.savefig(f'{save_fig_location}/{fileNames.iloc[index]}.png')\n plt.show()\n print(fileNames.iloc[index])\n print()\n counter += 1\nprint(f'{counter} of {len(randomIndexes)} were off by more than 45°')\nallErrors = np.array(allErrors)\nm_mean = np.round(np.mean(allErrors))\nm_max = np.amax(allErrors)\nprint(f'average error is {m_mean} or {m_mean*9}°')\nprint(f'max error is {m_max} or {m_max*9}°')",
"label: 16 \t predict: 24\t error: 8\n"
]
],
[
[
"RANDOM TESTING",
"_____no_output_____"
]
],
[
[
"#types of errors\n#iterate direction per direction and see what types of errors occur\nindex = 0\nwhile y[index] == 0:\n ax = plt.subplot(111, projection='polar')\n ax.bar(theta, predictions[index,:], width=width, color='b', bottom=0.0, alpha=1)\n plt.show() \n index += 1",
"_____no_output_____"
],
[
"dirrection = 90\ndf = pd.DataFrame(data)\nsub_data = df.loc[df['label'] == dirrection]\nsub_data.head()",
"_____no_output_____"
],
[
"#making labels\nlabels_list = sub_data.iloc[:, -1]\n# labels_list = reduce_Resolution(data)\nencoder = LabelEncoder()\nsub_y = encoder.fit_transform(labels_list)\nprint(sub_y)\nprint(sub_y.shape)",
"_____no_output_____"
],
[
"# normalizing\nscaler = StandardScaler()\nsub_X = scaler.fit_transform(np.array(sub_data.iloc[:, :-1], dtype = float))",
"_____no_output_____"
],
[
"#make prediction for each sample in X and evaluate entire model to get an idea of accuracy\nsub_predictions = model.predict(X)",
"_____no_output_____"
],
[
"#randomly select a hundred samples and plot those who are of by more than 45° and save those\nsave_fig_location = 'D:/Users/MC/Documents/UNI/MASTER/thesis/SCRIPTURE_FIGURES/reverse_Mel_scale/unseen'\ncounter = 0\nrandomIndexes = random.sample(range(0,sub_X.shape[0]),100)\nallErrors = []\nfor index in randomIndexes:\n label = sub_y[index]\n predicted = np.argmax(sub_predictions[index])\n output = f'label: {label} \\t predict: {predicted}'\n error = absolute_diff(predicted,label)\n error = absolute_diff(predicted,label)\n if error != 0:\n output += f'\\t error: {error}'\n allErrors.append(error)\n print(output)\n if error >5:\n smart_pred = smart_prediction(predictions[index])\n output += f'\\t smart_predict: {smart_pred}'\n smart_error = absolute_diff(smart_pred,label)\n output += f'\\t smart_error: {smart_error}'\n print(output)\n labels = np.zeros((40,))\n labels[label] = np.amax(predictions[index])\n ax = plt.subplot(111, projection='polar')\n ax.bar(theta, predictions[index,:], width=width, color='b', bottom=0.0, alpha=1)\n ax.bar(theta, labels[:], width=width, color='g', bottom=0.0, alpha=0.5)\n# plt.savefig(f'{save_fig_location}/{fileNames.iloc[index]}.png')\n plt.show()\n print(fileNames.iloc[index])\n print()\n counter += 1\nprint(f'{counter} of {len(randomIndexes)} were off by more than 45°')\nallErrors = np.array(allErrors)\nm_mean = np.round(np.mean(allErrors))\nm_max = np.amax(allErrors)\nprint(f'average error is {m_mean} or {m_mean*9}°')\nprint(f'max error is {m_max} or {m_max*9}°')",
"_____no_output_____"
],
[
"og_true = np.copy(y)\nog_predictions = final_predictions\nnew_true = np.zeros(og_true.shape[0])\nnew_predictions = np.zeros(og_predictions.shape[0])\nfor i in range(og_predictions.shape[0]):\n if og_predictions[i] & 0x1: #odd\n new_predictions[i] = int(og_predictions[i]-1)\n else : #even\n new_predictions[i] = int(og_predictions[i])\n if og_true[i] & 0x1: #odd\n new_true[i] = int(og_true[i]-1)\n else : #even\n new_true[i] = int(og_true[i])",
"_____no_output_____"
],
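[
"# Quick check of the bin merging above (odd 9° bins fold onto the preceding\n# even bin, halving the resolution to 20 bins of 18°):\nfor b in (0, 1, 2, 3, 38, 39):\n print(b, '->', b - 1 if b & 0x1 else b)",
"_____no_output_____"
],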
[
"red_confusion_array = confusion_matrix(new_true,new_predictions) #true,#predicted\nred_confusion_array.shape",
"_____no_output_____"
],
[
"#CONFUSION MATRIX\ndf_cm = pd.DataFrame(red_confusion_array, range(20), range(20))\nnorm_cm = df_cm.astype('float') / df_cm.sum(axis=1)[:, np.newaxis]\ndf_cm = norm_cm\nplt.figure(figsize=(22,18),dpi=120)\nsn.set(font_scale=2) # for label size\nsn.heatmap(df_cm,vmin=0,vmax=1) # font size\nplt.yticks([ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18,], [ 0, 36, 72, 108, 144, 180,216,252,288,324])\nplt.xlabel('predicted angle[°]')\nplt.xticks([ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18,], [ 0, 36, 72, 108, 144, 180,216,252,288,324])\nplt.ylabel('actual angle[°]')\nplt.title(f'reverse model: normalized confusion matrix ',fontsize=40)\nsave_fig_file_path = 'D:/Users/MC/Documents/UNI/MASTER/thesis/SCRIPTURE_FIGURES/H6/rev_model_confusion_reduced'\nplt.savefig(f'{save_fig_file_path}.png')\nplt.show()\nsn.set(font_scale=1)",
"_____no_output_____"
]
],
[
[
"ALL MISCLASSIFICATIONS",
"_____no_output_____"
]
],
[
[
"error_5 = 0\nerror_6 = 0\nerror_7 = 0\nfor i in range(len(indexes_of_misclassifications)):\n if 0 <= indexes_of_misclassifications[i] < L5:\n error_5 += 1\n elif L5 <= indexes_of_misclassifications[i] < L5 + L6:\n error_6 += 1\n elif L5 + L6 <= indexes_of_misclassifications[i] < L5 + L6 + L7:\n error_7 += 1\nprint('errors per dataset are..')\nprint(f'dataset5 has {error_5} total errors which is {int(100*error_5/L5)}% of this dataset')\nprint(f'dataset6 has {error_6} total errors which is {int(100*error_6/L6)}% of this dataset')\nprint()\nprint('overall picutre is ..')\nprint(f'dataset5 accounts for {int(100*error_5/len(indexes_of_misclassifications))}% of total errors')\nprint(f'dataset6 accounts for {int(100*error_6/len(indexes_of_misclassifications))}% of total errors')\nprint(f'dataset7 accounts for {int(100*error_7/len(indexes_of_misclassifications))}% of total errors')\nprint()\nprint('LATEX:')\nprint(f'dataset3 & speech & {error_5} & {int(100*error_5/L5)}\\% & {int(100*error_5/len(indexes_of_misclassifications))}\\% \\\\\\\\')\nprint(f'dataset4 & speech & {error_7} & {int(100*error_7/L7)}\\% & {int(100*error_7/len(indexes_of_misclassifications))}\\% \\\\\\\\')\nprint(f'dataset5 & music & {error_6} & {int(100*error_6/L6)}\\% & {int(100*error_6/len(indexes_of_misclassifications))}\\% \\\\\\\\')\n",
"_____no_output_____"
]
],
[
[
"GRAVE MISCLASSIFICATIONS <br>\ni.e. error > 45°",
"_____no_output_____"
]
],
[
[
"error_5_G = 0\nerror_6_G = 0\nerror_7_G = 0\nfor i in range(len(indexes_of_grave_misclassifications)):\n if 0 <= indexes_of_grave_misclassifications[i] < L5:\n error_5_G += 1\n elif L5 <= indexes_of_grave_misclassifications[i] < L5 + L6:\n error_6_G += 1\n elif L5 + L6 <= indexes_of_grave_misclassifications[i] < L5 + L6 + L7:\n error_7_G += 1\nprint('errors per dataset are..')\nprint(f'dataset5 has {error_5_G} total errors which is {int(100*error_5_G/L5)}% of this dataset')\nprint(f'dataset6 has {error_6_G} total errors which is {int(100*error_6_G/L6)}% of this dataset')\nprint(f'dataset7 has {error_7_G} total errors which is {int(100*error_7_G/L7)}% of this dataset')\nprint()\nprint('overall picutre is ..')\nprint(f'dataset5 accounts for {int(100*error_5_G/len(indexes_of_grave_misclassifications))}% of total errors')\nprint(f'dataset6 accounts for {int(100*error_6_G/len(indexes_of_grave_misclassifications))}% of total errors')\nprint(f'dataset7 accounts for {int(100*error_7_G/len(indexes_of_grave_misclassifications))}% of total errors')\nprint()\nprint('LATEX:')\nprint(f'dataset3 & speech & {error_5_G} & {int(100*error_5_G/L5)}\\% & {int(100*error_5_G/len(indexes_of_grave_misclassifications))}\\% & {int(100*error_5_G/len(indexes_of_misclassifications))}\\% \\\\\\\\')\nprint(f'dataset4 & speech & {error_7_G} & {int(100*error_7_G/L7)}\\% & {int(100*error_7_G/len(indexes_of_grave_misclassifications))}\\% & {int(100*error_7_G/len(indexes_of_misclassifications))}\\% \\\\\\\\')\nprint(f'dataset5 & music & {error_6_G} & {int(100*error_6_G/L6)}\\% & {int(100*error_6_G/len(indexes_of_grave_misclassifications))}\\% & {int(100*error_6_G/len(indexes_of_misclassifications))}\\% \\\\\\\\')\n",
"_____no_output_____"
]
],
[
[
"ERROR PER DATASET",
"_____no_output_____"
]
],
[
[
"x_as = np.array(range(21))\nplt.bar(x_as,avg_errors_5)\nplt.title('reverse model: average error occurrence on unseen data5')\nplt.ylabel('%')\nplt.ylim([0,1])\nplt.xlabel('error [°]')\nplt.xticks([ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20], [ 0, 18, 36, 54, 72, 90, 108, 126, 144, 162, 180])\nsave_fig_file_path = 'D:/Users/MC/Documents/UNI/MASTER/thesis/SCRIPTURE_FIGURES/H6/rev_model_errors5_error_unseen_data'\n# plt.savefig(f'{save_fig_file_path}.png')\nplt.show();\nerror_27 = np.sum(avg_errors_5[0:3])\nprint(f'{int(error_27*100)}% of predictions are correct within 27°')\nerror_45 = np.sum(avg_errors_5[0:5])\nprint(f'{int(error_45*100)}% of predictions are correct within 45°')\nerror_90 = np.sum(avg_errors_5[0:10])\nprint(f'{int(error_90*100)}% of predictions are correct within 90°')",
"_____no_output_____"
],
[
"x_as = np.array(range(21))\nplt.bar(x_as,avg_errors_6)\nplt.title('reverse model: average error occurrence on unseen data6')\nplt.ylabel('%')\nplt.ylim([0,1])\nplt.xlabel('error [°]')\nplt.xticks([ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20], [ 0, 18, 36, 54, 72, 90, 108, 126, 144, 162, 180])\nsave_fig_file_path = 'D:/Users/MC/Documents/UNI/MASTER/thesis/SCRIPTURE_FIGURES/H6/rev_model_errors6_error_unseen_data'\n# plt.savefig(f'{save_fig_file_path}.png')\nplt.show();\nerror_27 = np.sum(avg_errors_6[0:3])\nprint(f'{int(error_27*100)}% of predictions are correct within 27°')\nerror_45 = np.sum(avg_errors_6[0:5])\nprint(f'{int(error_45*100)}% of predictions are correct within 45°')\nerror_90 = np.sum(avg_errors_6[0:10])\nprint(f'{int(error_90*100)}% of predictions are correct within 90°')",
"_____no_output_____"
],
[
"x_as = np.array(range(21))\nplt.bar(x_as,avg_errors_7)\nplt.title('reverse model: average error occurrence on unseen data7')\nplt.ylabel('%')\nplt.ylim([0,1])\nplt.xlabel('error [°]')\nplt.xticks([ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20], [ 0, 18, 36, 54, 72, 90, 108, 126, 144, 162, 180])\nsave_fig_file_path = 'D:/Users/MC/Documents/UNI/MASTER/thesis/SCRIPTURE_FIGURES/H6/rev_model_errors5_error_unseen_data'\n# plt.savefig(f'{save_fig_file_path}.png')\nplt.show();\nerror_27 = np.sum(avg_errors_7[0:3])\nprint(f'{int(error_27*100)}% of predictions are correct within 27°')\nerror_45 = np.sum(avg_errors_7[0:5])\nprint(f'{int(error_45*100)}% of predictions are correct within 45°')\nerror_90 = np.sum(avg_errors_7[0:10])\nprint(f'{int(error_90*100)}% of predictions are correct within 90°')",
"_____no_output_____"
],
[
"df = pd.DataFrame({'dataset3':avg_errors_5, 'dataset4':avg_errors_6, 'dataset5':avg_errors_7})\ndf.plot(kind='bar', stacked=True)\nplt.title('distribution of errors between datasets')\nplt.ylabel('%')\n# plt.ylim([0,0.5])\nplt.xlabel('error [°]')\nplt.xticks([ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20], [ 0, 18, 36, 54, 72, 90, 108, 126, 144, 162, 180])\nsave_fig_file_path = 'D:/Users/MC/Documents/UNI/MASTER/thesis/SCRIPTURE_FIGURES/H6/error_distribtuion_between_datasets'\n# plt.savefig(f'{save_fig_file_path}.png')",
"_____no_output_____"
],
[
"x = np.array(range(21))\nwidth = 0.25\nax = plt.subplots(111)\nrects1 = ax.bar(x - width/3, avg_errors_5, width, label='avg_errors_5')\nrects2 = ax.bar(x + width, avg_errors_6, width, label='avg_errors_6')\nrects3 = ax.bar(x + width/3, avg_errors_7, width, label='avg_errors_7')\nax.set_ylabel('Scores')\nax.set_title('Scores by group and gender')\nax.set_xticks(x)\nax.set_xticklabels(labels)\nax.legend()\n# ax.xticks([ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20], [ 0, 18, 36, 54, 72, 90, 108, 126, 144, 162, 180])\nfig.tight_layout()",
"_____no_output_____"
],
[
"x_as = np.array(range(21))\nplt.bar(x_as,AVG_errors_7)\nplt.title('reverse model: average error occurrence on unseen data')\nplt.ylabel('%')\nplt.ylim([0,1])\nplt.xlabel('error [°]')\nplt.xticks([ 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20], [ 0, 18, 36, 54, 72, 90, 108, 126, 144, 162, 180])\nsave_fig_file_path = 'D:/Users/MC/Documents/UNI/MASTER/thesis/SCRIPTURE_FIGURES/H6/rev_modeldumrror_unseen_data'\n# plt.savefig(f'{save_fig_file_path}.png')\nplt.show();",
"_____no_output_____"
]
],
[
[
"ATTEMPT TO PLOT RADAR CHART",
"_____no_output_____"
]
],
[
[
"avg_correct = all_correct/sum_correct\nx_as = np.array(range(40))",
"_____no_output_____"
],
[
"avg_correct = all_correct/sum_correct\nx_as = np.array(range(40))\nN = 40\ntheta = np.linspace(0.0, 2 * np.pi, N, endpoint=True)\nwidth = np.pi / 40 \n\nfig= plt.figure(dpi=120)\nax = fig.add_subplot(111, polar=True)\nax.plot(theta, avg_correct, '-', linewidth=2)\nax.fill(theta, avg_correct, alpha=0.25)\n# ax.set_thetagrids(angles * 180/np.pi, labels)\nplt.yticks([])\nax.set_title(\"distribution of correctly predicted directions\", y=1.1)\nax.grid(True)\nfig.tight_layout()\nsave_fig_file_path = 'D:/Users/MC/Documents/UNI/MASTER/thesis/SCRIPTURE_FIGURES/H6/correctness_distribution'\n# plt.savefig(f'{save_fig_file_path}.png')",
"_____no_output_____"
],
[
"x_as = np.array(range(40))\nplt.bar(x_as,all_correct)\nplt.show()\nN = 40\ntheta = np.linspace(0.0, 2 * np.pi, N, endpoint=True)\nwidth = np.pi / 40 \n\nfig= plt.figure(dpi=120)\nax = fig.add_subplot(111, polar=True)\nax.plot(theta, all_correct, '-', linewidth=2)\nax.fill(theta, all_correct, alpha=0.25)\n# ax.set_thetagrids(angles * 180/np.pi, labels)\nplt.yticks([])\nax.set_title(\"distribution of correctly predicted directions\", y=1.1)\nax.grid(True)\nfig.tight_layout()\nplt.show\n\ndiagonal = np.diagonal(confusion_array)\nfig= plt.figure(dpi=120)\nax = fig.add_subplot(111, polar=True)\nax.plot(theta, diagonal, '-', linewidth=2)\nax.fill(theta, diagonal, alpha=0.25)\n# ax.set_thetagrids(angles * 180/np.pi, labels)\nplt.yticks([])\nax.set_title(\"distribution of correctly predicted directions\", y=1.1)\nax.grid(True)\nfig.tight_layout()\ndiagonal-all_correct",
"_____no_output_____"
]
],
[
[
"WHAT FILES CAUSE ERRORS",
"_____no_output_____"
]
],
[
[
"libri_error = 0\ngov_error = 0\nfor i in range(len(indexes_of_misclassifications)):\n if 0 <= indexes_of_misclassifications[i] < L5:\n 0+0\n elif L5 <= indexes_of_misclassifications[i] < L5 + L6:\n 0+0\n elif L5 + L6 <= indexes_of_misclassifications[i] < L5 + L6 + L7:\n if 'us-gov' in misclassifications[i]:\n gov_error += 1\n else :\n libri_error +=1\nprint(f'total librispeech errors are {libri_error} which is {int(100*libri_error/L7)}\\% of dataset4')\nprint(f'total us-gov errors are {gov_error} which is {int(100*gov_error/L7)}\\% of dataset4')",
"_____no_output_____"
]
],
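[
[
"# A minimal alternative sketch (not part of the original analysis): the same\n# segment bookkeeping as above can be done with np.searchsorted against the\n# cumulative dataset boundaries. Assumes L5, L6, L7 and\n# indexes_of_misclassifications are defined as in the cell above.\nboundaries = np.cumsum([L5, L6, L7])  # end index of each dataset segment\n# for every misclassified sample, find which segment its index falls into\nsegment_ids = np.searchsorted(boundaries, indexes_of_misclassifications, side='right')\nprint('errors per dataset segment:', np.bincount(segment_ids, minlength=3))",
"_____no_output_____"
]
],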
[
[
"WHAT IR LENGTHS CAUSE ERRORS",
"_____no_output_____"
]
],
[
[
"L_10_error = 0\nL_50_error = 0\nL_100_error = 0\nL_500_error = 0\nL_1000_error = 0\nfor i in range(len(indexes_of_misclassifications)):\n if 0 <= indexes_of_misclassifications[i] < L_10:\n L_10_error += 1\n elif L_10 <= indexes_of_misclassifications[i] < L_10 + L_50:\n L_50_error += 1\n elif L_10 + L_50 <= indexes_of_misclassifications[i] < L_10 + L_50 + L_100:\n L_100_error += 1\n elif L_10 + L_50 + L_100 <= indexes_of_misclassifications[i] < L_10 + L_50 + L_100 + L_500:\n L_500_error += 1\n elif L_10 + L_50 + L_100 + L_500 <= indexes_of_misclassifications[i] < L_10 + L_50 + L_100 + L_500 + L_1000:\n L_1000_error += 1\nprint('LATEX:')\nprint(f'IR_10ms & {L_10_error} & {int(100*L_10_error/L_10)}\\% & {int(100*L_10_error/len(indexes_of_misclassifications))}\\% \\\\\\\\')\nprint(f'IR_50ms & {L_50_error} & {int(100*L_50_error/L_10)}\\% & {int(100*L_50_error/len(indexes_of_misclassifications))}\\% \\\\\\\\')\nprint(f'IR_100ms & {L_100_error} & {int(100*L_100_error/L_10)}\\% & {int(100*L_100_error/len(indexes_of_misclassifications))}\\% \\\\\\\\')\nprint(f'IR_500ms & {L_500_error} & {int(100*L_500_error/L_10)}\\% & {int(100*L_500_error/len(indexes_of_misclassifications))}\\% \\\\\\\\')\nprint(f'IR_1000ms & {L_1000_error} & {int(100*L_1000_error/L_10)}\\% & {int(100*L_1000_error/len(indexes_of_misclassifications))}\\% \\\\\\\\')\n",
"LATEX:\nIR_10ms & 3146 & 48\\% & 18\\% \\\\\nIR_50ms & 3245 & 49\\% & 19\\% \\\\\nIR_100ms & 3481 & 53\\% & 20\\% \\\\\nIR_500ms & 3342 & 51\\% & 20\\% \\\\\nIR_1000ms & 3485 & 53\\% & 20\\% \\\\\n"
],
[
"#DELETE US_GOV FILES\n# df = pd.DataFrame(data)\n# df = df[~df.filename.str.contains('us-gov')]\n# data = df\n# print('random selection of rows:')\n# data_subset = data.sample(n=5)\n# data_subset.head()",
"_____no_output_____"
]
],
[
[
"TESTS ON DIFF IR LENGTHS",
"_____no_output_____"
]
],
[
[
"x_as = np.array(range(21))\nplt.bar(x_as,avg_errors_10)\nplt.ylim([0,1]);",
"_____no_output_____"
],
[
"x_as = np.array(range(21))\nplt.bar(x_as,avg_errors_50)\nplt.ylim([0,1]);",
"_____no_output_____"
],
[
"x_as = np.array(range(21))\nplt.bar(x_as,avg_errors_100)\nplt.ylim([0,1]);",
"_____no_output_____"
],
[
"x_as = np.array(range(21))\nplt.bar(x_as,avg_errors_500)\nplt.ylim([0,1]);",
"_____no_output_____"
],
[
"x_as = np.array(range(21))\nplt.bar(x_as,avg_errors_1000)\nplt.ylim([0,1]);",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
]
] |
e7fa53fdf4a249b1e228891e77af9209ce49e139 | 4,086 | ipynb | Jupyter Notebook | day3 assign.ipynb | nandhinics/letsupgrade-assignday3 | ecb1c5de34ff98463d6d177861ccf8db232f7d33 | [
"Apache-2.0"
] | null | null | null | day3 assign.ipynb | nandhinics/letsupgrade-assignday3 | ecb1c5de34ff98463d6d177861ccf8db232f7d33 | [
"Apache-2.0"
] | null | null | null | day3 assign.ipynb | nandhinics/letsupgrade-assignday3 | ecb1c5de34ff98463d6d177861ccf8db232f7d33 | [
"Apache-2.0"
] | null | null | null | 18.916667 | 66 | 0.403084 | [
[
[
"x=input(\"Enter the altitude\")\nx= int(x)\nif (x<=1000):\n print (\"Safe to land\")\nelif (x>1000) and (x<=5000):\n print (\"Bring down to 1000\")\nelse:\n print (\"Turn around\")\n",
"Enter the altitude1000\nSafe to land\n"
],
[
"x=input(\"Enter the altitude\")\nx= int(x)\nif (x<=1000):\n print (\"Safe to land\")\nelif (x>1000) and (x<=5000):\n print (\"Bring down to 1000\")\nelse:\n print (\"Turn around\")\n",
"Enter the altitude4500\nBring down to 1000\n"
],
[
"x=input(\"Enter the altitude\")\nx= int(x)\nif (x<=1000):\n print (\"Safe to land\")\nelif (x>1000) and (x<=5000):\n print (\"Bring down to 1000\")\nelse:\n print (\"Turn around\")\n",
"Enter the altitude6500\nTurn around\n"
],
[
"a = 1\nb = 200\n\nprint(\"Prime numbers between\", a, \"and\", b, \"are:\")\n\nfor x in range(a, b + 1):\n if x > 1:\n for i in range(2, x):\n if (x % i) == 0:\n break\n else:\n print(x)",
"Prime numbers between 1 and 200 are:\n2\n3\n5\n7\n11\n13\n17\n19\n23\n29\n31\n37\n41\n43\n47\n53\n59\n61\n67\n71\n73\n79\n83\n89\n97\n101\n103\n107\n109\n113\n127\n131\n137\n139\n149\n151\n157\n163\n167\n173\n179\n181\n191\n193\n197\n199\n"
],
[
"\nfirst = 1042000\nlast = 702648265\n\nfor number in range(first, last + 1):\n orders = len(str(number))\n \n sum = 0\n\n temp = number\n while temp > 0:\n digits = temp % 10\n sum += digits ** orders\n temp //= 10\n\n if number == sum:\n print(\"The first armstrong number=\",number)\n break\n",
"The first armstrong number= 1741725\n"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code"
]
] |
e7fa5657947689afdb0a749cbf2e4b57ec793feb | 20,899 | ipynb | Jupyter Notebook | GitHub_MD_rendering/Operators.ipynb | kyaiooiayk/Python-Programming | b70dde24901cd24b38e2ead7c9a1b2d1808fc4b0 | [
"OLDAP-2.3"
] | null | null | null | GitHub_MD_rendering/Operators.ipynb | kyaiooiayk/Python-Programming | b70dde24901cd24b38e2ead7c9a1b2d1808fc4b0 | [
"OLDAP-2.3"
] | null | null | null | GitHub_MD_rendering/Operators.ipynb | kyaiooiayk/Python-Programming | b70dde24901cd24b38e2ead7c9a1b2d1808fc4b0 | [
"OLDAP-2.3"
] | null | null | null | 25.517705 | 269 | 0.498684 | [
[
[
"# Introduction",
"_____no_output_____"
],
[
"\n**What?** Operators\n\n",
"_____no_output_____"
],
[
"# Basic Python Semantics: Operators",
"_____no_output_____"
],
[
"In the previous section, we began to look at the semantics of Python variables and objects; here we'll dig into the semantics of the various *operators* included in the language.\nBy the end of this section, you'll have the basic tools to begin comparing and operating on data in Python.",
"_____no_output_____"
],
[
"## Arithmetic Operations\nPython implements seven basic binary arithmetic operators, two of which can double as unary operators.\nThey are summarized in the following table:\n\n| Operator | Name | Description |\n|--------------|----------------|--------------------------------------------------------|\n| ``a + b`` | Addition | Sum of ``a`` and ``b`` |\n| ``a - b`` | Subtraction | Difference of ``a`` and ``b`` |\n| ``a * b`` | Multiplication | Product of ``a`` and ``b`` |\n| ``a / b`` | True division | Quotient of ``a`` and ``b`` |\n| ``a // b`` | Floor division | Quotient of ``a`` and ``b``, removing fractional parts |\n| ``a % b`` | Modulus | Integer remainder after division of ``a`` by ``b`` |\n| ``a ** b`` | Exponentiation | ``a`` raised to the power of ``b`` |\n| ``-a`` | Negation | The negative of ``a`` |\n| ``+a`` | Unary plus | ``a`` unchanged (rarely used) |\n\nThese operators can be used and combined in intuitive ways, using standard parentheses to group operations.\nFor example:",
"_____no_output_____"
]
],
[
[
"# addition, subtraction, multiplication\n(4 + 8) * (6.5 - 3)",
"_____no_output_____"
]
],
[
[
"Floor division is true division with fractional parts truncated:",
"_____no_output_____"
]
],
[
[
"# True division\nprint(11 / 2)",
"5.5\n"
],
[
"# Floor division\nprint(11 // 2)",
"5\n"
]
],
[
[
"The floor division operator was added in Python 3; you should be aware if working in Python 2 that the standard division operator (``/``) acts like floor division for integers and like true division for floating-point numbers.\n\nFinally, I'll mention an eighth arithmetic operator that was added in Python 3.5: the ``a @ b`` operator, which is meant to indicate the *matrix product* of ``a`` and ``b``, for use in various linear algebra packages.",
"_____no_output_____"
],
[
"## Bitwise Operations\nIn addition to the standard numerical operations, Python includes operators to perform bitwise logical operations on integers.\nThese are much less commonly used than the standard arithmetic operations, but it's useful to know that they exist.\nThe six bitwise operators are summarized in the following table:\n\n| Operator | Name | Description |\n|--------------|-----------------|---------------------------------------------|\n| ``a & b`` | Bitwise AND | Bits defined in both ``a`` and ``b`` |\n| <code>a | b</code>| Bitwise OR | Bits defined in ``a`` or ``b`` or both |\n| ``a ^ b`` | Bitwise XOR | Bits defined in ``a`` or ``b`` but not both |\n| ``a << b`` | Bit shift left | Shift bits of ``a`` left by ``b`` units |\n| ``a >> b`` | Bit shift right | Shift bits of ``a`` right by ``b`` units |\n| ``~a`` | Bitwise NOT | Bitwise negation of ``a`` |\n\nThese bitwise operators only make sense in terms of the binary representation of numbers, which you can see using the built-in ``bin`` function:",
"_____no_output_____"
]
],
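[
[
"# A quick look at the matrix-product operator mentioned above.\n# Assumes Python 3.5+ with NumPy installed; plain lists do not support '@'.\nimport numpy as np\na = np.array([[1, 2], [3, 4]])\nb = np.array([[5, 6], [7, 8]])\na @ b  # equivalent to np.matmul(a, b)",
"_____no_output_____"
]
],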
[
[
"bin(10)",
"_____no_output_____"
]
],
[
[
"The result is prefixed with ``'0b'``, which indicates a binary representation.\nThe rest of the digits indicate that the number 10 is expressed as the sum $1 \\cdot 2^3 + 0 \\cdot 2^2 + 1 \\cdot 2^1 + 0 \\cdot 2^0$.\nSimilarly, we can write:",
"_____no_output_____"
]
],
[
[
"bin(4)",
"_____no_output_____"
]
],
[
[
"Now, using bitwise OR, we can find the number which combines the bits of 4 and 10:",
"_____no_output_____"
]
],
[
[
"4 | 10",
"_____no_output_____"
],
[
"bin(4 | 10)",
"_____no_output_____"
]
],
[
[
"These bitwise operators are not as immediately useful as the standard arithmetic operators, but it's helpful to see them at least once to understand what class of operation they perform.\nIn particular, users from other languages are sometimes tempted to use XOR (i.e., ``a ^ b``) when they really mean exponentiation (i.e., ``a ** b``).",
"_____no_output_____"
],
[
"## Assignment Operations\nWe've seen that variables can be assigned with the \"``=``\" operator, and the values stored for later use. For example:",
"_____no_output_____"
]
],
[
[
"a = 24\nprint(a)",
"24\n"
]
],
[
[
"We can use these variables in expressions with any of the operators mentioned earlier.\nFor example, to add 2 to ``a`` we write:",
"_____no_output_____"
]
],
[
[
"a + 2",
"_____no_output_____"
]
],
[
[
"We might want to update the variable ``a`` with this new value; in this case, we could combine the addition and the assignment and write ``a = a + 2``.\nBecause this type of combined operation and assignment is so common, Python includes built-in update operators for all of the arithmetic operations:",
"_____no_output_____"
]
],
[
[
"a += 2 # equivalent to a = a + 2\nprint(a)",
"26\n"
]
],
[
[
"There is an augmented assignment operator corresponding to each of the binary operators listed earlier; in brief, they are:\n\n|||||\n|-|-|\n|``a += b``| ``a -= b``|``a *= b``| ``a /= b``|\n|``a //= b``| ``a %= b``|``a **= b``|``a &= b``|\n|<code>a |= b</code>| ``a ^= b``|``a <<= b``| ``a >>= b``|\n\nEach one is equivalent to the corresponding operation followed by assignment: that is, for any operator \"``■``\", the expression ``a ■= b`` is equivalent to ``a = a ■ b``, with a slight catch.\nFor mutable objects like lists, arrays, or DataFrames, these augmented assignment operations are actually subtly different than their more verbose counterparts: they modify the contents of the original object rather than creating a new object to store the result.",
"_____no_output_____"
],
[
"## Comparison Operations\n\nAnother type of operation which can be very useful is comparison of different values.\nFor this, Python implements standard comparison operators, which return Boolean values ``True`` and ``False``.\nThe comparison operations are listed in the following table:\n\n| Operation | Description || Operation | Description |\n|---------------|-----------------------------------||---------------|--------------------------------------|\n| ``a == b`` | ``a`` equal to ``b`` || ``a != b`` | ``a`` not equal to ``b`` |\n| ``a < b`` | ``a`` less than ``b`` || ``a > b`` | ``a`` greater than ``b`` |\n| ``a <= b`` | ``a`` less than or equal to ``b`` || ``a >= b`` | ``a`` greater than or equal to ``b`` |\n\nThese comparison operators can be combined with the arithmetic and bitwise operators to express a virtually limitless range of tests for the numbers.\nFor example, we can check if a number is odd by checking that the modulus with 2 returns 1:",
"_____no_output_____"
]
],
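[
[
"# Illustrating the note above about mutable objects: 'a += b' modifies the\n# list in place, while 'a = a + b' rebinds 'a' to a brand-new list.\na = [1, 2]\nb = a          # b points to the same list object\na += [3]       # in-place update: b sees the change\nprint(b)\na = a + [4]    # rebinding: a now points to a new list\nprint(b)       # b still points to the old list",
"_____no_output_____"
]
],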
[
[
"# 25 is odd\n25 % 2 == 1",
"_____no_output_____"
],
[
"# 66 is odd\n66 % 2 == 1",
"_____no_output_____"
]
],
[
[
"We can string-together multiple comparisons to check more complicated relationships:",
"_____no_output_____"
]
],
[
[
"# check if a is between 15 and 30\na = 25\n15 < a < 30",
"_____no_output_____"
]
],
[
[
"And, just to make your head hurt a bit, take a look at this comparison:",
"_____no_output_____"
]
],
[
[
"-1 == ~0",
"_____no_output_____"
]
],
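[
[
"# A few more cases of the pattern above: under two's complement encoding,\n# flipping all the bits of x always yields -(x + 1), so ~0 == -1, ~1 == -2, etc.\nfor x in [0, 1, 5, 10]:\n    print(x, '->', ~x)",
"_____no_output_____"
]
],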
[
[
"Recall that ``~`` is the bit-flip operator, and evidently when you flip all the bits of zero you end up with -1.\nIf you're curious as to why this is, look up the *two's complement* integer encoding scheme, which is what Python uses to encode signed integers, and think about what happens when you start flipping all the bits of integers encoded this way.",
"_____no_output_____"
],
[
"## Boolean Operations\nWhen working with Boolean values, Python provides operators to combine the values using the standard concepts of \"and\", \"or\", and \"not\".\nPredictably, these operators are expressed using the words ``and``, ``or``, and ``not``:",
"_____no_output_____"
]
],
[
[
"x = 4\n(x < 6) and (x > 2)",
"_____no_output_____"
],
[
"(x > 10) or (x % 2 == 0)",
"_____no_output_____"
],
[
"not (x < 6)",
"_____no_output_____"
]
],
[
[
"Boolean algebra aficionados might notice that the XOR operator is not included; this can of course be constructed in several ways from a compound statement of the other operators.\nOtherwise, a clever trick you can use for XOR of Boolean values is the following:",
"_____no_output_____"
]
],
[
[
"# (x > 1) xor (x < 10)\n(x > 1) != (x < 10)",
"_____no_output_____"
]
],
[
[
"These sorts of Boolean operations will become extremely useful when we begin discussing *control flow statements* such as conditionals and loops.\n\nOne sometimes confusing thing about the language is when to use Boolean operators (``and``, ``or``, ``not``), and when to use bitwise operations (``&``, ``|``, ``~``).\nThe answer lies in their names: Boolean operators should be used when you want to compute *Boolean values (i.e., truth or falsehood) of entire statements*.\nBitwise operations should be used when you want to *operate on individual bits or components of the objects in question*.",
"_____no_output_____"
],
[
"## Identity and Membership Operators\n\nLike ``and``, ``or``, and ``not``, Python also contains prose-like operators to check for identity and membership.\nThey are the following:\n\n| Operator | Description |\n|---------------|---------------------------------------------------|\n| ``a is b`` | True if ``a`` and ``b`` are identical objects |\n| ``a is not b``| True if ``a`` and ``b`` are not identical objects |\n| ``a in b`` | True if ``a`` is a member of ``b`` |\n| ``a not in b``| True if ``a`` is not a member of ``b`` |",
"_____no_output_____"
],
[
"### Identity Operators: \"``is``\" and \"``is not``\"\n\nThe identity operators, \"``is``\" and \"``is not``\" check for *object identity*.\nObject identity is different than equality, as we can see here:",
"_____no_output_____"
]
],
[
[
"a = [1, 2, 3]\nb = [1, 2, 3]",
"_____no_output_____"
],
[
"a == b",
"_____no_output_____"
],
[
"a is b",
"_____no_output_____"
],
[
"a is not b",
"_____no_output_____"
]
],
[
[
"What do identical objects look like? Here is an example:",
"_____no_output_____"
]
],
[
[
"a = [1, 2, 3]\nb = a\na is b",
"_____no_output_____"
]
],
[
[
"The difference between the two cases here is that in the first, ``a`` and ``b`` point to *different objects*, while in the second they point to the *same object*.\nAs we saw in the previous section, Python variables are pointers. The \"``is``\" operator checks whether the two variables are pointing to the same container (object), rather than referring to what the container contains.\nWith this in mind, in most cases that a beginner is tempted to use \"``is``\" what they really mean is ``==``.",
"_____no_output_____"
],
[
"### Membership operators\nMembership operators check for membership within compound objects.\nSo, for example, we can write:",
"_____no_output_____"
]
],
[
[
"1 in [1, 2, 3]",
"_____no_output_____"
],
[
"2 not in [1, 2, 3]",
"_____no_output_____"
]
],
[
[
"# References",
"_____no_output_____"
],
[
"\n- [Whirlwind Tour of Python](http://www.oreilly.com/programming/free/a-whirlwind-tour-of-python.csp)\n\n",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
]
] |
e7fa604752370c08e6880024098cc70cb3b28a4a | 40,766 | ipynb | Jupyter Notebook | Network Evaluation Examples/Network Evaluation Example-BioPlex.ipynb | jdtibochab/network_bisb | 7adcab15c2e8ed79123153f8de38d159d103f999 | [
"MIT"
] | null | null | null | Network Evaluation Examples/Network Evaluation Example-BioPlex.ipynb | jdtibochab/network_bisb | 7adcab15c2e8ed79123153f8de38d159d103f999 | [
"MIT"
] | null | null | null | Network Evaluation Examples/Network Evaluation Example-BioPlex.ipynb | jdtibochab/network_bisb | 7adcab15c2e8ed79123153f8de38d159d103f999 | [
"MIT"
] | null | null | null | 56.30663 | 2,156 | 0.49632 | [
[
[
"import sys\nsys.path.append('/home/juan/Network_Evaluation_Tools')\n\nfrom network_evaluation_tools import data_import_tools as dit\nfrom network_evaluation_tools import network_evaluation_functions as nef\nfrom network_evaluation_tools import network_propagation as prop\nimport pandas as pd\nimport numpy as np",
"_____no_output_____"
],
[
"# Load network (We choose a smaller network here for the example's sake)\nnetwork = dit.load_network_file('/home/juan/Network_Evaluation_Tools/Data/BioPlex_Symbol.sif', verbose=True)\nlen(network.edges)",
"('Network File Loaded:', '/home/juan/Network_Evaluation_Tools/Data/BioPlex_Symbol.sif')\n"
],
[
"# Load gene sets for analysis\ngenesets = dit.load_node_sets('/home/juan/Network_Evaluation_Tools/Data/DisGeNET_genesets.txt')",
"_____no_output_____"
],
[
"# Calculate geneset sub-sample rate\ngenesets_p = nef.calculate_p(network, genesets)",
"_____no_output_____"
],
[
"# Determine optimal alpha for network (can also be done automatically by next step)\nalpha = prop.calculate_alpha(network)\nprint(alpha)",
"0.609\n"
],
[
"# Calculate network kernel for propagation\nkernel = nef.construct_prop_kernel(network, alpha=alpha, verbose=True)",
"Alpha: 0.609\n"
],
[
"# Calculate the AUPRC values for each gene set\nAUPRC_values = nef.small_network_AUPRC_wrapper(kernel, genesets, genesets_p, n=30, cores=4, verbose=True)",
"_____no_output_____"
]
],
[
[
"**Note about the above cell:** There are a several options for this particular step depending on the computational resources available and network size. If the network is sufficiently small (<250k edges), it is recommended to use the 'small_network_AUPRC_wrapper' function as it can be much faster, especially when run in parallel (at least 8G per core is recommended). If you would like to parallelize the AUPRC calculation with a larger network (between 250K and 2.5M edges), at least 16G per core is recommended, 32G per core if the network contains more than 2.5M edges. For larger networks, it is recommended to use the 'large_network_AUPRC_wrapper', which may be a slightly slower function, but more equipped to handle the larger memory footprint required. To change the parllelization status of the function, change the 'cores' option to the number of threads you would like to utilize.",
"_____no_output_____"
]
],
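[
[
"# A minimal sketch of the guidance above (not from the original analysis):\n# dispatch to the appropriate AUPRC wrapper based on the network's edge count.\n# Assumes nef provides both small_network_AUPRC_wrapper and\n# large_network_AUPRC_wrapper as described in the note.\nn_edges = network.number_of_edges()\nif n_edges < 250000:\n    auprc_fn = nef.small_network_AUPRC_wrapper   # faster for small networks\nelse:\n    auprc_fn = nef.large_network_AUPRC_wrapper   # built for larger memory footprints\nprint('selected wrapper for', n_edges, 'edges:', auprc_fn.__name__)",
"_____no_output_____"
]
],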
[
[
"# Construct null networks and calculate the AUPRC of the gene sets of the null networks\n# We can use the AUPRC wrapper function for this\nnull_AUPRCs = []\nfor i in range(10):\n shuffNet = nef.shuffle_network(network, max_tries_n=10, verbose=True)\n shuffNet_kernel = nef.construct_prop_kernel(shuffNet, alpha=alpha, verbose=False)\n shuffNet_AUPRCs = nef.small_network_AUPRC_wrapper(shuffNet_kernel, genesets, genesets_p, n=30, cores=4, verbose=False)\n null_AUPRCs.append(shuffNet_AUPRCs)\n print ('shuffNet', repr(i+1), 'AUPRCs calculated')",
"_____no_output_____"
]
],
[
[
"**Note about the above cell:** We use a small number to calculate the null AUPRC values, but a larger number of shuffled networks may give a better representation of the true null AUPRC value. smaller number of networks here for this example, but larger numbers can be used, especially if the resulting distribution of null AUPRCs has a high variance relative to the actual AUPRC values, but we have found that the variance remains relatively small even with a small number of shuffled networks.",
"_____no_output_____"
]
],
[
[
"# Construct table of null AUPRCs\nnull_AUPRCs_table = pd.concat(null_AUPRCs, axis=1)\nnull_AUPRCs_table.columns = ['shuffNet'+repr(i+1) for i in range(len(null_AUPRCs))]",
"_____no_output_____"
],
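[
"# A quick sanity check suggested by the note above: if the spread of the null\n# AUPRCs is small relative to the actual AUPRC values, the small number of\n# shuffled networks is likely sufficient.\nnull_spread = null_AUPRCs_table.std(axis=1) / AUPRC_values\nprint(null_spread.describe())",
"_____no_output_____"
],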
[
"# Calculate performance metric of gene sets\nnetwork_performance = nef.calculate_network_performance_score(AUPRC_values, null_AUPRCs_table, verbose=True)\nnetwork_performance.name = 'Test Network'",
"_____no_output_____"
],
[
"# Calculate network performance gain over median null AUPRC\nnetwork_perf_gain = nef.calculate_network_performance_gain(AUPRC_values, null_AUPRCs_table, verbose=True)\nnetwork_perf_gain.name = 'Test Network'",
"_____no_output_____"
],
[
"network_performance",
"_____no_output_____"
],
[
"# Rank network on average performance across gene sets vs performance on same gene sets in previous network set\nall_network_performance = pd.read_csv('~/Data/Network_Performance.csv', index_col=0)\nall_network_performance_filt = pd.concat([network_performance, all_network_performance.ix[network_performance.index]], axis=1)\nnetwork_performance_rank_table = all_network_performance_filt.rank(axis=1, ascending=False)\nnetwork_performance_rankings = network_performance_rank_table['Test Network']",
"_____no_output_____"
],
[
"# Rank network on average performance gain across gene sets vs performance gain on same gene sets in previous network set\nall_network_perf_gain = pd.read_csv('~/Data/Network_Performance_Gain.csv', index_col=0)\nall_network_perf_gain_filt = pd.concat([network_perf_gain, all_network_perf_gain.ix[network_perf_gain.index]], axis=1)\nnetwork_perf_gain_rank_table = all_network_performance_filt.rank(axis=1, ascending=False)\nnetwork_perf_gain_rankings = network_perf_gain_rank_table['Test Network']",
"_____no_output_____"
],
[
"# Network Performance\nnetwork_performance_metric_ranks = pd.concat([network_performance, network_performance_rankings, network_perf_gain, network_perf_gain_rankings], axis=1)\nnetwork_performance_metric_ranks.columns = ['Network Performance', 'Network Performance Rank', 'Network Performance Gain', 'Network Performance Gain Rank']\nnetwork_performance_metric_ranks.sort_values(by=['Network Performance Rank', 'Network Performance', 'Network Performance Gain Rank', 'Network Performance Gain'],\n ascending=[True, False, True, False])",
"_____no_output_____"
],
[
"# Construct network summary table\nnetwork_summary = {}\nnetwork_summary['Nodes'] = int(len(network.nodes()))\nnetwork_summary['Edges'] = int(len(network.edges()))\nnetwork_summary['Avg Node Degree'] = np.mean(network.degree().values())\nnetwork_summary['Edge Density'] = 2*network_summary['Edges'] / float((network_summary['Nodes']*(network_summary['Nodes']-1)))\nnetwork_summary['Avg Network Performance Rank'] = network_performance_rankings.mean()\nnetwork_summary['Avg Network Performance Rank, Rank'] = int(network_performance_rank_table.mean().rank().ix['Test Network'])\nnetwork_summary['Avg Network Performance Gain Rank'] = network_perf_gain_rankings.mean()\nnetwork_summary['Avg Network Performance Gain Rank, Rank'] = int(network_perf_gain_rank_table.mean().rank().ix['Test Network'])\nfor item in ['Nodes', 'Edges' ,'Avg Node Degree', 'Edge Density', 'Avg Network Performance Rank', 'Avg Network Performance Rank, Rank',\n 'Avg Network Performance Gain Rank', 'Avg Network Performance Gain Rank, Rank']:\n print item+':\\t'+repr(network_summary[item])",
"Nodes:\t9432\nEdges:\t151352\nAvg Node Degree:\t32.093299406276508\nEdge Density:\t0.0034029582659608213\nAvg Network Performance Rank:\t6.53125\nAvg Network Performance Rank, Rank:\t7\nAvg Network Performance Gain Rank:\t6.53125\nAvg Network Performance Gain Rank, Rank:\t7\n"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e7fa6aed425a22a72dca673294c390ae24b6ba26 | 62,335 | ipynb | Jupyter Notebook | README.ipynb | kaiserho/gym-anytrading | 18d9227ac42cdb422813512dcffa56a450bc83bf | [
"MIT"
] | 1,059 | 2019-09-22T00:05:12.000Z | 2022-03-31T17:18:17.000Z | README.ipynb | kaiserho/gym-anytrading | 18d9227ac42cdb422813512dcffa56a450bc83bf | [
"MIT"
] | 63 | 2020-01-29T21:15:25.000Z | 2022-03-28T22:14:55.000Z | README.ipynb | kaiserho/gym-anytrading | 18d9227ac42cdb422813512dcffa56a450bc83bf | [
"MIT"
] | 311 | 2019-10-09T11:48:39.000Z | 2022-03-31T23:10:19.000Z | 140.394144 | 22,638 | 0.860319 | [
[
[
"# gym-anytrading\r\n\r\n`AnyTrading` is a collection of [OpenAI Gym](https://github.com/openai/gym) environments for reinforcement learning-based trading algorithms.\r\n\r\nTrading algorithms are mostly implemented in two markets: [FOREX](https://en.wikipedia.org/wiki/Foreign_exchange_market) and [Stock](https://en.wikipedia.org/wiki/Stock). AnyTrading aims to provide some Gym environments to improve and facilitate the procedure of developing and testing RL-based algorithms in this area. This purpose is obtained by implementing three Gym environments: **TradingEnv**, **ForexEnv**, and **StocksEnv**.\r\n\r\nTradingEnv is an abstract environment which is defined to support all kinds of trading environments. ForexEnv and StocksEnv are simply two environments that inherit and extend TradingEnv. In the future sections, more explanations will be given about them but before that, some environment properties should be discussed.\r\n\r\n**Note:** For experts, it is recommended to check out the [gym-mtsim](https://github.com/AminHP/gym-mtsim) project.\r\n\r\n## Installation\r\n\r\n### Via PIP\r\n```bash\r\npip install gym-anytrading\r\n```\r\n\r\n### From Repository\r\n```bash\r\ngit clone https://github.com/AminHP/gym-anytrading\r\ncd gym-anytrading\r\npip install -e .\r\n\r\n## or\r\n\r\npip install --upgrade --no-deps --force-reinstall https://github.com/AminHP/gym-anytrading/archive/master.zip\r\n```\r\n\r\n## Environment Properties\r\nFirst of all, **you can't simply expect an RL agent to do everything for you and just sit back on your chair in such complex trading markets!**\r\nThings need to be simplified as much as possible in order to let the agent learn in a faster and more efficient way. In all trading algorithms, the first thing that should be done is to define **actions** and **positions**. In the two following subsections, I will explain these actions and positions and how to simplify them.\r\n\r\n### Trading Actions\r\nIf you search on the Internet for trading algorithms, you will find them using numerous actions such as **Buy**, **Sell**, **Hold**, **Enter**, **Exit**, etc.\r\nReferring to the first statement of this section, a typical RL agent can only solve a part of the main problem in this area. If you work in trading markets you will learn that deciding whether to hold, enter, or exit a pair (in FOREX) or stock (in Stocks) is a statistical decision depending on many parameters such as your budget, pairs or stocks you trade, your money distribution policy in multiple markets, etc. It's a massive burden for an RL agent to consider all these parameters and may take years to develop such an agent! In this case, you certainly will not use this environment but you will extend your own.\r\n\r\nSo after months of work, I finally found out that these actions just make things complicated with no real positive impact. In fact, they just increase the learning time and an action like **Hold** will be barely used by a well-trained agent because it doesn't want to miss a single penny. Therefore there is no need to have such numerous actions and only `Sell=0` and `Buy=1` actions are adequate to train an agent just as well.\r\n\r\n### Trading Positions\r\nIf you're not familiar with trading positions, refer [here](https://en.wikipedia.org/wiki/Position_\\(finance\\)). 
It's a very important concept and you should learn it as soon as possible.\r\n\r\nIn simple terms: a **Long** position buys shares when prices are low and profits by holding them while their value goes up, while a **Short** position sells shares at a high value and uses the proceeds to buy them back at a lower value, keeping the difference as profit.\r\n\r\nAgain, in some trading algorithms, you may find numerous positions such as **Short**, **Long**, **Flat**, etc. As discussed earlier, I use only `Short=0` and `Long=1` positions.\r\n\r\n## Trading Environments\r\nAs mentioned earlier, it's now time to introduce the three environments. Before creating this project, I spent a long time searching for a simple and flexible Gym environment for any trading market but didn't find one. Most were complex codebases with many unclear parameters that you couldn't simply look at and comprehend. So I decided to implement this project with a great focus on simplicity, flexibility, and comprehensiveness.\r\n\r\nIn the three following subsections, I will introduce our trading environments and in the next section, some IPython examples will be mentioned and briefly explained.\r\n\r\n### TradingEnv\r\nTradingEnv is an abstract class which inherits `gym.Env`. This class aims to provide a general-purpose environment for all kinds of trading markets. Here I explain its public properties and methods. But feel free to take a look at the complete [source code](https://github.com/AminHP/gym-anytrading/blob/master/gym_anytrading/envs/trading_env.py).\r\n\r\n* Properties:\r\n> `df`: An abbreviation for **DataFrame**. It's a **pandas'** DataFrame which contains your dataset and is passed in the class' constructor.\r\n>\r\n> `prices`: Real prices over time. Used to calculate profit and render the environment.\r\n>\r\n> `signal_features`: Extracted features over time. Used to create *Gym observations*.\r\n>\r\n> `window_size`: Number of ticks (current and previous ticks) returned as a *Gym observation*. It is passed in the class' constructor.\r\n>\r\n> `action_space`: The *Gym action_space* property. Containing discrete values of **0=Sell** and **1=Buy**.\r\n>\r\n> `observation_space`: The *Gym observation_space* property. Each observation is a window on `signal_features` from index **current_tick - window_size + 1** to **current_tick**. So `_start_tick` of the environment would be equal to `window_size`. In addition, the initial value for `_last_trade_tick` is **window_size - 1**.\r\n>\r\n> `shape`: Shape of a single observation.\r\n>\r\n> `history`: Stores the information of all steps.\r\n\r\n* Methods:\r\n> `seed`: Typical *Gym seed* method.\r\n>\r\n> `reset`: Typical *Gym reset* method.\r\n>\r\n> `step`: Typical *Gym step* method.\r\n>\r\n> `render`: Typical *Gym render* method. Renders the information of the environment's current tick.\r\n>\r\n> `render_all`: Renders the whole environment.\r\n>\r\n> `close`: Typical *Gym close* method.\r\n\r\n* Abstract Methods:\r\n> `_process_data`: It is called in the constructor and returns `prices` and `signal_features` as a tuple. In different trading markets, different features need to be obtained. 
So this method enables our TradingEnv to be a general-purpose environment and specific features can be returned for specific environments such as *FOREX*, *Stocks*, etc.\r\n>\r\n> `_calculate_reward`: The reward function for the RL agent.\r\n>\r\n> `_update_profit`: Calculates and updates total profit which the RL agent has achieved so far. Profit indicates the amount of units of currency you have achieved by starting with *1.0* unit (Profit = FinalMoney / StartingMoney).\r\n>\r\n> `max_possible_profit`: The maximum possible profit that an RL agent can obtain regardless of trade fees.\r\n\r\n### ForexEnv\r\nThis is a concrete class which inherits TradingEnv and implements its abstract methods. Also, it has some specific properties for the *FOREX* market. For more information refer to the [source code](https://github.com/AminHP/gym-anytrading/blob/master/gym_anytrading/envs/forex_env.py).\r\n\r\n* Properties:\r\n> `frame_bound`: A tuple which specifies the start and end of `df`. It is passed in the class' constructor.\r\n>\r\n> `unit_side`: Specifies the side you start your trading. Containing string values of **left** (default value) and **right**. As you know, there are two sides in a currency pair in *FOREX*. For example in the *EUR/USD* pair, when you choose the `left` side, your currency unit is *EUR* and you start your trading with 1 EUR. It is passed in the class' constructor.\r\n>\r\n> `trade_fee`: A default constant fee which is subtracted from the real prices on every trade.\r\n\r\n\r\n### StocksEnv\r\nSame as ForexEnv but for the *Stock* market. For more information refer to the [source code](https://github.com/AminHP/gym-anytrading/blob/master/gym_anytrading/envs/stocks_env.py).\r\n\r\n* Properties:\r\n> `frame_bound`: A tuple which specifies the start and end of `df`. It is passed in the class' constructor.\r\n>\r\n> `trade_fee_bid_percent`: A default constant fee percentage for bids. For example with trade_fee_bid_percent=0.01, you will lose 1% of your money every time you sell your shares.\r\n>\r\n> `trade_fee_ask_percent`: A default constant fee percentage for asks. For example with trade_fee_ask_percent=0.005, you will lose 0.5% of your money every time you buy some shares.\r\n\r\nBesides, you can create your own customized environment by extending TradingEnv or even ForexEnv or StocksEnv with your desired policies for calculating reward, profit, fee, etc.\r\n\r\n## Examples\r\n",
"_____no_output_____"
],
[
"### Create an environment",
"_____no_output_____"
]
],
[
[
"import gym\nimport gym_anytrading\n\nenv = gym.make('forex-v0')\n# env = gym.make('stocks-v0')\n",
"_____no_output_____"
]
],
[
[
"- This will create the default environment. You can change any parameters such as dataset, frame_bound, etc.",
"_____no_output_____"
],
[
"### Create an environment with custom parameters\nI put two default datasets for [*FOREX*](https://github.com/AminHP/gym-anytrading/blob/master/gym_anytrading/datasets/data/FOREX_EURUSD_1H_ASK.csv) and [*Stocks*](https://github.com/AminHP/gym-anytrading/blob/master/gym_anytrading/datasets/data/STOCKS_GOOGL.csv) but you can use your own.",
"_____no_output_____"
]
],
[
[
"from gym_anytrading.datasets import FOREX_EURUSD_1H_ASK, STOCKS_GOOGL\n\ncustom_env = gym.make('forex-v0',\n df = FOREX_EURUSD_1H_ASK,\n window_size = 10,\n frame_bound = (10, 300),\n unit_side = 'right')\n\n# custom_env = gym.make('stocks-v0',\n# df = STOCKS_GOOGL,\n# window_size = 10,\n# frame_bound = (10, 300))",
"_____no_output_____"
]
],
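[
[
"# A hedged sketch of plugging in your own dataset, as noted above. A tiny\n# synthetic OHLC frame stands in for a real file (e.g. pd.read_csv(...));\n# the column names here mirror the bundled datasets and are an assumption\n# about what your data should provide.\nimport numpy as np\nimport pandas as pd\n\nn = 50\nprices = 100 + np.cumsum(np.random.randn(n))\nmy_df = pd.DataFrame({'Open': prices, 'High': prices + 1,\n                      'Low': prices - 1, 'Close': prices})\nmy_env = gym.make('stocks-v0', df=my_df, window_size=10, frame_bound=(10, len(my_df)))",
"_____no_output_____"
]
],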
[
[
"- It is to be noted that the first element of `frame_bound` should be greater than or equal to `window_size`.",
"_____no_output_____"
],
[
"### Print some information",
"_____no_output_____"
]
],
[
[
"print(\"env information:\")\nprint(\"> shape:\", env.shape)\nprint(\"> df.shape:\", env.df.shape)\nprint(\"> prices.shape:\", env.prices.shape)\nprint(\"> signal_features.shape:\", env.signal_features.shape)\nprint(\"> max_possible_profit:\", env.max_possible_profit())\n\nprint()\nprint(\"custom_env information:\")\nprint(\"> shape:\", custom_env.shape)\nprint(\"> df.shape:\", env.df.shape)\nprint(\"> prices.shape:\", custom_env.prices.shape)\nprint(\"> signal_features.shape:\", custom_env.signal_features.shape)\nprint(\"> max_possible_profit:\", custom_env.max_possible_profit())",
"env information:\n> shape: (24, 2)\n> df.shape: (6225, 5)\n> prices.shape: (6225,)\n> signal_features.shape: (6225, 2)\n> max_possible_profit: 4.054414887146586\n\ncustom_env information:\n> shape: (10, 2)\n> df.shape: (6225, 5)\n> prices.shape: (300,)\n> signal_features.shape: (300, 2)\n> max_possible_profit: 1.122900180008982\n"
]
],
[
[
"- Here `max_possible_profit` signifies that if the market didn't have trade fees, you could have earned **4.054414887146586** (or **1.122900180008982**) units of currency by starting with **1.0**. In other words, your money is almost *quadrupled*.",
"_____no_output_____"
],
[
"### Plot the environment",
"_____no_output_____"
]
],
[
[
"env.reset()\nenv.render()",
"_____no_output_____"
]
],
[
[
"- **Short** and **Long** positions are shown in `red` and `green` colors.\n- As you see, the starting *position* of the environment is always **Short**.",
"_____no_output_____"
],
[
"### A complete example",
"_____no_output_____"
]
],
[
[
"import gym\nimport gym_anytrading\nfrom gym_anytrading.envs import TradingEnv, ForexEnv, StocksEnv, Actions, Positions \nfrom gym_anytrading.datasets import FOREX_EURUSD_1H_ASK, STOCKS_GOOGL\nimport matplotlib.pyplot as plt\n\nenv = gym.make('forex-v0', frame_bound=(50, 100), window_size=10)\n# env = gym.make('stocks-v0', frame_bound=(50, 100), window_size=10)\n\nobservation = env.reset()\nwhile True:\n action = env.action_space.sample()\n observation, reward, done, info = env.step(action)\n # env.render()\n if done:\n print(\"info:\", info)\n break\n\nplt.cla()\nenv.render_all()\nplt.show()",
"info: {'total_reward': -173.10000000000602, 'total_profit': 0.980652456904312, 'position': 0}\n"
]
],
[
[
"- You can use `render_all` method to avoid rendering on each step and prevent time-wasting.\n- As you see, the first **10** points (`window_size`=10) on the plot don't have a *position*. Because they aren't involved in calculating reward, profit, etc. They just display the first observations. So the environment's `_start_tick` and initial `_last_trade_tick` are **10** and **9**.",
"_____no_output_____"
],
[
"#### Mix with `stable-baselines` and `quantstats`\n\n[Here](https://github.com/AminHP/gym-anytrading/blob/master/examples/a2c_quantstats.ipynb) is an example that mixes `gym-anytrading` with the mentioned famous libraries and shows how to utilize our trading environments in other RL or trading libraries.",
"_____no_output_____"
],
[
"### Extend and manipulate TradingEnv\n\nIn case you want to process data and extract features outside the environment, it can be simply done by two methods:\n\n**Method 1 (Recommended):**",
"_____no_output_____"
]
],
[
[
"def my_process_data(env):\n start = env.frame_bound[0] - env.window_size\n end = env.frame_bound[1]\n prices = env.df.loc[:, 'Low'].to_numpy()[start:end]\n signal_features = env.df.loc[:, ['Close', 'Open', 'High', 'Low']].to_numpy()[start:end]\n return prices, signal_features\n\n\nclass MyForexEnv(ForexEnv):\n _process_data = my_process_data\n\n\nenv = MyForexEnv(df=FOREX_EURUSD_1H_ASK, window_size=12, frame_bound=(12, len(FOREX_EURUSD_1H_ASK)))",
"_____no_output_____"
]
],
[
[
"**Method 2:**",
"_____no_output_____"
]
],
[
[
"def my_process_data(df, window_size, frame_bound):\n start = frame_bound[0] - window_size\n end = frame_bound[1]\n prices = df.loc[:, 'Low'].to_numpy()[start:end]\n signal_features = df.loc[:, ['Close', 'Open', 'High', 'Low']].to_numpy()[start:end]\n return prices, signal_features\n\n\nclass MyStocksEnv(StocksEnv):\n \n def __init__(self, prices, signal_features, **kwargs):\n self._prices = prices\n self._signal_features = signal_features\n super().__init__(**kwargs)\n\n def _process_data(self):\n return self._prices, self._signal_features\n\n \nprices, signal_features = my_process_data(df=STOCKS_GOOGL, window_size=30, frame_bound=(30, len(STOCKS_GOOGL)))\nenv = MyStocksEnv(prices, signal_features, df=STOCKS_GOOGL, window_size=30, frame_bound=(30, len(STOCKS_GOOGL)))",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
e7fa742ab4b349d7304dbcc221042a46c63488b3 | 1,452 | ipynb | Jupyter Notebook | pyspark/LearningPySpark_Code/BonusChapter01/HelloWorldFromPySpark.ipynb | zephyrGit/Pyspark | 6a230f5facc3e840d798a5263b362ce9676d55d7 | [
"Apache-2.0"
] | 1 | 2017-05-04T03:01:55.000Z | 2017-05-04T03:01:55.000Z | BonusChapter01/HelloWorldFromPySpark.ipynb | LittleGaintSS/Learning-PySpark | 6ea70df4efdf06642037162fa0624491cb5fa42c | [
"MIT"
] | null | null | null | BonusChapter01/HelloWorldFromPySpark.ipynb | LittleGaintSS/Learning-PySpark | 6ea70df4efdf06642037162fa0624491cb5fa42c | [
"MIT"
] | 2 | 2020-10-04T15:39:13.000Z | 2021-02-03T17:29:33.000Z | 16.314607 | 56 | 0.483471 | [
[
[
"sc",
"_____no_output_____"
],
[
"sqlContext",
"_____no_output_____"
],
[
"print(sc.version)",
"2.0.0-preview\n"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code"
]
] |
e7fa744c4e5a053e43c25a7aa053cfc5ed1e3141 | 58,531 | ipynb | Jupyter Notebook | notebooks/.ipynb_checkpoints/Regression-checkpoint.ipynb | tsitsimis/gau-pro | 9662a5f65baeb93af45bcfc62de29c7f3d691d3e | [
"MIT"
] | null | null | null | notebooks/.ipynb_checkpoints/Regression-checkpoint.ipynb | tsitsimis/gau-pro | 9662a5f65baeb93af45bcfc62de29c7f3d691d3e | [
"MIT"
] | null | null | null | notebooks/.ipynb_checkpoints/Regression-checkpoint.ipynb | tsitsimis/gau-pro | 9662a5f65baeb93af45bcfc62de29c7f3d691d3e | [
"MIT"
] | null | null | null | 354.733333 | 54,880 | 0.940459 | [
[
[
"%load_ext autoreload\n%autoreload 2",
"_____no_output_____"
],
[
"import sys\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nsys.path.append(\"../\")\nimport gaupro as gp\nimport gaupro.kernels as kernels",
"_____no_output_____"
]
],
[
[
"## Generate some training data\nSample points from the true underlying function and add some noise",
"_____no_output_____"
]
],
[
[
"def black_box(x):\n return x * np.sin(x)",
"_____no_output_____"
],
[
"x_lim = [0, 20]\nn_train = 10\nsigma_n = 0.0\n\nx_train = np.random.uniform(x_lim[0], x_lim[1], n_train)[None, :]\n\ny_train = black_box(x_train).T # Get some samples from the black-box function\ny_train += np.random.normal(0, sigma_n, n_train)[:, None] # Add noise",
"_____no_output_____"
]
],
[
[
"### Pick some test points for evaluation",
"_____no_output_____"
]
],
[
[
"# test inputs\nn_test = 200\nx_test = np.linspace(x_lim[0], x_lim[1], n_test)[None, :]",
"_____no_output_____"
]
],
[
[
"## Fit and Evaluate Gaussian Process",
"_____no_output_____"
]
],
[
[
"# fit\nregressor = gp.Regressor(kernels.se_kernel())\nregressor.fit(x_train, y_train)\nmu, cov = regressor.predict(x_test)",
"_____no_output_____"
],
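[
"# Extra illustration (a sketch): draw a few sample functions from the fitted\n# GP posterior, using the mean and covariance returned by predict above.\n# Assumes mu has shape (n_test, 1) and cov has shape (n_test, n_test).\nsamples = np.random.multivariate_normal(mu.ravel(), cov, size=3)\nprint(samples.shape)  # (3, n_test)",
"_____no_output_____"
],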
[
"plt.figure(figsize=(12, 6))\n\nt = np.linspace(x_lim[0], x_lim[1], 100)\nplt.plot(t, black_box(t), c='k', linestyle=':', label=\"True function\")\n\nplt.scatter(x_train, y_train, marker='+', c='r', s=220, zorder=100, label=\"Samples\")\n\nplt.plot(x_test[0], regressor.mu.T[0], c='k', zorder=10, label=\"Mean\")\nplt.fill_between(x_test[0], regressor.mu.T[0] - 3 * np.sqrt(regressor.cov[np.diag_indices_from(regressor.cov)]),\n regressor.mu.T[0] + 3 * np.sqrt(regressor.cov[np.diag_indices_from(regressor.cov)]),\n facecolor='gray', alpha=0.5, label=\"Covariance\")\n\nplt.legend()\nplt.show()",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
e7fa75aeebde62c91e59d8eccd427522d8891f2b | 4,743 | ipynb | Jupyter Notebook | notebooks/video_pipeline.ipynb | Imarcos/scikit-learn-mooc | 69a7a7e891c5a4a9bce8983d7c92326674fda071 | [
"CC-BY-4.0"
] | 1 | 2022-01-25T19:20:21.000Z | 2022-01-25T19:20:21.000Z | notebooks/video_pipeline.ipynb | Imarcos/scikit-learn-mooc | 69a7a7e891c5a4a9bce8983d7c92326674fda071 | [
"CC-BY-4.0"
] | null | null | null | notebooks/video_pipeline.ipynb | Imarcos/scikit-learn-mooc | 69a7a7e891c5a4a9bce8983d7c92326674fda071 | [
"CC-BY-4.0"
] | null | null | null | 22.802885 | 94 | 0.560826 | [
[
[
"# How to define a scikit-learn pipeline and visualize it",
"_____no_output_____"
],
[
"The goal of keeping this notebook is to:\n\n- make it available for users that want to reproduce it locally\n- archive the script in the event we want to rerecord this video with an\n update in the UI of scikit-learn in a future release.",
"_____no_output_____"
],
[
"### First we load the dataset",
"_____no_output_____"
],
[
"We need to define our data and target. In this case we will build a classification model",
"_____no_output_____"
]
],
[
[
"import pandas as pd\n\names_housing = pd.read_csv(\"../datasets/house_prices.csv\", na_values='?')\n\ntarget_name = \"SalePrice\"\ndata, target = ames_housing.drop(columns=target_name), ames_housing[target_name]\ntarget = (target > 200_000).astype(int)",
"_____no_output_____"
]
],
[
[
"We inspect the first rows of the dataframe",
"_____no_output_____"
]
],
[
[
"data",
"_____no_output_____"
]
],
[
[
"We can cherry-pick some features and only retain this subset of data",
"_____no_output_____"
]
],
[
[
"numeric_features = ['LotArea', 'FullBath', 'HalfBath']\ncategorical_features = ['Neighborhood', 'HouseStyle']\ndata = data[numeric_features + categorical_features]",
"_____no_output_____"
]
],
[
[
"### Then we create the pipeline",
"_____no_output_____"
],
[
"The first step is to define the preprocessing steps",
"_____no_output_____"
]
],
[
[
"from sklearn.pipeline import Pipeline\nfrom sklearn.impute import SimpleImputer\nfrom sklearn.preprocessing import StandardScaler, OneHotEncoder\n\nnumeric_transformer = Pipeline(steps=[\n ('imputer', SimpleImputer(strategy='median')),\n ('scaler', StandardScaler(),\n)])\n\ncategorical_transformer = OneHotEncoder(handle_unknown='ignore')",
"_____no_output_____"
]
],
[
[
"The next step is to apply the transformations using `ColumnTransformer`",
"_____no_output_____"
]
],
[
[
"from sklearn.compose import ColumnTransformer\n\npreprocessor = ColumnTransformer(transformers=[\n ('num', numeric_transformer, numeric_features),\n ('cat', categorical_transformer, categorical_features),\n])",
"_____no_output_____"
]
],
[
[
"Then we define the model and join the steps in order",
"_____no_output_____"
]
],
[
[
"from sklearn.linear_model import LogisticRegression\n\nmodel = Pipeline(steps=[\n ('preprocessor', preprocessor),\n ('classifier', LogisticRegression()),\n])",
"_____no_output_____"
]
],
[
[
"Let's visualize it!",
"_____no_output_____"
]
],
[
[
"from sklearn import set_config\n\nset_config(display='diagram')\nmodel",
"_____no_output_____"
]
],
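[
[
"# A quick sketch: fit the pipeline once so its pieces can be inspected by\n# name. (cross_validate below clones the pipeline, so this fit is independent.)\nmodel.fit(data, target)\nmodel.named_steps['preprocessor']",
"_____no_output_____"
]
],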
[
[
"### Finally we score the model",
"_____no_output_____"
]
],
[
[
"from sklearn.model_selection import cross_validate\n\ncv_results = cross_validate(model, data, target, cv=5)\nscores = cv_results[\"test_score\"]\nprint(\"The mean cross-validation accuracy is: \"\n f\"{scores.mean():.3f} +/- {scores.std():.3f}\")",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
e7fa7f6da732b53aa34893c5b98b77bcd2fd21b2 | 64,736 | ipynb | Jupyter Notebook | ml_foundations/05_Pipeline/05_02/End/05_02.ipynb | joejoeyjoseph/playground | fa739d51635823b866fafd1e712760074cfc175c | [
"MIT"
] | null | null | null | ml_foundations/05_Pipeline/05_02/End/05_02.ipynb | joejoeyjoseph/playground | fa739d51635823b866fafd1e712760074cfc175c | [
"MIT"
] | null | null | null | ml_foundations/05_Pipeline/05_02/End/05_02.ipynb | joejoeyjoseph/playground | fa739d51635823b866fafd1e712760074cfc175c | [
"MIT"
] | null | null | null | 108.61745 | 23,376 | 0.801069 | [
[
[
"## Pipeline: Clean Continuous Features\n\nUsing the Titanic dataset from [this](https://www.kaggle.com/c/titanic/overview) Kaggle competition.\n\nThis dataset contains information about 891 people who were on board the ship when departed on April 15th, 1912. As noted in the description on Kaggle's website, some people aboard the ship were more likely to survive the wreck than others. There were not enough lifeboats for everybody so women, children, and the upper-class were prioritized. Using the information about these 891 passengers, the challenge is to build a model to predict which people would survive based on the following fields:\n\n- **Name** (str) - Name of the passenger\n- **Pclass** (int) - Ticket class\n- **Sex** (str) - Sex of the passenger\n- **Age** (float) - Age in years\n- **SibSp** (int) - Number of siblings and spouses aboard\n- **Parch** (int) - Number of parents and children aboard\n- **Ticket** (str) - Ticket number\n- **Fare** (float) - Passenger fare\n- **Cabin** (str) - Cabin number\n- **Embarked** (str) - Port of embarkation (C = Cherbourg, Q = Queenstown, S = Southampton)\n\n**This notebook will implement some of the cleaning that was done in Section 2: EDA & Data Cleaning**\n\n![Clean Data](../../img/clean_data.png)",
"_____no_output_____"
],
[
"### Read in Data",
"_____no_output_____"
]
],
[
[
"import matplotlib.pyplot as plt\nimport pandas as pd\nimport seaborn as sns\n%matplotlib inline\n\ntitanic = pd.read_csv('../../../titanic.csv')\ntitanic.head()",
"_____no_output_____"
]
],
[
[
"### Clean continuous variables\n1. Fill in missing values for `Age`\n2. Combine `SibSp` & `Parch`\n3. Drop irrelevant/repetitive variables (`SibSp`, `Parch`, `PassengerId`)",
"_____no_output_____"
],
[
"#### Fill missing for `Age`",
"_____no_output_____"
]
],
[
[
"titanic.isnull().sum()",
"_____no_output_____"
],
[
"titanic['Age'].fillna(titanic['Age'].mean(), inplace=True)",
"_____no_output_____"
]
],
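[
[
"# An alternative worth considering (sketch only, not applied here): impute\n# Age with the median within each passenger class rather than the global\n# mean, since age distributions differ by Pclass.\nage_by_class = titanic.groupby('Pclass')['Age'].transform('median')\n# titanic['Age'] = titanic['Age'].fillna(age_by_class)",
"_____no_output_____"
]
],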
[
[
"#### Combine `SibSp` & `Parch`",
"_____no_output_____"
]
],
[
[
"for i, col in enumerate(['SibSp', 'Parch']):\n plt.figure(i)\n sns.catplot(x=col, y='Survived', data=titanic, kind='point', aspect=2, )",
"_____no_output_____"
],
[
"titanic['Family_cnt'] = titanic['SibSp'] + titanic['Parch']",
"_____no_output_____"
]
],
[
[
"#### Drop unnnecessary variables",
"_____no_output_____"
]
],
[
[
"titanic.drop(['PassengerId', 'SibSp', 'Parch'], axis=1, inplace=True)",
"_____no_output_____"
],
[
"titanic.head(10)",
"_____no_output_____"
]
],
[
[
"### Write out cleaned data",
"_____no_output_____"
]
],
[
[
"titanic.to_csv('../../../titanic_cleaned.csv', index=False)",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
]
] |
e7fa9243bccb0a66a6bf1b1fb79447bb00b21322 | 441,201 | ipynb | Jupyter Notebook | **WeatherPy**/WeatherPy.ipynb | drupps/python-api-challenge | 11256a1f407fb269b8cd607e2227bf5772b8542b | [
"ADSL"
] | null | null | null | **WeatherPy**/WeatherPy.ipynb | drupps/python-api-challenge | 11256a1f407fb269b8cd607e2227bf5772b8542b | [
"ADSL"
] | null | null | null | **WeatherPy**/WeatherPy.ipynb | drupps/python-api-challenge | 11256a1f407fb269b8cd607e2227bf5772b8542b | [
"ADSL"
] | null | null | null | 164.198362 | 43,620 | 0.872076 | [
[
[
"# WeatherPy\n----\n\n#### Note\n* Instructions have been included for each segment. You do not have to follow them exactly, but they are included to help you think through the steps.",
"_____no_output_____"
]
],
[
[
"# Dependencies and Setup\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\nimport requests\nimport time\nfrom random import uniform\nfrom scipy.stats import linregress\nfrom scipy import stats\n\n\n# Import API key\nfrom config import weather_api_key\n\n# Incorporated citipy to determine city based on latitude and longitude\nfrom citipy import citipy\n\n# Range of latitudes and longitudes\nlat_range = (-90, 90)\nlng_range = (-180, 180)",
"_____no_output_____"
]
],
[
[
"## Generate Cities List",
"_____no_output_____"
]
],
[
[
"# List for holding lat_lngs and cities\nlat_lngs = []\ncities = []\n\n# Create a set of random lat and lng combinations\nlats = np.random.uniform(low=-90.000, high=90.000, size=1500)\nlngs = np.random.uniform(low=-180.000, high=180.000, size=1500)\nlat_lngs = zip(lats, lngs)",
"_____no_output_____"
],
[
"# Identify nearest city for each lat, lng combination\nfor lat_lng in lat_lngs:\n city = citipy.nearest_city(lat_lng[0], lat_lng[1]).city_name\n \n # If the city is unique, then add it to a our cities list\n if city not in cities:\n cities.append(city)\n\n# Print the city count to confirm sufficient count\nlen(cities)",
"_____no_output_____"
],
[
"cities",
"_____no_output_____"
]
],
[
[
"### Perform API Calls\n* Perform a weather check on each city using a series of successive API calls.\n* Include a print log of each city as it's being processed (with the city number and city name).",
"_____no_output_____"
]
],
[
[
"# Set up the url\nurl = \"http://api.openweathermap.org/data/2.5/weather?\"\nunits = \"imperial\"\n\nquery_url = f\"{url}appid={weather_api_key}&units={units}&q=\"\nprint(query_url)",
"http://api.openweathermap.org/data/2.5/weather?appid=ad86d236d53d3741491f9b7f50676e8e&units=imperial&q=\n"
],
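[
"# Illustrative single request (requires network access and a valid API key):\n# shows the JSON fields indexed in the retrieval loop below, e.g.\n# [\"sys\"][\"country\"], [\"coord\"], [\"main\"], [\"clouds\"], [\"wind\"].\nsample = requests.get(f\"{query_url}london\").json()\nprint(sample[\"coord\"], sample[\"main\"][\"temp_max\"], sample[\"wind\"][\"speed\"])",
"_____no_output_____"
],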
[
"#creating lists to store extracted values per city\ncity_name = []\ncountry = []\ndate = []\nlat = []\nlng = []\ntemp = []\nhumidity = []\ncloudiness = []\nwind = []\ncity_id = []\n\n#setting the counter values\nrecord = 0\n\nprint(\"Beginning Data Retrieval\")\n\nprint(\"--------------------------------\")\n\n#creating loop to extract values per city and add them to the lists above\nfor city in cities:\n \n try:\n response = requests.get(f\"{query_url}{city}\").json()\n country.append(response[\"sys\"][\"country\"])\n date.append(response[\"dt\"])\n lat.append(response[\"coord\"][\"lat\"])\n lng.append(response[\"coord\"][\"lon\"])\n temp.append(response[\"main\"][\"temp_max\"])\n humidity.append(response[\"main\"][\"humidity\"])\n cloudiness.append(response[\"clouds\"][\"all\"])\n wind.append(response[\"wind\"][\"speed\"])\n city_record = response[\"name\"]\n city_id = response[\"id\"]\n \n #creating an if statment to print the \n if record > 600:\n break\n\n else:\n record += 1\n city_name.append(response[\"name\"])\n \n print(f\"The city is {city_record} and the city id is {city_id}.\")\n \n #using time.sleep to create time delay\n time.sleep(3)\n \n except :\n print(\"City not found. Skipping...\")\n \n continue\n\nprint(\"-------------------------------\")\nprint(\"Data Retrieval Complete\")\nprint(\"-------------------------------\")\n",
"Beginning Data Retrieval\n--------------------------------\nThe city is Mehrān and the city id is 124291.\nThe city is Busselton and the city id is 2075265.\nThe city is Bulgan and the city id is 2032201.\nThe city is Bethel and the city id is 5282297.\nCity not found. Skipping...\nThe city is Mar del Plata and the city id is 3430863.\nThe city is Isangel and the city id is 2136825.\nThe city is Tuktoyaktuk and the city id is 6170031.\nThe city is Kirakira and the city id is 2178753.\nThe city is Meulaboh and the city id is 1214488.\nThe city is Rikitea and the city id is 4030556.\nThe city is Nālūt and the city id is 2214432.\nThe city is Riohacha and the city id is 3670745.\nCity not found. Skipping...\nThe city is Pacific Grove and the city id is 5380437.\nThe city is Ati and the city id is 2436400.\nThe city is Anadyr and the city id is 2127202.\nThe city is Avarua and the city id is 4035715.\nThe city is Souillac and the city id is 933995.\nThe city is Punta Arenas and the city id is 3874787.\nCity not found. Skipping...\nThe city is Mataura and the city id is 6201424.\nThe city is East London and the city id is 1006984.\nThe city is Inuvik and the city id is 5983607.\nThe city is San Patricio and the city id is 3985168.\nThe city is Nikolskoye and the city id is 546105.\nThe city is Puerto Escondido and the city id is 3520994.\nThe city is Bandarbeyla and the city id is 64814.\nThe city is Hilo and the city id is 5855927.\nThe city is Castro and the city id is 3466704.\nThe city is Al Bardīyah and the city id is 80509.\nThe city is Hermanus and the city id is 3366880.\nThe city is Vaini and the city id is 4032243.\nThe city is Dakar and the city id is 2253354.\nThe city is Luba and the city id is 2309528.\nThe city is Katsuura and the city id is 2112309.\nThe city is Tiznit Province and the city id is 2527087.\nThe city is Mount Gambier and the city id is 2156643.\nThe city is Cape Town and the city id is 3369157.\nThe city is Gamba and the city id is 2400547.\nThe city is Altay and the city id is 1529651.\nThe city is Payson and the city id is 5779548.\nThe city is Tezu and the city id is 1254709.\nThe city is Erbaa and the city id is 747489.\nThe city is Qaanaaq and the city id is 3831208.\nThe city is Marabá and the city id is 3395503.\nThe city is Bluff and the city id is 2206939.\nThe city is Thompson and the city id is 6165406.\nThe city is Khatanga and the city id is 2022572.\nThe city is Albany and the city id is 5106841.\nThe city is Port Blair and the city id is 1259385.\nThe city is Esperance and the city id is 2071860.\nThe city is Ushuaia and the city id is 3833367.\nThe city is Cabo San Lucas and the city id is 3985710.\nThe city is Omboué and the city id is 2396853.\nThe city is Faanui and the city id is 4034551.\nThe city is Arraial do Cabo and the city id is 3471451.\nThe city is Rodrigues Alves and the city id is 3665210.\nThe city is Port Macquarie and the city id is 2152659.\nThe city is Catalina and the city id is 5288784.\nCity not found. Skipping...\nThe city is Beloha and the city id is 1067565.\nThe city is Wanning and the city id is 1791779.\nThe city is Hof and the city id is 2902768.\nThe city is Morondava and the city id is 1058381.\nThe city is Carnarvon and the city id is 2074865.\nThe city is Shache and the city id is 1280037.\nThe city is Constitución and the city id is 3893726.\nThe city is Abhā and the city id is 110690.\nThe city is Kapaa and the city id is 5848280.\nThe city is Neiafu and the city id is 4032420.\nCity not found. 
Skipping...\nThe city is Itoman and the city id is 1861280.\nThe city is Kamenka and the city id is 553766.\nCity not found. Skipping...\nThe city is Arman' and the city id is 2127060.\nThe city is George Town and the city id is 1735106.\nThe city is Xai-Xai and the city id is 1024552.\nThe city is Naze and the city id is 1855540.\nThe city is Butaritari and the city id is 2110227.\nThe city is Maţāy and the city id is 352628.\nThe city is Margate and the city id is 2643044.\nThe city is Saskylakh and the city id is 2017155.\nThe city is Bengkulu and the city id is 1649150.\nThe city is Mahébourg and the city id is 934322.\nThe city is Torbay and the city id is 6167817.\nCity not found. Skipping...\nThe city is Prince Rupert and the city id is 6113406.\nThe city is Port Alfred and the city id is 964432.\nThe city is Tasiilaq and the city id is 3424607.\nThe city is Provideniya and the city id is 4031574.\nThe city is Den Helder and the city id is 2757220.\nThe city is Peniche and the city id is 2264923.\nThe city is Noumea and the city id is 2139521.\nThe city is Hofn and the city id is 2630299.\nThe city is Atuona and the city id is 4020109.\nThe city is Lebu and the city id is 3883457.\nThe city is Ponta do Sol and the city id is 2264557.\nThe city is Tazovsky and the city id is 1489853.\nThe city is Kavieng and the city id is 2094342.\nThe city is Chingola and the city id is 919009.\nThe city is San Quintín and the city id is 3984997.\nThe city is Jamestown and the city id is 5122534.\nThe city is Isetskoye and the city id is 1505466.\nThe city is Cherskiy and the city id is 2126199.\nThe city is Erzin and the city id is 296852.\nThe city is Dikson and the city id is 1507390.\nThe city is Longyearbyen and the city id is 2729907.\nThe city is Sampit and the city id is 1628884.\nThe city is Hithadhoo and the city id is 1282256.\nCity not found. Skipping...\nThe city is Novy Urengoy and the city id is 1496511.\nThe city is Moussoro and the city id is 2427336.\nThe city is Kruisfontein and the city id is 986717.\nThe city is Russell and the city id is 4047434.\nThe city is Sabha and the city id is 2212775.\nThe city is Alice Springs and the city id is 2077895.\nThe city is Fortuna and the city id is 5563839.\nThe city is Chokurdakh and the city id is 2126123.\nThe city is Sinop Province and the city id is 739598.\nThe city is Haikou and the city id is 1809078.\nThe city is Sitka and the city id is 5557293.\nThe city is Lorengau and the city id is 2092164.\nCity not found. Skipping...\nThe city is Vao and the city id is 2137773.\nThe city is Igarka and the city id is 1505991.\nCity not found. 
Skipping...\nThe city is Hobart and the city id is 2163355.\nThe city is Healdsburg and the city id is 5356012.\nThe city is Eloy and the city id is 5294167.\nThe city is Upernavik and the city id is 3418910.\nThe city is Hervey Bay and the city id is 2146219.\nThe city is Øksfjord and the city id is 778362.\nThe city is Phek and the city id is 1259784.\nThe city is Verkhovazh'ye and the city id is 474354.\nThe city is Leningradskiy and the city id is 2123814.\nThe city is Haines Junction and the city id is 5969025.\nThe city is Rabak and the city id is 368277.\nThe city is Plettenberg Bay and the city id is 964712.\nThe city is Safi and the city id is 2537881.\nThe city is Tondano and the city id is 1623424.\nThe city is Ribeira Grande and the city id is 3372707.\nThe city is Port Elizabeth and the city id is 964420.\nThe city is Wonthaggi and the city id is 2154826.\nThe city is Clyde River and the city id is 5924351.\nThe city is Nizhniy Baskunchak and the city id is 520798.\nThe city is Airai and the city id is 1651810.\nThe city is Am Timan and the city id is 245338.\nThe city is Vila Franca do Campo and the city id is 3372472.\nThe city is Aklavik and the city id is 5882953.\nThe city is Dukat and the city id is 2125906.\nThe city is Saint-Philippe and the city id is 935215.\nThe city is New Norfolk and the city id is 2155415.\nThe city is Cassilândia and the city id is 3466750.\nThe city is Guerrero Negro and the city id is 4021858.\nThe city is Galle and the city id is 1246294.\nThe city is Coyhaique and the city id is 3894426.\nThe city is Kloulklubed and the city id is 7671223.\nThe city is Bambous Virieux and the city id is 1106677.\nThe city is Hrubieszów and the city id is 770966.\nThe city is Ahipara and the city id is 2194098.\nThe city is Mount Isa and the city id is 2065594.\nThe city is Nome and the city id is 5870133.\nThe city is Ostrovnoy and the city id is 556268.\nThe city is Broome and the city id is 5110365.\nThe city is Severo-Kuril'sk and the city id is 2121385.\nThe city is Yellowknife and the city id is 6185377.\nThe city is Tautira and the city id is 4033557.\nThe city is Broken Hill and the city id is 2173911.\nThe city is Zabaykal'sk and the city id is 2012780.\n"
]
],
[
[
"### Convert Raw Data to DataFrame\n* Export the city data into a .csv.\n* Display the DataFrame",
"_____no_output_____"
]
],
[
[
"# Create a data frame from the data\nweather_dict = { #key on left, right side is values\n \"City\": city_name,\n \"Cloudiness\": cloudiness,\n \"Country\": country,\n \"Date\": date,\n \"Humidity\": humidity,\n \"Lat\": lat,\n \"Lng\": lng,\n \"Max Temp\": temp,\n \"Wind Speed\": wind\n}\n\n# Put data into data frame\nweather_data_df = pd.DataFrame(weather_dict)\n\n# Push the new Data Frame to a new CSV file\nweather_data_df.to_csv(\"../weather_data.csv\",\n encoding=\"utf-8\", index=False, header=True)\n\n# Display the new data frame\nweather_data_df.head()",
"_____no_output_____"
],
[
"#perform count on data frame, to make sure all columns are filled\nweather_data_df.count()",
"_____no_output_____"
]
],
[
[
"## Inspect the data and remove the cities where the humidity > 100%.\n----\nSkip this step if there are no cities that have humidity > 100%. ",
"_____no_output_____"
]
],
[
[
"weather_data_df[weather_data_df[\"Humidity\"]>100]",
"_____no_output_____"
],
[
"# Get the indices of cities that have humidity over 100%.\nweather_data_df = weather_data_df.loc[(weather_data_df[\"Humidity\"] < 100)]\nweather_data_df",
"_____no_output_____"
],
[
"# Make a new DataFrame equal to the city data to drop all humidity outliers by index.\n# Passing \"inplace=False\" will make a copy of the city_data DataFrame, which we call \"clean_city_data\".\nclean_city_data_df = weather_data_df.dropna(how='any')\nclean_city_data_df.count()",
"_____no_output_____"
]
],
[
[
"## Plotting the Data\n* Use proper labeling of the plots using plot titles (including date of analysis) and axes labels.\n* Save the plotted figures as .pngs.",
"_____no_output_____"
],
[
"## Latitude vs. Temperature Plot",
"_____no_output_____"
]
],
[
[
"# Plot the graph\nplt.scatter(lat, temp, marker=\"o\", facecolors=\"tab:blue\", edgecolors=\"black\")\n\n# Setting the title and axises \nplt.title(\"City Latitude vs. Max Temperature (9/2020)\")\nplt.xlabel(\"Latitude\")\nplt.ylabel(\"Max Temperature (F)\")\n\n# Add in a grid for the chart\nplt.grid()\n\n# Save our graph and show the grap\nplt.tight_layout()\nplt.savefig(\"../Images/city_lat_vs_max_temp.png\")\nplt.show()",
"_____no_output_____"
]
],
[
[
"## Latitude vs. Humidity Plot",
"_____no_output_____"
]
],
[
[
"# Plot the graph\nplt.scatter(lat, humidity, marker=\"o\", facecolors=\"tab:blue\", edgecolors=\"black\")\n\n# Setting the title and axises \nplt.title(\"City Latitude vs. Humidity (9/2020)\")\nplt.xlabel(\"Latitude\")\nplt.ylabel(\"Humidity (%)\")\n\n# Add in a grid for the chart\nplt.grid()\n\n# Setting graph limits\nplt.xlim(-60, 85)\nplt.ylim(0, 105)\n\n# Save our graph and show the grap\n#plt.tight_layout()\nplt.savefig(\"../Images/city_lat_vs_humidity.png\")\nplt.show()",
"_____no_output_____"
]
],
[
[
"## Latitude vs. Cloudiness Plot",
"_____no_output_____"
]
],
[
[
"# Plot the graph\nplt.scatter(lat, cloudiness, marker=\"o\", facecolors=\"tab:blue\", edgecolors=\"black\")\n\n# Setting the title and axises \nplt.title(\"City Latitude vs. Cloudiness (9/2020)\")\nplt.xlabel(\"Latitude\")\nplt.ylabel(\"Cloudiness (%)\")\n\n# Add in a grid for the chart\nplt.grid()\n\n# Save our graph and show the grap\nplt.tight_layout()\nplt.savefig(\"../Images/city_lat_vs_cloudiness.png\")\nplt.show()",
"_____no_output_____"
]
],
[
[
"## Latitude vs. Wind Speed Plot",
"_____no_output_____"
]
],
[
[
"# Plot the graph\nplt.scatter(lat, wind, marker=\"o\", facecolors=\"tab:blue\", edgecolors=\"black\")\n\n# Setting the title and axises \nplt.title(\"City Latitude vs. Wind Speed (9/2020)\")\nplt.xlabel(\"Latitude\")\nplt.ylabel(\"Wind Speed (MPH)\")\n\n# Add in a grid for the chart\nplt.grid()\n\n# Save our graph and show the grap\nplt.tight_layout()\nplt.savefig(\"../Images/city_lat_vs_wind_speed.png\")\nplt.show()",
"_____no_output_____"
]
],
[
[
"## Linear Regression",
"_____no_output_____"
],
[
"#### Northern Hemisphere - Max Temp vs. Latitude Linear Regression",
"_____no_output_____"
]
],
[
[
"north_hem = weather_data_df.loc[weather_data_df['Lat'] > 0,]\n\nplt.scatter(north_hem['Lat'], north_hem['Max Temp'], marker=\"o\", facecolors=\"dodgerblue\")\n\nplt.xlabel('Latitude')\nplt.ylabel('Max Temp')\nplt.title(\"Northern Hemisphere - Max Temp vs. Latitude Linear Regression\")\n\nx_values = north_hem['Lat']\ny_values = north_hem['Max Temp']\n\n(slope, intercept, rvalue, pvalue, stderr) = linregress(x_values, y_values)\n\nregress_values = x_values * slope + intercept\nline_eq = \"y = \" + str(round(slope,2)) + \"x + \" + str(round(intercept,2))\n\nplt.plot(x_values,regress_values,\"r-\")\n\n#Printing R Value\nprint(f\"R Val is {rvalue**2}\")",
"R Val is 0.694055871357037\n"
]
],
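[
[
"# Optional refactoring sketch: the eight hemisphere regressions below differ\n# only in the data subset and the y-column, so they could share one helper.\n# Assumes northern = Lat > 0 and southern = Lat < 0.\ndef plot_lat_regression(df, y_col, title):\n    x_values = df['Lat']\n    y_values = df[y_col]\n    (slope, intercept, rvalue, pvalue, stderr) = linregress(x_values, y_values)\n    plt.scatter(x_values, y_values, marker=\"o\", facecolors=\"dodgerblue\")\n    plt.plot(x_values, x_values * slope + intercept, \"r-\")\n    plt.xlabel('Latitude')\n    plt.ylabel(y_col)\n    plt.title(title)\n    print(f\"R Val is {rvalue**2}\")\n\n# Example usage:\n# plot_lat_regression(weather_data_df.loc[weather_data_df['Lat'] < 0], 'Max Temp',\n#                     'Southern Hemisphere - Max Temp vs. Latitude Linear Regression')",
"_____no_output_____"
]
],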
[
[
"#### Southern Hemisphere - Max Temp vs. Latitude Linear Regression",
"_____no_output_____"
]
],
[
[
"south_hem = weather_data_df.loc[weather_data_df['Lat'] > 0,]\n\nplt.scatter(south_hem['Lat'], south_hem['Max Temp'], marker=\"o\", facecolors=\"dodgerblue\")\n\nplt.xlabel('Latitude')\nplt.ylabel('Max Temp (F)')\nplt.title(\"Southern Hemisphere - Max Temp vs. Latitude Linear Regression\")\n\nx_values = south_hem['Lat']\ny_values = south_hem['Max Temp']\n\n(slope, intercept, rvalue, pvalue, stderr) = linregress(x_values, y_values)\n\nregress_values = x_values * slope + intercept\nline_eq = \"y = \" + str(round(slope,2)) + \"x + \" + str(round(intercept,2))\n\nplt.plot(x_values,regress_values,\"r-\")\n\n#Printing R Value\nprint(f\"R Val is {rvalue**2}\")",
"R Val is 0.694055871357037\n"
]
],
[
[
"#### Northern Hemisphere - Humidity (%) vs. Latitude Linear Regression",
"_____no_output_____"
]
],
[
[
"north_hem = weather_data_df.loc[weather_data_df['Lat'] > 0,]\n\nplt.scatter(north_hem['Lat'], north_hem['Humidity'], marker=\"o\", facecolors=\"dodgerblue\")\n\nplt.xlabel('Latitude')\nplt.ylabel('Humidity (%)')\nplt.title(\"Northern Hemisphere - Humidity vs. Latitude Linear Regression\")\n\nx_values = north_hem['Lat']\ny_values = north_hem['Humidity']\n\n(slope, intercept, rvalue, pvalue, stderr) = linregress(x_values, y_values)\n\nregress_values = x_values * slope + intercept\nline_eq = \"y = \" + str(round(slope,2)) + \"x + \" + str(round(intercept,2))\n\nplt.plot(x_values,regress_values,\"r-\")\n\n#Printing R Value\nprint(f\"R Val is {rvalue**2}\")",
"R Val is 0.00679787383915855\n"
]
],
[
[
"#### Southern Hemisphere - Humidity (%) vs. Latitude Linear Regression",
"_____no_output_____"
]
],
[
[
"south_hem = weather_data_df.loc[weather_data_df['Lat'] > 0,]\n\nplt.scatter(south_hem['Lat'], south_hem['Humidity'], marker=\"o\", facecolors=\"dodgerblue\")\n\nplt.xlabel('Latitude')\nplt.ylabel('Humidity (%)')\nplt.title(\"Southern Hemisphere - Humidity vs. Latitude Linear Regression\")\n\nx_values = south_hem['Lat']\ny_values = south_hem['Humidity']\n\n(slope, intercept, rvalue, pvalue, stderr) = linregress(x_values, y_values)\n\nregress_values = x_values * slope + intercept\nline_eq = \"y = \" + str(round(slope,2)) + \"x + \" + str(round(intercept,2))\n\nplt.plot(x_values,regress_values,\"r-\")\n\n#Printing R Value\nprint(f\"R Val is {rvalue**2}\")",
"R Val is 0.00679787383915855\n"
]
],
[
[
"#### Northern Hemisphere - Cloudiness (%) vs. Latitude Linear Regression",
"_____no_output_____"
]
],
[
[
"north_hem = weather_data_df.loc[weather_data_df['Lat'] > 0,]\n\nplt.scatter(north_hem['Lat'], north_hem['Cloudiness'], marker=\"o\", facecolors=\"dodgerblue\")\n\nplt.xlabel('Latitude')\nplt.ylabel('Cloudiness (%)')\nplt.title(\"Northern Hemisphere - Cloudiness vs. Latitude Linear Regression\")\n\nx_values = north_hem['Lat']\ny_values = north_hem['Cloudiness']\n\n(slope, intercept, rvalue, pvalue, stderr) = linregress(x_values, y_values)\n\nregress_values = x_values * slope + intercept\nline_eq = \"y = \" + str(round(slope,2)) + \"x + \" + str(round(intercept,2))\n\nplt.plot(x_values,regress_values,\"r-\")\n\n#Printing R Value\nprint(f\"R Val is {rvalue**2}\")",
"R Val is 0.00044310179247288993\n"
]
],
[
[
"#### Southern Hemisphere - Cloudiness (%) vs. Latitude Linear Regression",
"_____no_output_____"
]
],
[
[
"south_hem = weather_data_df.loc[weather_data_df['Lat'] > 0,]\n\nplt.scatter(south_hem['Lat'], south_hem['Cloudiness'], marker=\"o\", facecolors=\"dodgerblue\")\n\nplt.xlabel('Latitude')\nplt.ylabel('Cloudiness (%)')\nplt.title(\"Southern Hemisphere - Max Temp vs. Latitude Linear Regression\")\n\nx_values = south_hem['Lat']\ny_values = south_hem['Cloudiness']\n\n(slope, intercept, rvalue, pvalue, stderr) = linregress(x_values, y_values)\n\nregress_values = x_values * slope + intercept\nline_eq = \"y = \" + str(round(slope,2)) + \"x + \" + str(round(intercept,2))\n\nplt.plot(x_values,regress_values,\"r-\")\n\n#Printing R Value\nprint(f\"R Val is {rvalue**2}\")",
"R Val is 0.00044310179247288993\n"
]
],
[
[
"#### Northern Hemisphere - Wind Speed (mph) vs. Latitude Linear Regression",
"_____no_output_____"
]
],
[
[
"north_hem = weather_data_df.loc[weather_data_df['Lat'] > 0,]\n\nplt.scatter(north_hem['Lat'], north_hem['Wind Speed'], marker=\"o\", facecolors=\"dodgerblue\")\n\nplt.xlabel('Latitude')\nplt.ylabel('Wind Speed (MPH)')\nplt.title(\"Northern Hemisphere - Wind Speed vs. Latitude Linear Regression\")\n\nx_values = north_hem['Lat']\ny_values = north_hem['Wind Speed']\n\n(slope, intercept, rvalue, pvalue, stderr) = linregress(x_values, y_values)\n\nregress_values = x_values * slope + intercept\nline_eq = \"y = \" + str(round(slope,2)) + \"x + \" + str(round(intercept,2))\n\nplt.plot(x_values,regress_values,\"r-\")\n\n#Printing R Value\nprint(f\"R Val is {rvalue**2}\")",
"R Val is 0.02084202630425654\n"
]
],
[
[
"#### Southern Hemisphere - Wind Speed (mph) vs. Latitude Linear Regression",
"_____no_output_____"
]
],
[
[
"south_hem = weather_data_df.loc[weather_data_df['Lat'] > 0,]\n\nplt.scatter(south_hem['Lat'], south_hem['Wind Speed'], marker=\"o\", facecolors=\"dodgerblue\")\n\nplt.xlabel('Latitude')\nplt.ylabel('Windspeed (MPH)')\nplt.title(\"Southern Hemisphere - Wind Speed vs. Latitude Linear Regression\")\n\nx_values = south_hem['Lat']\ny_values = south_hem['Wind Speed']\n\n(slope, intercept, rvalue, pvalue, stderr) = linregress(x_values, y_values)\n\nregress_values = x_values * slope + intercept\nline_eq = \"y = \" + str(round(slope,2)) + \"x + \" + str(round(intercept,2))\n\nplt.plot(x_values,regress_values,\"r-\")\n\n#Printing R Value\nprint(f\"R Val is {rvalue**2}\")",
"R Val is 0.02084202630425654\n"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
e7fa92bc2076bde339fe670730bee77cd4e1118d | 360,495 | ipynb | Jupyter Notebook | Figure 6 (Conductance model), Figure 5B (Model derived inhibition).ipynb | sahilm89/PreciseBalance | 9df65b5956b40f18b84168b69d7ce1138b47b9d4 | [
"MIT"
] | null | null | null | Figure 6 (Conductance model), Figure 5B (Model derived inhibition).ipynb | sahilm89/PreciseBalance | 9df65b5956b40f18b84168b69d7ce1138b47b9d4 | [
"MIT"
] | null | null | null | Figure 6 (Conductance model), Figure 5B (Model derived inhibition).ipynb | sahilm89/PreciseBalance | 9df65b5956b40f18b84168b69d7ce1138b47b9d4 | [
"MIT"
] | null | null | null | 136.60288 | 36,148 | 0.850741 | [
[
[
"# Figure 6",
"_____no_output_____"
]
],
[
[
"from sympy import symbols, exp, solve, logcombine, simplify, Piecewise, lambdify, N, init_printing, Eq\nimport numpy\nimport scipy.stats as ss\nfrom sympy.physics.units import seconds, siemens, volts, farads, amperes, milli, micro, nano, pico, ms, s, kg, meters\nfrom matplotlib import pyplot as plt\nimport matplotlib\nfrom matplotlib.colors import LinearSegmentedColormap\nimport matplotlib.patches as patches\nplt.style.use('neuron_color')\nimport os\nimport sys\nsys.path.append('../')\nfrom Linearity import Neuron\nimport lmfit\nfrom pickle import dump",
"_____no_output_____"
],
[
"def simpleaxis(axes, every=False, outward=False, hideTitle=True):\n if not isinstance(axes, (list, numpy.ndarray)):\n axes = [axes]\n for ax in axes:\n ax.spines['top'].set_visible(False)\n ax.spines['right'].set_visible(False)\n if (outward):\n ax.spines['bottom'].set_position(('outward', 10))\n ax.spines['left'].set_position(('outward', 10))\n if every:\n ax.spines['bottom'].set_visible(False)\n ax.spines['left'].set_visible(False)\n ax.get_xaxis().tick_bottom()\n ax.get_yaxis().tick_left()\n if hideTitle:\n ax.set_title('')",
"_____no_output_____"
],
[
"from IPython.display import display, Markdown, Image",
"_____no_output_____"
],
[
"init_printing()",
"_____no_output_____"
]
],
[
[
"## 6 A Circuit diagram",
"_____no_output_____"
],
[
"<img src=\"Fig_6_a.png\" alt=\"Drawing\" style=\"width: 800px;\"/>",
"_____no_output_____"
]
],
[
[
"prefix = '/home/bhalla/Documents/Codes/data'",
"_____no_output_____"
]
],
[
[
"## 6 B: Fitting voltage clamp data to get parameters",
"_____no_output_____"
]
],
[
[
"analysisFile = prefix + '/media/sahil/NCBS_Shares_BGStim/patch_data/170530/c1_EI/plots/c1_EI.pkl'\nplotDir = os.path.dirname(analysisFile)\nneuron = Neuron.load(analysisFile)",
"_____no_output_____"
]
],
[
[
"$g(t) = \\bar{g}\\frac{( e^\\frac{\\delta_{onset} - t }{\\tau_{decay}} - e^\\frac{\\delta_{onset} - t }{\\tau_{rise}})}{- \\left(\\frac{\\tau_{rise}}{\\tau_{decay}}\\right)^{\\frac{\\tau_{decay}}{\\tau_{decay} - \\tau_{rise}}} + \\left(\\frac{\\tau_{rise}}{\\tau_{decay}}\\right)^{\\frac{\\tau_{rise}}{\\tau_{decay} - \\tau_{rise}}}}$",
"_____no_output_____"
]
],
[
[
"def fitFunctionToPSP(time, vector, t_0=0, g_max=0):\n ''' Fits using lmfit '''\n\n def _doubleExponentialFunction(t, t_0, tOn, tOff, g_max):\n ''' Returns the shape of an EPSP as a double exponential function '''\n tPeak = t_0 + float(((tOff * tOn)/(tOff-tOn)) * numpy.log(tOff/tOn))\n A = 1./(numpy.exp(-(tPeak-t_0)/tOff) - numpy.exp(-(tPeak-t_0)/tOn))\n g = [ g_max * A * (numpy.exp(-(t_point-t_0)/tOff) - numpy.exp(-(t_point-t_0)/tOn)) if t_point >= t_0 else 0. for t_point in t]\n return numpy.array(g)\n\n model = lmfit.Model(_doubleExponentialFunction)\n # Fixing values of variables from data\n # Onset time\n if not t_0:\n model.set_param_hint('t_0', value =max(time)/10., min=0., max = max(time))\n else:\n model.set_param_hint('t_0', value = t_0, vary=False)\n # g_max \n if not g_max:\n model.set_param_hint('g_max', value = max(vector)/10., min = 0., max = max(vector))\n else:\n model.set_param_hint('g_max', value = g_max, vary=False)\n\n model.set_param_hint('tOn', value =max(time)/5.1 , min = 0., max = max(time))\n model.set_param_hint('t_ratio', value =10., min=1.05)\n model.set_param_hint('tOff', min = 0., expr='tOn*t_ratio')\n model.set_param_hint('t_peak', expr = 't_0 + ((tOff * tOn)/(tOff-tOn)) * log(tOff/tOn)')\n pars = model.make_params()\n\n result = model.fit(vector, pars, t=time)\n # print (result.fit_report())\n return result",
"_____no_output_____"
],
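[
"# Numerical check of the normalized double exponential above, with\n# illustrative values (tOn = 7 ms, tOff = 16 ms, g_max = 1): the\n# normalization factor A should make the peak of g(t) equal g_max.\nt_chk = numpy.arange(0., 100., 0.05)\ntOn, tOff, g_max_chk = 7., 16., 1.\ntPeak = (tOff * tOn / (tOff - tOn)) * numpy.log(tOff / tOn)\nA = 1. / (numpy.exp(-tPeak / tOff) - numpy.exp(-tPeak / tOn))\ng_chk = g_max_chk * A * (numpy.exp(-t_chk / tOff) - numpy.exp(-t_chk / tOn))\nprint(numpy.max(g_chk))  # ~1.0, i.e. g_max",
"_____no_output_____"
],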
[
"n = {key:value for key,value in neuron}\nfor numSq in set(n[1]).intersection(set(n[2])):\n for i in set(n[1][numSq].trial).intersection(set(n[2][numSq].trial)):\n if i == 3 and numSq == 7:\n exc = -1e9*n[1][numSq].trial[i].interestWindow\n inh = 1e9*n[2][numSq].trial[i].interestWindow\n time = numpy.arange(len(n[1][numSq].trial[i].interestWindow))*n[1][numSq].trial[i].samplingTime\n exc_fit = fitFunctionToPSP(time, exc)\n inh_fit = fitFunctionToPSP(time, inh)\n print (exc_fit.fit_report())\n print (inh_fit.fit_report())\n fig,ax = plt.subplots() \n ax.plot(time*1e3, exc, alpha=0.6, c='indigo')\n ax.set_xlabel(\"Time (ms)\")\n ax.set_ylabel(\"Current (pA)\")\n ax.plot(time*1e3, exc_fit.best_fit, label=\"Excitation\", c='indigo')\n\n ax.plot(time*1e3, -inh, alpha=0.6, c='k')\n ax.plot(time*1e3, -inh_fit.best_fit, label=\"Inhibition\", c='k')\n ax.annotate('Excitation', (50,100), (50,100), xycoords='data',color='indigo')\n ax.annotate('Inhibition', (50,-300), (50,-300), xycoords='data',color='k')\n fig.set_figwidth(1.5)\n fig.set_figheight(1.5)\n simpleaxis(ax)\n dump(fig,file('figures/fig6/6b.pkl','wb'))\n plt.show()",
"_____no_output_____"
],
[
"samplingRate = 20 # kHz, to get milliseconds\nsample_every = 10 # points\ntimeStep, maxTime = (sample_every*1.)/ samplingRate, 100. # ms\ntrange = numpy.arange(\n 0., maxTime, timeStep) # We will always use 100. ms timecourse of PSPs.",
"_____no_output_____"
],
[
"#### Range of $g_e$ explored\nemax = 3.\ne_step = 0.5\nerange = numpy.arange(0., emax, e_step)\n#### Range of proportionality ($P$) between $E$ and $I$\nprop_array = numpy.arange(0, 6, 1)",
"_____no_output_____"
],
[
"## Setting up the variables, parameters and units for simulation\nt, P, e_r, e_d, delta_e, rho_e, g_e, i_r, i_d, delta_i, rho_i, g_i, b, Cm, g_L = symbols(\n 't P \\\\tau_{er} \\\\tau_{ed} \\\\delta_e \\\\rho_e \\\\bar{g}_e \\\\tau_{ir} \\\\tau_{id} \\\\delta_i \\\\rho_i \\\\bar{g}_i \\\\beta C_m \\\\bar{g}_L',\n positive=True,\n real=True)\nleak_rev, e_rev, i_rev, Vm = symbols(\n 'Leak_{rev} Exc_{rev} Inh_{rev} V_m', real=True)\nSymbolDict = {\n t: \"Time (ms)\",\n P: \"Proportion of $g_i/g_e$\",\n e_r: \"Excitatory Rise (ms)\",\n e_d: \"Excitatory Fall (ms)\",\n delta_e: \"Excitatory onset time (ms)\",\n rho_e: \"Excitatory $tau$ ratio (fall/rise)\",\n g_e: \"Excitatory max conductance\",\n i_r: \"Inhibitory Rise (ms)\",\n i_d: \"Inhibitory Fall(ms)\",\n delta_i: \"Inhibitory onset time(ms)\",\n rho_i: \"Inhibitory $tau$ ratio (fall/rise)\",\n g_i: \"Inhibitory max conductance\",\n b: \"Inhibitory/Excitatory $tau$ rise ratio\"\n}\nunitsDict = {\n 's': seconds,\n 'exp': exp,\n 'S': siemens,\n 'V': volts,\n 'A': amperes,\n 'm': meters,\n 'kg': kg\n} # This is for lamdify\nnS, pF, mV, pA = nano * siemens, pico * farads, milli * volts, pico*amperes\n### Estimates from data and averaging them to get a number\nestimateDict = {\n P: (1,5),\n #e_r: (1.5 * ms, 5 * ms),\n #e_d: (8. * ms, 20. * ms),\n e_r: (7. * ms, 7. * ms),\n e_d: (16. * ms, 16. * ms),\n delta_e: (0. * ms, 0. * ms),\n rho_e: (2., 7.),\n g_e: (0.02 * nS, 0.25 * nS),\n #i_r: (1.5 * ms, 5. * ms),\n #i_d: (14. * ms, 60. * ms),\n i_r: (13. * ms, 13. * ms),\n i_d: (27. * ms, 27. * ms),\n delta_i: (2. * ms, 4. * ms),\n rho_i: (5., 20.),\n g_i: (0.04 * nS, 0.5 * nS),\n b: (0.5, 5.)\n}\naverageEstimateDict = {\n key: value[0] + value[1] / 2\n for key, value in estimateDict.items()\n}\n\n### Approximating the rest from literature\napproximateDict = {\n g_L: 6.25 * nS, # Changing from 10 to 6.25\n e_rev: 0. * mV,\n i_rev: -70. * mV,\n leak_rev: -65. * mV,\n Cm: 100 * pF\n}\nsourceDict = {\n g_L: \"None\",\n e_rev: \"None\",\n i_rev: \"None\",\n leak_rev: \"None\",\n Cm: \"Neuroelectro.org\"\n}",
"_____no_output_____"
]
],
[
[
"| Variable | Meaning | Range |\n|---|---|---|\n|$t$|Time (ms)|0-100|\n|$P$|Proportion of $g_i/g_e$|2-4|\n|$\\tau_{er}$|Excitatory Rise (ms)|1.5-5|\n|$\\tau_{ed}$|Excitatory Fall (ms)|8-20|\n|$\\delta_e$|Excitatory onset time (ms)|0-0|\n|$\\rho_e$|Excitatory $tau$ ratio (fall/rise)|2-7|\n|$\\bar{g}_e$|Excitatory max conductance|0.02-0.25|\n|$\\tau_{ir}$|Inhibitory Rise (ms)|1.5-5|\n|$\\tau_{id}$|Inhibitory Fall(ms)|14-60|\n|$\\delta_i$|Inhibitory onset time(ms)|3-15|\n|$\\rho_i$|Inhibitory $tau$ ratio (fall/rise)|5-20|\n|$\\bar{g}_i$|Inhibitory max conductance|0.04-0.5|\n|$\\beta$|Inhibitory/Excitatory $tau$ rise ratio|0.5-5|",
"_____no_output_____"
],
[
"| Variable | Meaning | Source | Value |\n|---|---|---|\n|$g_L$|Leak conductance| Fernandos and White, J. Neuro. (2010) | 10 nS |\n|$Exc_{rev}$|Excitatory reversal|Calculated (Methods)| 0 mV|\n|$Inh_{rev}$|Inhibitory reversal |Calculated (Methods)| -70 mV |\n|$Leak_{rev}$|Leak reversal |Fernandos and White, J. Neuro. (2010)| -65 mV |\n|$C_m$|Membrane capacitance |neuroelectro.org| 100 pF|",
"_____no_output_____"
],
[
"---",
"_____no_output_____"
]
],
[
[
"### Double exponential to explain the net synaptic conductance.\nalpha = exp(-(t - delta_e) / e_d) - exp(-(t - delta_e) / e_r)\nalpha_prime = alpha.diff(t)\ntheta_e = solve(alpha_prime, t) # Time to peak\ntheta_e = logcombine(theta_e[0])\nsimplify(theta_e.subs(averageEstimateDict))\nalpha_star = simplify(alpha.subs(t, theta_e).doit())\n\n### Finding maximum of the curve and substituting ratio of taus\ng_E = Piecewise((0. * nS, t / ms < delta_e / ms), (g_e * (alpha / alpha_star),\n True))\n### Final equation for Excitation normalized to be maximum at $g_e$\n\n### Doing the same with inhibition\ng_I = g_E.xreplace({\n g_e: g_i,\n rho_e: rho_i,\n e_r: i_r,\n e_d: i_d,\n delta_e: delta_i\n})\nalpha_I = alpha.xreplace({e_r: i_r, e_d: i_d, delta_e: delta_i})\nalpha_star_I = alpha_star.xreplace({e_r: i_r, e_d: i_d})\ng_I = Piecewise((0. * nS, t / ms < delta_i / ms),\n (g_i * (alpha_I / alpha_star_I), True))",
"_____no_output_____"
],
[
"### Now finding the control peak using difference of these double-exponentials\ncompartment = Eq((1 / Cm) * (g_E * (Vm - e_rev) + g_I * (Vm - i_rev) + g_L *\n (Vm - leak_rev)), Vm.diff(t))\nVm_t = solve(compartment, Vm, rational=False, simplify=True)\ncheck_vm_t = Vm_t[0].subs({ i: averageEstimateDict[i] for i in averageEstimateDict if i not in [g_e,g_i, P] }).subs(approximateDict).subs({ g_i: P * g_e })",
"_____no_output_____"
],
[
"### Now finding the control peak using difference of these double-exponentials (checking with this form of the equation)\ncompartment = Eq((1 / Cm) * (g_E * (e_rev - Vm) + g_I * (i_rev - Vm) + g_L *\n (leak_rev - Vm)), Vm.diff(t))\nVm_t = solve(compartment, Vm, rational=False, simplify=True)\ncheck_vm_t = Vm_t[0].subs({ i: averageEstimateDict[i] for i in averageEstimateDict if i not in [g_e,g_i, P] }).subs(approximateDict).subs({ g_i: P * g_e })",
"_____no_output_____"
],
[
"f = lambdify((g_e, P, t), check_vm_t/mV, (unitsDict, \"numpy\"))",
"_____no_output_____"
]
],
[
[
"## 6 C Divisive Inhibition: Inhibition proportional to Excitation, or $g_i = P \\times g_e$",
"_____no_output_____"
]
],
[
[
"di_exc = [[float(f(e * nS, 0., dt * ms)) for dt in trange] for e in erange]\ndi_control = {prop: [[float(f(e * nS, prop, dt * ms)) for dt in trange] for e in erange] for prop in prop_array}",
"_____no_output_____"
],
[
"fig, ax = plt.subplots()\n# plt.style.context('neuron-color')\nhandles, labels = [], []\nfor prop in prop_array:\n v_max, e_max = [], []\n for con_trace,e_t in zip(di_control[prop], di_exc):\n v_max.append(max(con_trace) - float(approximateDict[leak_rev]/mV))\n e_max.append(max(e_t) - float(approximateDict[leak_rev]/mV))\n handles.append(ax.scatter(e_max, v_max, s=10))\n ax.plot(e_max, v_max, '--')\n labels.append(\"$P= {}$\".format(prop))\nax.set_xlabel(\"Excitation $V_{max}$\")\nax.set_ylabel(\"Control $V_{max}$\")\n# left, bottom, width, height = [0.25, 0.6, 0.2, 0.2]\n# ax2 = fig.add_axes([left, bottom, width, height])\n# for prop in prop_array:\n# ax2.plot(trange, di_control[prop][5])\n#ax2.set_xlabel(\"Time\")\n#ax2.set_ylabel(\"Membrane potential (mV)\")\n# fig.legend(handles, labels, loc ='center right')\nfig.set_figwidth(1.5)\nfig.set_figheight(1.5)\nsimpleaxis(ax)\ndump(fig,file('figures/fig6/6c.pkl','wb'))\n# ax.set_title(\"Divisive Inhibition\")\nplt.show()",
"_____no_output_____"
],
[
"fig, ax = plt.subplots()\nhandles, labels = [], []\nfor prop in prop_array:\n ttp, e_max = [], []\n for con_trace,e_t in zip(di_control[prop], di_exc):\n ttp.append(numpy.argmax(con_trace))\n e_max.append(max(e_t) - float(approximateDict[leak_rev]/mV))\n handles.append(ax.scatter(e_max[1:], ttp[1:], s=10))\n ax.plot(e_max[1:], ttp[1:], '--')\n labels.append(\"$P= {}$\".format(prop))\nax.set_xlabel(\"Excitation $V_{max}$\")\nax.set_ylabel(\"Time to peak $t_{peak}$\")\nax.set_xlim(0,15)\nax.set_ylim(0,55)\n# fig.legend(handles, labels, loc ='center right')\nfig.set_figwidth(1.5)\nfig.set_figheight(1.5)\nsimpleaxis(ax)\n# dump(fig,file('figures/fig6/6g.pkl','wb'))\nplt.show()",
"_____no_output_____"
],
[
"handles, labels = [], []\nfor prop in prop_array:\n fig, ax = plt.subplots()\n ttp, e_max = [], []\n for con_trace,e_t in zip(di_control[prop], di_exc):\n ax.plot(trange, con_trace,c='k')\n fig.set_figwidth(12)\n plt.show()\n",
"_____no_output_____"
],
[
"threshold = 5.5\nfig, ax = plt.subplots()\nhandles, labels = [], []\nfor prop in prop_array:\n v_max, e_max, spk_t = [], [], []\n for con_trace,e_t in zip(di_control[prop], di_exc):\n v_max.append(max(con_trace) - float(approximateDict[leak_rev]/mV))\n e_max.append(max(e_t) - float(approximateDict[leak_rev]/mV))\n spiking = numpy.where((numpy.array(con_trace) - float(approximateDict[leak_rev]/mV)) > threshold)[0]\n if len(spiking):\n spk_t.append(spiking[0])\n else:\n spk_t.append(numpy.nan)\n# print(numpy.where(e_t>threshold))\n handles.append(ax.plot(erange, spk_t, '.-'))\n# ax.plot(e_max, v_max, '--')\n labels.append(\"$P= {}$\".format(prop))\nax.set_xlabel(\"Excitation $V_{max}$\")\nax.set_ylabel(\"Spike Time $t_{sp}$\")\n# left, bottom, width, height = [0.25, 0.6, 0.2, 0.2]\n# ax2 = fig.add_axes([left, bottom, width, height])\n# for prop in prop_array:\n# ax2.plot(trange, dn_control[prop][5])\n# ax.hlines(y=threshold, linestyle='--')\n#ax2.set_xlabel(\"Time\")\n#ax2.set_ylabel(\"Membrane potential (mV)\")\n# fig.legend(handles, labels, loc ='right')\nfig.set_figwidth(2)\nfig.set_figheight(2)\nsimpleaxis(ax)\n#dump(fig,file('figures/fig6/6e.pkl','wb'))\n# ax.set_title(\"Divisive Normalization\", fontsize=18)\nplt.show()",
"_____no_output_____"
],
[
"print ( \"Constant $delta_i$ was {:.1f} ms\".format(averageEstimateDict[delta_i]/ms))",
"Constant $delta_i$ was 4.0 ms\n"
]
],
[
[
"6 DEF: Divisive Normalization: Inhibition proportional to Excitation, or $g_i = P \\times g_e$ and $\\delta_i$ inversely proportional to $g_e$",
"_____no_output_____"
],
[
"## 6 D Changing $\\delta_i$ = $\\delta_{min} + me^{-k\\times{g_e}}$",
"_____no_output_____"
]
],
[
[
"time_erange = numpy.linspace(0.,4.,10)",
"_____no_output_____"
],
[
"d = lambda minDelay,k,e: minDelay + m*exp(-(k*e))\nnS = nano*siemens\nk, m, minDelay = 1.43/nS, 18.15*ms, 2.54*ms\nmaxDelay = (minDelay + m)/ms",
"_____no_output_____"
],
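[
"# Sanity check of the delay function at its limits: at g_e = 0 the delay is\n# minDelay + m (= maxDelay, ~20.7 ms); for large g_e it decays to minDelay\n# (~2.54 ms). Parameter values are the fitted ones defined above.\nprint(d(minDelay, k, 0. * nS) / ms)\nprint(d(minDelay, k, 100. * nS) / ms)\nprint(maxDelay)",
"_____no_output_____"
],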
[
"fig, ax = plt.subplots()\nax.scatter(time_erange, [d(minDelay,k,e*nS)/ms for e in time_erange], s=40, facecolor='k', edgecolor='k')\nax.set_xlabel(\"$g_{exc}$ (nS)\")\nax.set_ylabel(\"$\\\\delta_i$ (ms)\")\nfig.set_figwidth(1.5)\nfig.set_figheight(1.5)\nax.set_xlim(0,4.5)\nax.set_ylim(0, 13 )\nax.set_xticks(range(4))\nax.set_yticks(range(0,13,2))\nsimpleaxis(ax)\ndump(fig,file('figures/fig6/6d.pkl','wb'))\nplt.show()",
"_____no_output_____"
],
[
"check_vm = simplify(Vm_t[0].subs({i:averageEstimateDict[i] for i in averageEstimateDict if i not in [g_e, g_i, delta_i]}).subs(approximateDict).subs({g_i: P*g_e, delta_i: d(minDelay,k,g_e)}).evalf())\nf = lambdify((g_e, P, t), check_vm/mV, (unitsDict, \"numpy\"))",
"_____no_output_____"
],
[
"dn_exc = [[float(f(e * nS, 0., dt * ms)) for dt in trange] for e in erange]\ndn_control = {prop: [[float(f(e * nS, prop, dt * ms)) for dt in trange] for e in erange] for prop in prop_array}",
"_____no_output_____"
]
],
[
[
"## 6 E Divisive Normalization ",
"_____no_output_____"
]
],
[
[
"fig, ax = plt.subplots()\nhandles, labels = [], []\nfor prop in prop_array:\n v_max, e_max = [], []\n for con_trace,e_t in zip(dn_control[prop], dn_exc):\n v_max.append(max(con_trace) - float(approximateDict[leak_rev]/mV))\n e_max.append(max(e_t) - float(approximateDict[leak_rev]/mV))\n handles.append(ax.scatter(e_max, v_max, s=10))\n ax.plot(e_max, v_max, '--')\n labels.append(\"$P= {}$\".format(prop))\nax.set_xlabel(\"Excitation $V_{max}$\")\nax.set_ylabel(\"Control $V_{max}$\")\n# left, bottom, width, height = [0.25, 0.6, 0.2, 0.2]\n# ax2 = fig.add_axes([left, bottom, width, height])\n# for prop in prop_array:\n# ax2.plot(trange, dn_control[prop][5])\n#ax2.set_xlabel(\"Time\")\n#ax2.set_ylabel(\"Membrane potential (mV)\")\n# fig.legend(handles, labels, loc ='right')\nfig.set_figwidth(1.5)\nfig.set_figheight(1.5)\nsimpleaxis(ax)\ndump(fig,file('figures/fig6/6e.pkl','wb'))\n# ax.set_title(\"Divisive Normalization\", fontsize=18)\nplt.show()",
"_____no_output_____"
]
],
[
[
"### Synapses to threshold",
"_____no_output_____"
]
],
[
[
"threshold = 5.5\nfig, ax = plt.subplots()\nhandles, labels = [], []\nfor prop in prop_array:\n v_max, e_max, spk_t = [], [], []\n for con_trace,e_t in zip(dn_control[prop], dn_exc):\n v_max.append(max(con_trace) - float(approximateDict[leak_rev]/mV))\n e_max.append(max(e_t) - float(approximateDict[leak_rev]/mV))\n spiking = numpy.where((numpy.array(con_trace) - float(approximateDict[leak_rev]/mV)) > threshold)[0]\n if len(spiking):\n spk_t.append(spiking[0])\n else:\n spk_t.append(numpy.nan)\n# print(numpy.where(e_t>threshold))\n handles.append(ax.plot(erange, spk_t, '.-'))\n# ax.plot(e_max, v_max, '--')\n labels.append(\"$P= {}$\".format(prop))\nax.set_xlabel(\"Excitation $V_{max}$\")\nax.set_ylabel(\"Spike Time $t_{sp}$\")\n# left, bottom, width, height = [0.25, 0.6, 0.2, 0.2]\n# ax2 = fig.add_axes([left, bottom, width, height])\n# for prop in prop_array:\n# ax2.plot(trange, dn_control[prop][5])\n# ax.hlines(y=threshold, linestyle='--')\n#ax2.set_xlabel(\"Time\")\n#ax2.set_ylabel(\"Membrane potential (mV)\")\n# fig.legend(handles, labels, loc ='right')\nfig.set_figwidth(2)\nfig.set_figheight(2)\nsimpleaxis(ax)\n#dump(fig,file('figures/fig6/6e.pkl','wb'))\n# ax.set_title(\"Divisive Normalization\", fontsize=18)\nplt.show()",
"_____no_output_____"
],
[
"fig, ax = plt.subplots()\nthreshold = 5.5\nhandles, labels = [], []\nfor prop in prop_array[:1]:\n v_max, e_max, spk_t = [], [], []\n for con_trace,e_t in zip(dn_control[prop], dn_exc):\n v_max.append(max(con_trace) - float(approximateDict[leak_rev]/mV))\n e_max.append(max(e_t) - float(approximateDict[leak_rev]/mV))\n spiking = numpy.where((numpy.array(con_trace) - float(approximateDict[leak_rev]/mV)) >= threshold)[0]\n time = numpy.linspace(0., 100., len(con_trace))\n if len(spiking):\n spk_t.append(spiking[0])\n# print(spiking[0])\n ax.plot(time[:len(time)/4], numpy.array(con_trace[:len(time)/4]) - float(approximateDict[leak_rev]/mV))\n #ax.plot(time[spiking[0]], con_trace[spiking[0]] - float(approximateDict[leak_rev]/mV), 'o',markersize=4, color='k')\n else:\n spk_t.append(numpy.nan)\n# print(numpy.where(e_t>threshold))\n \n #handles.append(ax.plot(erange, spk_t, '.-'))\n# ax.plot(e_max, v_max, '--')\n #labels.append(\"$P= {}$\".format(prop))\nax.hlines(y=5, xmin=0, xmax=ax.get_xlim()[1], linestyles='--')\nax.set_ylim(0,10.)\nax.set_xlabel(\"Excitation $V_{max}$\")\nax.set_ylabel(\"Spike Time $t_{sp}$\")\n# left, bottom, width, height = [0.25, 0.6, 0.2, 0.2]\n# ax2 = fig.add_axes([left, bottom, width, height])\n# for prop in prop_array:\n# ax2.plot(trange, dn_control[prop][5])\n# ax.hlines(y=threshold, linestyle='--')\n#ax2.set_xlabel(\"Time\")\n#ax2.set_ylabel(\"Membrane potential (mV)\")\n# fig.legend(handles, labels, loc ='right')\nfig.set_figwidth(2)\nfig.set_figheight(2)\nsimpleaxis(ax)\ndump(fig,file('figures/fig6/6e.pkl','wb'))\nax.set_title(\"Divisive Normalization\", fontsize=18)\nplt.show()",
"_____no_output_____"
],
[
"fig, ax = plt.subplots()\nfor prop in prop_array:\n v_max, e_max = [], []\n for con_trace,e_t in zip(dn_control[prop], dn_exc):\n v_max.append(max(con_trace) - float(approximateDict[leak_rev]/mV))\n e_max.append(max(e_t) - float(approximateDict[leak_rev]/mV))\n e_max = numpy.array(e_max)\n v_max = numpy.array(v_max)\n handles.append(ax.scatter(erange, e_max/v_max, s=10))\n ax.plot(erange, e_max/v_max, '--')\nax.set_xlabel(\"Excitation $g_{exc}$\")\nax.set_ylabel(\"Gain\")\nfig.set_figwidth(2)\nfig.set_figheight(2)\nsimpleaxis(ax)\nplt.show()",
"_____no_output_____"
]
],
[
[
"## 5 B Model subtraction scheme",
"_____no_output_____"
]
],
[
[
"fig, ax = plt.subplots()\nhandles, labels = [], []\nprop = 4\n\ni_max, e_max = [], []\ntrace_c, trace_e = numpy.array(dn_control[prop][-1]), numpy.array(dn_exc[-1])\nax.plot(trange, trace_c, label=\"PSP\")\nax.plot(trange, trace_e, label=\"EPSP\")\ntrace_i = float(approximateDict[leak_rev]/mV) + (trace_c - trace_e)\nax.plot(trange, trace_i, label=\"Derived IPSP\")\nax.set_xlabel(\"Time\")\nax.set_ylabel(\"$V_m$\")\nfig.set_figwidth(3)\nfig.set_figheight(3)\nsimpleaxis(ax)\ndump(fig,file('figures/fig5/5b.pkl','wb'))\nplt.legend()\nplt.show()",
"_____no_output_____"
]
],
[
[
"## 6 F Excitation - Derived Inhibition plot",
"_____no_output_____"
]
],
[
[
"fig, ax = plt.subplots()\nhandles, labels = [], []\nfor prop in prop_array:\n i_max, e_max = [], []\n for con_trace,e_t in zip(dn_control[prop], dn_exc):\n i_t = numpy.array(e_t) - numpy.array(con_trace)\n i_max.append(numpy.max(i_t))\n# i_max.append(max(e_t) - max(con_trace))\n e_max.append(max(e_t) - float(approximateDict[leak_rev]/mV))\n handles.append(ax.scatter(e_max, i_max, s=10))\n ax.plot(e_max, i_max, '--')\n labels.append(\"$P= {}$\".format(prop))\nax.set_xlabel(\"Excitation $V_{max}$\")\nax.set_ylabel(\"Derived Inhibition $V_{max}$\")\nxlim = ax.get_xlim()\nax.set_ylim (xlim)\nax.plot(xlim, xlim, '--')\n# fig.legend(handles, labels, loc ='center right')\nfig.set_figwidth(1.5)\nfig.set_figheight(1.5)\nsimpleaxis(ax)\ndump(fig,file('figures/fig6/6f.pkl','wb'))\nplt.show()",
"_____no_output_____"
]
],
[
[
"## 6 G Time to peak",
"_____no_output_____"
]
],
[
[
"fig, ax = plt.subplots()\nhandles, labels = [], []\nfor prop in prop_array:\n ttp, e_max = [], []\n for con_trace,e_t in zip(dn_control[prop], dn_exc):\n ttp.append(numpy.argmax(con_trace))\n e_max.append(max(e_t) - float(approximateDict[leak_rev]/mV))\n handles.append(ax.scatter(e_max[1:], ttp[1:], s=10))\n ax.plot(e_max[1:], ttp[1:], '--')\n labels.append(\"$P= {}$\".format(prop))\nax.set_xlabel(\"Excitation $V_{max}$\")\nax.set_ylabel(\"Time to peak $t_{peak}$\")\nax.set_xlim(0,15)\nax.set_ylim(0,55)\n# fig.legend(handles, labels, loc ='center right')\nfig.set_figwidth(1.5)\nfig.set_figheight(1.5)\nsimpleaxis(ax)\ndump(fig,file('figures/fig6/6g.pkl','wb'))\nplt.show()",
"_____no_output_____"
],
[
"handles, labels = [], []\nfor prop in prop_array:\n fig, ax = plt.subplots()\n ttp, e_max = [], []\n for con_trace,e_t in zip(dn_control[prop], dn_exc):\n ax.plot(trange, con_trace,c='k')\n fig.set_figwidth(12)\n plt.show()",
"_____no_output_____"
]
],
[
[
"## 6 H Permutation of P",
"_____no_output_____"
]
],
[
[
"check_vm = simplify(Vm_t[0].subs({i:averageEstimateDict[i] for i in averageEstimateDict if i not in [g_e, g_i, delta_i]}).subs(approximateDict).subs({delta_i: d(minDelay,k,g_e)}).evalf())\nf = lambdify((g_e, g_i, t), check_vm/mV, (unitsDict, \"numpy\"))",
"_____no_output_____"
],
[
"p_perm_dn_exc = [[float(f(e * nS, 0., dt * ms)) for dt in trange] for e in erange]\np_perm_dn_control = {prop: [[float(f(e * nS, i * nS, dt * ms)) for dt in trange] for (e,i) in zip(erange, numpy.random.permutation(erange*prop))] for prop in prop_array}",
"_____no_output_____"
],
[
"fig, ax = plt.subplots()\nhandles, labels = [], []\nfor prop in prop_array:\n v_max, e_max = [], []\n for con_trace,e_t in zip(p_perm_dn_control[prop], p_perm_dn_exc):\n v_max.append(max(con_trace) - float(approximateDict[leak_rev]/mV))\n e_max.append(max(e_t) - float(approximateDict[leak_rev]/mV))\n handles.append(ax.scatter(e_max, v_max, s=10))\n ax.plot(e_max, v_max, '--')\n labels.append(\"$P= {}$\".format(prop))\nax.set_xlabel(\"Excitation $V_{max}$\")\nax.set_ylabel(\"Control $V_{max}$\")\n# left, bottom, width, height = [0.25, 0.6, 0.2, 0.2]\n# ax2 = fig.add_axes([left, bottom, width, height])\n# for prop in prop_array:\n# ax2.plot(trange, p_perm_dn_control[prop][5])\n#ax2.set_xlabel(\"Time\")\n#ax2.set_ylabel(\"Membrane potential (mV)\")\n# fig.legend(handles, labels, loc ='center right')\nfig.set_figwidth(1.5)\nfig.set_figheight(1.5)\nsimpleaxis(ax)\ndump(fig,file('figures/fig6/6h.pkl','wb'))\n# ax.set_title(\"Divisive Normalization with E and I balance permuted\", fontsize=18)\nplt.show()",
"_____no_output_____"
]
],
[
[
"## 6 I Permutation of $\\delta_i$",
"_____no_output_____"
]
],
[
[
"check_vm = simplify(Vm_t[0].subs({i:averageEstimateDict[i] for i in averageEstimateDict if i not in [g_e, g_i, delta_i]}).subs(approximateDict).subs({g_i: P*g_e}).evalf())\nf = lambdify((g_e, P, delta_i, t), check_vm/mV, (unitsDict, \"numpy\"))",
"_____no_output_____"
],
[
"d_perm_dn_exc = [[float(f(e * nS, 0., d(minDelay,k, e* nS), dt * ms)) for dt in trange] for e in erange]\nd_perm_dn_control = {prop: [[float(f(e * nS, prop, delay, dt * ms)) for dt in trange] for e,delay in zip(erange, numpy.random.permutation([d(minDelay,k, e* nS) for e in erange])) ] for prop in prop_array}",
"_____no_output_____"
],
[
"fig, ax = plt.subplots()\nhandles, labels = [], []\nfor prop in prop_array:\n v_max, e_max = [], []\n for con_trace,e_t in zip(d_perm_dn_control[prop], d_perm_dn_exc):\n v_max.append(max(con_trace) - float(approximateDict[leak_rev]/mV))\n e_max.append(max(e_t) - float(approximateDict[leak_rev]/mV))\n handles.append(ax.scatter(e_max, v_max, s=10))\n ax.plot(e_max, v_max, '--')\n labels.append(\"$P= {}$\".format(prop))\nax.set_xlabel(\"Excitation $V_{max}$\")\nax.set_ylabel(\"Control $V_{max}$\")\n# left, bottom, width, height = [0.25, 0.6, 0.2, 0.2]\n# ax2 = fig.add_axes([left, bottom, width, height])\n# for prop in prop_array:\n# ax2.plot(trange, d_perm_dn_control[prop][5])\n# fig.legend(handles, labels, loc ='center right')\nfig.set_figwidth(1.5)\nfig.set_figheight(1.5)\nsimpleaxis(ax)\ndump(fig,file('figures/fig6/6i.pkl','wb'))\n# ax.set_title(\"Divisive Normalization\", fontsize=18)\nplt.show()",
"_____no_output_____"
]
],
[
[
"## 6 J Phase plot Divisive Normalization",
"_____no_output_____"
]
],
[
[
"import lmfit\ndef DN_model(x,a=1):\n # Divisive normalization model\n return (a*x)/(x+a)\nDN_Model = lmfit.Model(DN_model)",
"_____no_output_____"
],
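[
"# Illustrative curves for the divisive-normalization form above (synthetic\n# 'a' values only): for x << a the response is nearly linear, and it\n# saturates at a for large x; gamma in the phase plot below is this fitted a.\nx_demo = numpy.linspace(0., 20., 100)\nfor a_demo in [2., 8., 40.]:\n    plt.plot(x_demo, DN_model(x_demo, a=a_demo), label=\"a={}\".format(a_demo))\nplt.plot(x_demo, x_demo, 'k--', label=\"identity\")\nplt.legend()\nplt.show()",
"_____no_output_____"
],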
[
"check_vm = simplify(Vm_t[0].subs({i:averageEstimateDict[i] for i in averageEstimateDict if i not in [g_e, g_i, delta_i]}).subs(approximateDict).subs({g_i: P*g_e}).evalf())\nf = lambdify((g_e, P, delta_i, t), check_vm/mV, (unitsDict, \"numpy\"))",
"_____no_output_____"
],
[
"inhib = simplify(Vm_t[0].subs({i:averageEstimateDict[i] for i in averageEstimateDict if i not in [g_e, g_i, delta_i]}).subs(approximateDict).evalf())\ng = lambdify((g_e, g_i, delta_i, t), inhib/mV, (unitsDict, \"numpy\"))",
"_____no_output_____"
],
[
"phase_dn_control = {}\nphase_dn_exc = {}\nphase_dn_inh = {}\n\n# prop_array = numpy.logspace(-1,1,7)\n# k_array = numpy.logspace(-1,1,7)\n\nprop_array = numpy.linspace(0,6,7)\nk_array = numpy.linspace(0.,3.,7)",
"_____no_output_____"
],
[
"for k in k_array:\n phase_dn_exc[k] = [[float(f(e * nS, 0., d(minDelay,k/nS, e* nS), dt * ms)) for dt in trange] for e in erange]\n phase_dn_control[k] = {prop: [[float(f(e * nS, prop, delay, dt * ms)) for dt in trange] for e,delay in zip(erange, [d(minDelay,k/nS, e* nS) for e in erange]) ] for prop in prop_array}\n# phase_dn_inh[k] = {prop: [[float(g(0 * nS, prop*e, delay, dt * ms)) for dt in trange] for e,delay in zip(erange, [d(minDelay,k/nS, e* nS) for e in erange]) ] for prop in prop_array}",
"_____no_output_____"
],
[
"phase_dn_inh = {}\nfor k in k_array:\n phase_dn_inh[k] = {prop: [[float(g(0 * nS, prop*e* nS, delay, dt * ms)) for dt in trange] for e,delay in zip(erange, [d(minDelay,k/nS, e* nS) for e in erange]) ] for prop in prop_array}",
"_____no_output_____"
],
[
"phaseMat_init = numpy.zeros((len(k_array),len(prop_array)))\n\nfor ind1, k in enumerate(k_array):\n for ind2, prop in enumerate(prop_array):\n v_max, e_max = [], []\n for con_trace,e_t in zip(phase_dn_control[k][prop], phase_dn_exc[k]):\n v_max.append(max(con_trace) - float(approximateDict[leak_rev]/mV))\n e_max.append(max(e_t) - float(approximateDict[leak_rev]/mV))\n X, y = e_max, v_max\n DN_pars = DN_Model.make_params()\n DN_result = DN_Model.fit(y, DN_pars, x=X)\n\n# plt.plot(X, y, 'bo')\n# plt.plot(X, DN_result.best_fit, 'r-')\n# plt.xlim(0,1.2*max(e_max))\n# plt.ylim(0,1.2*max(e_max))\n# plt.show()\n phaseMat_init[ind1][ind2] = DN_result.params['a']\n# print(DN_result.fit_report())\n\n# x,y = numpy.meshgrid(prop_array, k_array)\n\n#cmap = LinearSegmentedColormap.from_list('gamma_purple', [(0.,'purple' ), (1., 'white')])\ncmap = matplotlib.cm.inferno_r\ncmap.set_bad(color='white')\n\nprint (\"Max gamma is {}\".format(numpy.max(phaseMat_init)))\ngamma_cutOff = 40\ncutOffmask = numpy.ma.masked_where(phaseMat_init > gamma_cutOff, phaseMat_init)\nphaseMat = numpy.ma.masked_where(numpy.isnan(phaseMat_init), cutOffmask)\n\nvmax = numpy.nanmax(phaseMat)\nvmin = numpy.nanmin(phaseMat)\n\nfig, ax = plt.subplots()\n\nphaseMat \n#heatmap = ax.pcolormesh(phaseMat, norm=matplotlib.colors.LogNorm(vmin=vmin, vmax=vmax), cmap = cmap, edgecolor='k', linewidths=.05)\nheatmap = ax.pcolormesh(phaseMat, vmin=0, vmax=gamma_cutOff, cmap = cmap, edgecolor='k', linewidths=.05)\n# ax.grid(True, which='minor', axis='both', linestyle='--', alpha=0.1, color='k')\n\nax.invert_yaxis()\nticks = numpy.arange(0,len(prop_array),2)\nax.xaxis.set_ticks(ticks+0.5)\nax.yaxis.set_ticks(ticks+0.5)\n\nax.yaxis.set(ticklabels=[\"{:.0f}\".format(j) for j in k_array[ticks]])\nax.xaxis.set(ticklabels=[\"{:.0f}\".format(j) for j in prop_array[ticks]])\n\n\n# ax.axis([int(k_array.min()),int(k_array.max()),int(prop_array.min()),int(prop_array.max())])\n# for axis in [ax.xaxis, ax.yaxis]:\n# axis.set_ticks([0,10,10], minor=True)\n# axis.set(ticks=[0,10,10], ticklabels=numpy.linspace(0,10,10)) #Skipping square labels\n\n# ax.set_xlim((-1,1))\n# ax.set_ylim((-1,1))\n\n#Colorbar stuff\ncbar = plt.colorbar(heatmap, label = \"$\\\\gamma$\", ticks=[0,20,40])\ncbar.ax.get_yaxis().labelpad = 6\n# tick_locator = matplotlib.ticker.MaxNLocator(nbins=5)\n# cbar.locator = tick_locator\n# cbar.update_ticks()\n\n# ax.patch.set(hatch='xx', edgecolor='purple')\n\nsimpleaxis(ax,every=True,outward=False)\nax.set_aspect(1)\nfig.set_figwidth(2.)\nfig.set_figheight(2.)\n\nax.set_ylabel(\"K\")\nax.set_xlabel(\"I/E\")\n# ax.set_title(\"Divisive Normalization\", fontsize=18)\n\ndump(fig,file('figures/supplementary/11a.pkl','wb'))\nplt.show()",
"_____no_output_____"
],
[
"print (k_array)",
"_____no_output_____"
]
],
[
[
"### Delay plots",
"_____no_output_____"
]
],
[
[
"d = lambda minDelay,k,e: minDelay + m*exp(-(k*e))\nnS = nano*siemens\nm, minDelay = 18.15*ms, 2.54*ms\nmaxDelay = (minDelay + m)/ms",
"_____no_output_____"
],
[
"k_sample_indices = [1,3,5]\nfig, ax = plt.subplots(len(k_array[k_sample_indices]),1,sharey=True)\nfor axis,k in zip(ax,k_array[k_sample_indices]):\n axis.plot(time_erange, [d(minDelay,k/nS,e*nS)/ms for e in time_erange], '.-', c='k', markersize=5)\n axis.set_xlim(0,4.5)\n axis.set_ylim(0, 13 )\n axis.set_xticks(range(4))\n axis.set_yticks(range(0,13,6))\n axis.set_title(\"k={}\".format(k))\nax[0].set_ylabel(\"$\\\\delta_i$ (ms)\")\nax[-1].set_xlabel(\"$g_{exc}$ (nS)\")\nsimpleaxis(ax,hideTitle=False)\nfig.set_figwidth(1)\nfig.set_figheight(3)\ndump(fig,file('figures/supplementary/11b.pkl','wb'))\nplt.show()",
"_____no_output_____"
]
],
[
[
"### I/E differences",
"_____no_output_____"
]
],
[
[
"ie_sample_indices = [1,3,6]\nfig, ax = plt.subplots(1,3,sharey=True)\nfor axis,i_by_e in zip(ax, prop_array[ie_sample_indices]):\n axis.plot(erange, i_by_e * erange, '.-', c='k', markersize=5)\n axis.set_xlabel(\"$g_{exc}$ (nS)\")\n axis.set_xlim(0,4.5)\n axis.set_xticks(range(4))\n# axis.set_yticks(range(0,13,2))\n# axis.set_title(\"I/E={}\".format(i_by_e))\nax[0].set_ylabel(\"$g_{inh}$ (nS)\") \nsimpleaxis(ax,hideTitle=False)\nfig.set_figwidth(3)\nfig.set_figheight(1)\ndump(fig,file('figures/supplementary/11c.pkl','wb'))\nplt.show()",
"_____no_output_____"
]
],
[
[
"### DN traces for these values",
"_____no_output_____"
]
],
[
[
"fig, ax = plt.subplots(len(k_sample_indices), len(ie_sample_indices), sharex=True, sharey=True)\nsm = plt.cm.ScalarMappable(cmap=cmap, norm=plt.Normalize(vmin=0, vmax=gamma_cutOff))\nfor ind1,k_index in enumerate(k_sample_indices):\n for ind2,prop_index in enumerate(ie_sample_indices):\n k, prop = k_array[k_index], prop_array[prop_index]\n for trace in phase_dn_control[k][prop]:\n if phaseMat[k_index][prop_index]:\n ax[ind1][ind2].plot(trange, trace, c=sm.to_rgba(float(phaseMat[k_index][prop_index])), linewidth=1)\n else:\n ax[ind1][ind2].plot(trange, trace, c='k', linewidth=1)\n# ax[ind1][ind2].set_title(\"K={},I/E={}\".format(k,prop))\nsimpleaxis(fig.get_axes(),hideTitle=False)\nfig.set_figwidth(3)\nfig.set_figheight(3)\ndump(fig,file('figures/supplementary/11d.pkl','wb'))\nplt.show()",
"_____no_output_____"
]
],
[
[
"### SDN curve for these values",
"_____no_output_____"
]
],
[
[
"sm = plt.cm.ScalarMappable(cmap=cmap, norm=plt.Normalize(vmin=0, vmax=gamma_cutOff))\nfig, ax = plt.subplots(len(k_sample), len(ie_sample), sharex=True, sharey=True)\nfor ind1,k_index in enumerate(k_sample_indices):\n for ind2,prop_index in enumerate(ie_sample_indices):\n k, prop = k_array[k_index], prop_array[prop_index] \n \n obs_sdn = numpy.array([numpy.max(trace) for trace in phase_dn_control[k][prop]]) - float(approximateDict[leak_rev]/mV)\n exp_sdn = numpy.array([numpy.max(trace) for trace in phase_dn_exc[k]]) - float(approximateDict[leak_rev]/mV)\n \n if phaseMat[k_index][prop_index]:\n ax[ind1][ind2].plot(exp_sdn, obs_sdn, '.-', c=sm.to_rgba(float(phaseMat[k_index][prop_index])), markersize=5, linewidth=1)\n ax[ind1][ind2].set_title(\"$\\gamma$ = \" + \"{:.2f}\".format(phaseMat_init[k_index][prop_index]))\n# ax[ind1][ind2].set_title(\"K={}, I/E={}, \".format(k,prop) + \"$\\gamma$ = \" + \"{:.2e}\".format(phaseMat_init[k_index][prop_index]))\n else:\n ax[ind1][ind2].plot(exp_sdn, obs_sdn, '.-', c='k', markersize=5, linewidth=1)\n #ax[ind1][ind2].set_title(\"$\\gamma$ > 40\")\n# ax[ind1][ind2].set_title(\"K={}, I/E={}, \".format(k,prop) + \"$\\gamma$ = \" + \"{:.2e}\".format(phaseMat_init[k_index][prop_index]))\n\n\n# if phaseMat[k_index][prop_index]:\n# print (k_index, prop_index)\n# ax[ind1][ind2].set_title(\"$\\gamma$ = \" + \"{:.2f}\".format(phaseMat_init[k_index][prop_index]))\n# else:\n# print (\"Didn't work, {},{}\".format(k_index, prop_index))\n# ax[ind1][ind2].set_title(\"$\\gamma$ > 40\")\nsimpleaxis(fig.get_axes(),hideTitle=False)\nfig.set_figwidth(3)\nfig.set_figheight(3)\ndump(fig,file('figures/supplementary/11e.pkl','wb'))\nplt.show()",
"_____no_output_____"
],
[
"exp_sdn, obs_sdn",
"_____no_output_____"
],
[
"k = k_array[4]\np = prop_array[4]\n\nnumColors = 10\ncm = matplotlib.cm.viridis_r\ncgen = (cm(1.*i/numColors) for i in range(numColors))\nmaxTime = 200\n\nfig, ax = plt.subplots()\nfor con_trace,exc_trace,inh_trace in zip(phase_dn_control[k][prop][1:], phase_dn_exc[k][1:], phase_dn_inh[k][prop][1:]):\n c = cgen.next()\n ax.plot(con_trace[:maxTime], '-', linewidth=2, c=c)\n ax.plot(exc_trace[:maxTime], '-', linewidth=2, c=c)\n ax.plot( [-65 - (a - b) for a,b in zip(exc_trace[:maxTime],con_trace[:maxTime])], '-', linewidth=2, c=c)\n# ax.plot(inh_trace[:maxTime], '-', linewidth=2, c=c)\n ax.hlines(y=max(con_trace[:maxTime]), xmin=0, xmax=maxTime, linestyles='--')\n# ax.hlines(y=max(con_trace[:maxTime]))\nsimpleaxis(ax,every=True)\nfig.set_figheight(15)\nfig.set_figwidth(15)\nplt.show()",
"_____no_output_____"
],
[
"fig, ax = plt.subplots()\nfor inh_trace in phase_dn_inh[k][p]:\n ax.plot(inh_trace)\nplt.show()",
"_____no_output_____"
],
[
"len(phase_dn_inh[8])",
"_____no_output_____"
],
[
"# for ind1, k in enumerate(k_array):\n# for ind2, prop in enumerate(prop_array):\n# v_max, e_max = [], []\n# for con_trace,e_t in zip(phase_dn_control[k][prop], phase_dn_exc[k]):\n# v_max.append(max(con_trace) - float(approximateDict[leak_rev]/mV))\n# e_max.append(max(e_t) - float(approximateDict[leak_rev]/mV))\n# X, y = e_max, v_max\n# DN_pars = DN_Model.make_params()\n# DN_result = DN_Model.fit(y, DN_pars, x=X)\n# print (k, prop)\n# print(DN_result.fit_report())\n# f,ax = plt.subplots()\n# DN_result.plot_fit(ax)\n# plt.show()",
"_____no_output_____"
]
],
[
[
"## 6 K $\\delta_i$ as a function of $g_e$",
"_____no_output_____"
]
],
[
[
"prefix = '/home/bhalla/Documents/Codes/data'\nn = Neuron.load(prefix + '/media/sahil/NCBS_Shares_BGStim/patch_data/170720/c5_EI/plots/c5_EI.pkl')",
"_____no_output_____"
],
[
"def delay_excitation(x, a=1., b=1., c=1.):\n # Delay as a function of excitation\n# return a + b*numpy.exp(-c*x)\n return a+(x/b)",
"_____no_output_____"
],
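[
"# Illustrative aside (not from the original analysis): shapes of the two candidate\n# delay models above, with invented parameter values. The active form a + x/b is\n# linear in g_e; the commented exponential form a + b*exp(-c*x) saturates.\n_g = numpy.linspace(0.1, 4.5, 50)\nfig, ax = plt.subplots()\nax.plot(_g, delay_excitation(_g, a=1., b=2.), label='a + x/b (active form)')\nax.plot(_g, 1. + 8.*numpy.exp(-1.5*_g), '--', label='a + b*exp(-c*x) (commented form)')\nax.set_xlabel(\"$g_e$ (nS)\")\nax.set_ylabel(\"$\\\\delta_i$ (ms)\")\nax.legend()\nsimpleaxis(ax)\nplt.show()",
"_____no_output_____"
],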
[
"def findOnsetTime(trial, step=0.5, slide = 0.05, minOnset = 2., maxOnset = 50., initpValTolerance=1.0, pValMinTolerance = 0.1):\n maxIndex = int(trial.F_sample*maxOnset*1e-3)\n if expType == 1:\n maxOnsetIndex = numpy.argmax(-trial.interestWindow[:maxIndex])\n elif expType == 2:\n maxOnsetIndex = numpy.argmax(trial.interestWindow[:maxIndex])\n else:\n maxOnsetIndex = numpy.argmax(trial.interestWindow[:maxIndex])\n \n window_size = len(trial.interestWindow)\n step_size = int(trial.F_sample*step*1e-3)\n \n overlap = int(trial.F_sample*slide*1e-3)\n \n index_right = maxOnsetIndex\n index_left = index_right - step_size\n minOnsetIndex = int(trial.F_sample*minOnset*1e-3)\n \n baseMean = numpy.mean(trial.interestWindow[:minOnsetIndex])\n factor = 5\n thresholdGradient = 0.01\n pValTolerance = initpValTolerance\n\n l_window = trial.interestWindow[:minOnsetIndex]\n while (index_left>minOnset):\n r_window = trial.interestWindow[index_left:index_right] #, trial.baselineWindow #trial.interestWindow[index_left - step_size:index_left]\n stat, pVal = ss.ks_2samp(r_window, l_window)\n if pVal>pValTolerance:\n return float(index_right)/trial.F_sample\n\n else:\n index_left-=overlap\n index_right-=overlap\n if index_left<=minOnsetIndex:\n pValTolerance/=2\n if pValTolerance<pValMinTolerance:\n# print (\"Returning Nan\")\n return numpy.nan\n else:\n index_right = maxOnsetIndex\n index_left = maxOnsetIndex - step_size",
"_____no_output_____"
],
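[
"# Illustrative sanity check of findOnsetTime on a synthetic trace (not recorded\n# data). A minimal stand-in supplies the two trial attributes the function reads,\n# F_sample and interestWindow; expType is the module-level flag it consults and is\n# reassigned by the analysis loops below.\nclass _FakeTrial(object):\n    def __init__(self, window, F_sample=20000.):\n        self.F_sample = F_sample\n        self.interestWindow = window\n\n_t = numpy.arange(0., 0.05, 1./20000.)\n_trace = numpy.random.normal(0., 0.5, len(_t))\n_trace[_t > 0.01] += 20.*numpy.exp(-(_t[_t > 0.01] - 0.01)/5e-3)  # response at ~10 ms\nexpType = 2\n_onset = findOnsetTime(_FakeTrial(_trace))\nprint (_onset*1e3 if _onset else _onset)  # expected near 10 ms",
"_____no_output_____"
],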
[
"# avg_exc_onset = {}\n# avg_inh_onset = {}\n# avg_exc_max = {}\n# exc_onsets, inh_onsets = {}, {}\n# exc_max,inh_max = {}, {}\n# err_inh_onsets = {}\n# scalingFactor = 1e6\n# for expType, experiment in n:\n# for sqr in experiment:\n# for coord in experiment[sqr].coordwise:\n# if expType == 1:\n# for trial in experiment[sqr].coordwise[coord].trials:\n# exc_onsets[(sqr,trial.index)] = findOnsetTime(trial)*1e3\n# exc_max[(sqr,trial.index)] = -trial.feature[5]*scalingFactor\n# #exp[sqr].coordwise[coord].average_feature[5]\n# if expType == 2:\n# list_inh_onset = []\n# for trial in experiment[sqr].coordwise[coord].trials:\n# inh_onsets[(sqr,trial.index)] = findOnsetTime(trial)*1e3\n# list_inh_onset.append(inh_onsets[(sqr,trial.index)])\n# inh_max[(sqr,trial.index)] = trial.feature[0]*scalingFactor\n# avg_onset = numpy.nanmean([onset for onset in list_inh_onset if onset])\n# err_onset = numpy.nanstd([onset for onset in list_inh_onset if onset])\n# for trial in experiment[sqr].coordwise[coord].trials:\n# avg_inh_onset[(sqr,trial.index)] = avg_onset\n# err_inh_onsets[(sqr,trial.index)] = err_onset\n#print (avg_exc_max, avg_exc_onset, avg_inh_onset)",
"_____no_output_____"
],
[
"avg_exc_onset = {}\navg_inh_onset = {}\navg_exc_max = {}\nexc_onsets, inh_onsets = {}, {}\nexc_max,inh_max = {}, {}\nerr_exc_onset, err_inh_onset = {}, {}\nscalingFactor = 1e6\n\nfor expType, experiment in n:\n for sqr in experiment:\n for coord in experiment[sqr].coordwise:\n if expType == 1:\n list_exc_onset = []\n list_exc_max = []\n for trial in experiment[sqr].coordwise[coord].trials:\n onsetTime = findOnsetTime(trial)\n if onsetTime:\n exc_onsets[(sqr,trial.index)] = onsetTime*1e3\n list_exc_onset.append(exc_onsets[(sqr,trial.index)])\n list_exc_max.append(-trial.feature[5]*scalingFactor)\n #exp[sqr].coordwise[coord].average_feature[5]\n avg_exc_onset[coord] = numpy.nanmean([onset for onset in list_exc_onset if onset])\n err_exc_onset[coord] = numpy.nanstd([onset for onset in list_exc_onset if onset])\n exc_max[coord] = numpy.nanmean([maxC for maxC in list_exc_max if maxC])\n# for trial in experiment[sqr].coordwise[coord].trials:\n# avg_exc_onset[(sqr,trial.index)] = avg_onset\n# err_exc_onsets[(sqr,trial.index)] = err_onset\n if expType == 2:\n list_inh_onset = []\n for trial in experiment[sqr].coordwise[coord].trials:\n onsetTime = findOnsetTime(trial)\n if onsetTime:\n inh_onsets[(sqr,trial.index)] = onsetTime*1e3\n list_inh_onset.append(inh_onsets[(sqr,trial.index)])\n inh_max[(sqr,trial.index)] = trial.feature[0]*scalingFactor\n avg_inh_onset[coord] = numpy.nanmean([onset for onset in list_inh_onset if onset])\n err_inh_onset[coord] = numpy.nanstd([onset for onset in list_inh_onset if onset])\n# for trial in experiment[sqr].coordwise[coord].trials:\n# avg_inh_onset[(sqr,trial.index)] = avg_onset\n# err_inh_onsets[(sqr,trial.index)] = err_onset",
"_____no_output_____"
],
[
"delay, max_current = [], []\ndel_err, max_err= [], []\ninhibOnset = []\nconductanceConversion = 70e-3\nfor key in set(avg_exc_onset).intersection(set(avg_inh_onset)):\n if avg_inh_onset[key] and avg_exc_onset[key]:\n if not numpy.isnan(avg_inh_onset[key]) and not numpy.isnan (avg_exc_onset[key]) and not numpy.isnan (exc_max[key]):\n delay.append(avg_inh_onset[key]- avg_exc_onset[key])\n max_current.append(exc_max[key])\n# del_err.append(err_inh_onset[key])\n inhibOnset.append(avg_inh_onset[key])\nmaxConductance = numpy.array(max_current)/conductanceConversion\n# del_err.append()\n# max_err.append()",
"_____no_output_____"
],
[
"delay_Model = lmfit.Model(delay_excitation)\ndelay_pars = delay_Model.make_params()\n\ndelay = numpy.array(delay)\nmaxConductance = numpy.array(maxConductance)\n# print (delay_result.params)\n# print (delay_result.aic)\n# print (delay_result.redchi)",
"_____no_output_____"
],
[
"delay_result = delay_Model.fit(delay, delay_pars, x=maxConductance)",
"_____no_output_____"
],
[
"fig, ax = plt.subplots()\nax.scatter(maxConductance, delay)\nax.set_ylim(0,)\nplt.show()",
"_____no_output_____"
],
[
"delay_result = delay_Model.fit(delay, delay_pars, x=maxConductance)\nfig, ax = plt.subplots()\nindices = numpy.argsort(maxConductance)\nax.scatter(maxConductance[indices], delay[indices], s=30, facecolor='k', edgecolor='k')\nax.plot(maxConductance[indices], delay_result.best_fit[indices], '-')\n# print(conductance_std, delay_std)\n# ax.errorbar(conductance_mean, delay_mean, xerr = conductance_std, yerr= delay_std, linestyle='',c='k')\nax.set_xticks(range(4))\nax.set_yticks(range(0,12,2))\nax.set_xlim(0,4.5)\nax.set_ylim(-3,12.5)\nax.set_xlabel(\"$g_e$ (nS)\")\nax.set_ylabel(\"$\\\\delta_i$ (ms)\")\nfig.set_figwidth(1.5)\nfig.set_figheight(1.5)\nsimpleaxis(ax)\n# dump(fig,file('figures/fig6/6k.pkl','wb'))\nplt.show()\n# print (\"{:.2f} + {:.2f}e^-{:.2f}E\".format(delay_result.params['a'].value, delay_result.params['b'].value, delay_result.params['c'].value))\nprint (\"{:.2f} + E^-{:.2f}\".format(delay_result.params['a'].value, delay_result.params['b'].value, delay_result.params['c'].value))\nprint(delay_result.fit_report())",
"_____no_output_____"
]
],
[
[
"### Binning delays here",
"_____no_output_____"
]
],
[
[
"bins = numpy.linspace(0,max(maxConductance),6)\ndigitized = numpy.digitize(maxConductance, bins)\nconductance_mean = [maxConductance[digitized == i].mean() for i in range(len(bins))]\ndelay_mean = [delay[digitized == i].mean() for i in range(len(bins))]\nconductance_std = [maxConductance[digitized == i].std(ddof=1) for i in range(len(bins))]\ndelay_std = [delay[digitized == i].std(ddof=1) for i in range(len(bins))]\n\ndelay_mean, conductance_mean, delay_std, conductance_std = map(list, zip(*[ (d,c,sd,sc) for d,c,sd,sc in zip(delay_mean, conductance_mean, delay_std, conductance_std) if not any(numpy.isnan([d,c,sd,sc]))]))",
"_____no_output_____"
],
[
"print (\"{:.2f} + {:.2f}e^-{:.2f}E\".format(delay_result.params['a'].value, delay_result.params['b'].value, delay_result.params['c'].value))",
"_____no_output_____"
],
[
"delay_result = delay_Model.fit(delay_mean, delay_pars, x=conductance_mean)\nfig, ax = plt.subplots()\nax.scatter(conductance_mean, delay_mean, s=30, facecolor='k', edgecolor='k')\n# ax.plot(conductance_mean, delay_result.best_fit, '-')\nprint(conductance_std, delay_std)\nax.errorbar(conductance_mean, delay_mean, xerr = conductance_std, yerr= delay_std, linestyle='',c='k')\nax.set_xticks(range(4))\nax.set_yticks(range(0,12,2))\nax.set_xlim(0,4.5)\nax.set_ylim(0,12.5)\nax.set_xlabel(\"$g_e$ (nS)\")\nax.set_ylabel(\"$\\\\delta_i$ (ms)\")\nfig.set_figwidth(1.5)\nfig.set_figheight(1.5)\nsimpleaxis(ax)\n# dump(fig,file('figures/fig6/6k.pkl','wb'))\nplt.show()\nprint (\"{:.2f} + {:.2f}e^-{:.2f}E\".format(delay_result.params['a'].value, delay_result.params['b'].value, delay_result.params['c'].value))",
"_____no_output_____"
],
[
"delay_result = delay_Model.fit(delay, delay_pars, x=maxConductance)\nfig, ax = plt.subplots()\nax.errorbar(numpy.array(maxConductance), numpy.array(delay), fmt ='o', markersize=2, alpha=0.4)\n#ax.scatter(numpy.array(maxConductance)*1e6, numpy.array(delay)*1e3)\ncurrent_linspace= numpy.linspace(0,1.1*numpy.max(maxConductance))\nax.plot(current_linspace, delay_result.eval(x=current_linspace), '-', label=\"${:.2f} + {:.2f} \\\\times e^{{-{:.2f} \\\\times E }}$\".format(delay_result.params['a'].value, delay_result.params['b'].value, delay_result.params['c'].value))\n\nax.plot(1./(delay_result.params['c'].value), delay_result.eval(x=1./(delay_result.params['c'].value)), 'ko', markersize=2)\nxmin, xmax = ax.get_xlim()\nax.hlines(y=0, xmin=xmin, xmax=xmax, linestyles='--', alpha=0.5)\nax.hlines(y=delay_result.params['a'].value, xmin=xmin, xmax=xmax, linestyles='--', alpha=0.5)\nax.set_xlabel(\"$g_{max}^{exc}$\")\nax.set_ylabel(\"Delay $(\\\\delta_{inh})$\")\n\nax.annotate(\"\",\n xy=(xmax, 0.), xycoords='data',\n xytext=(xmax, delay_result.params['a'].value), textcoords='data',\n arrowprops=dict(arrowstyle=\"<->\",\n connectionstyle=\"arc3\"),\n )\nax.text(1.01*xmax, 1., \"$\\\\delta_{min}$\")\n\nax.annotate(\"\",\n xy=(0, 0), xycoords='data',\n xytext=(0, delay_result.params['b'].value + delay_result.params['a'].value), textcoords='data',\n arrowprops=dict(arrowstyle=\"<->\",\n connectionstyle=\"arc3\"),\n )\nax.text(xmin*1.5, 10., \"$\\\\delta_{max}$\")\n\nax.annotate(\"\",\n xy=(xmax, delay_result.params['a'].value), xycoords='data',\n xytext=(xmax, delay_result.params['b'].value + delay_result.params['a'].value), textcoords='data',\n arrowprops=dict(arrowstyle=\"<->\",\n connectionstyle=\"arc3\"),\n )\nax.text(1.01*xmax, 10., \"$m$\")\n\n# ax.text(0.006, 6., \"$k$\")\nax.set_xlim(xmax= xmax*1.1)\nsimpleaxis(ax)\nplt.legend()\nfig.set_figwidth(6)\nfig.set_figheight(6)\n# dump(fig,file('figures/fig6/6k.pkl','wb'))\nplt.show()",
"_____no_output_____"
]
],
[
[
"### Over all EI cells",
"_____no_output_____"
]
],
[
[
"voltageClampFiles = '/media/sahil/NCBS_Shares_BGStim/patch_data/voltage_clamp_files.txt'",
"_____no_output_____"
],
[
"with open (voltageClampFiles,'r') as r:\n dirnames = r.read().splitlines()",
"_____no_output_____"
],
[
"a = ['161220 c2_EI',\n '170510 c2_EI',\n '170524 c3_EI',\n '170524 c1_EI',\n '170530 c2_EI',\n '170530 c1_EI',\n '170531 c2_EI',\n '170531 c4_EI',\n '170531 c1_EI',\n '170720 c5_EI',\n '170720 c3_EI',\n '170720 c4_EI',\n '170720 c2_EI']\ndirnames = (['/home/bhalla/Documents/Codes/data/media/sahil/NCBS_Shares_BGStim/patch_data/' + '/'.join(j.split(' ')) + '/' for j in a])",
"_____no_output_____"
],
[
"#Colorscheme for cells\ncolor_cell = matplotlib.cm.plasma(numpy.linspace(0,1,len(dirnames)))",
"_____no_output_____"
],
[
"neurons = []\nfor dirname in dirnames:\n cellIndex = dirname.split('/')[-2]\n filename = dirname + 'plots/' + cellIndex + '.pkl'\n neurons.append(Neuron.load(filename))",
"_____no_output_____"
],
[
"all_delays = []\nall_conductances = []\nall_inh_conductances = []\nscalingFactor = 1e6\nfor index, n in enumerate(neurons):\n avg_exc_onset = {}\n avg_inh_onset = {}\n avg_exc_max = {}\n exc_onsets, inh_onsets = {}, {}\n exc_max,inh_max = {}, {}\n err_exc_onset, err_inh_onset = {}, {}\n\n for expType, experiment in n:\n for sqr in experiment:\n for coord in experiment[sqr].coordwise:\n if expType == 1:\n list_exc_onset = []\n list_exc_max = []\n for trial in experiment[sqr].coordwise[coord].trials:\n onsetTime = findOnsetTime(trial)\n if onsetTime:\n exc_onsets[(sqr,trial.index)] = onsetTime*1e3\n list_exc_onset.append(exc_onsets[(sqr,trial.index)])\n list_exc_max.append(-trial.feature[5]*scalingFactor)\n #exp[sqr].coordwise[coord].average_feature[5]\n avg_exc_onset[coord] = numpy.nanmean([onset for onset in list_exc_onset if onset])\n err_exc_onset[coord] = numpy.nanstd([onset for onset in list_exc_onset if onset])\n exc_max[coord] = numpy.nanmean([maxC for maxC in list_exc_max if maxC])\n # for trial in experiment[sqr].coordwise[coord].trials:\n # avg_exc_onset[(sqr,trial.index)] = avg_onset\n # err_exc_onsets[(sqr,trial.index)] = err_onset\n if expType == 2:\n list_inh_onset = []\n list_inh_max = []\n for trial in experiment[sqr].coordwise[coord].trials:\n onsetTime = findOnsetTime(trial)\n if onsetTime:\n inh_onsets[(sqr,trial.index)] = onsetTime*1e3\n list_inh_onset.append(inh_onsets[(sqr,trial.index)])\n list_inh_max.append(trial.feature[0]*scalingFactor)\n avg_inh_onset[coord] = numpy.nanmean([onset for onset in list_inh_onset if onset])\n err_inh_onset[coord] = numpy.nanstd([onset for onset in list_inh_onset if onset])\n inh_max[coord] = numpy.nanmean([maxC for maxC in list_inh_max if maxC])\n\n delay, max_conductance, max_inh_conductance = [], [], []\n inhibOnset = []\n conductanceConversion = 70e-3\n for key in set(avg_exc_onset).intersection(set(avg_inh_onset)):\n if avg_inh_onset[key] and avg_exc_onset[key]:\n if not numpy.isnan(avg_inh_onset[key]) and not numpy.isnan (avg_exc_onset[key]) and not numpy.isnan (exc_max[key]) and not numpy.isnan (inh_max[key]):\n delay.append(avg_inh_onset[key]- avg_exc_onset[key])\n max_conductance.append(exc_max[key]/conductanceConversion)\n max_inh_conductance.append(inh_max[key]/conductanceConversion)\n all_delays.append(delay)\n all_conductances.append(max_conductance)\n all_inh_conductances.append(max_inh_conductance)\n print (\"Done {}\".format(index))",
"_____no_output_____"
],
[
"# all_delays = []\n# all_conductances = []\n# all_inh_conductances = []\n# scalingFactor = 1e6\n# for index, n in enumerate(neurons):\n# avg_exc_onset = {}\n# avg_inh_onset = {}\n# avg_exc_max = {}\n# exc_onsets, inh_onsets = {}, {}\n# exc_max,inh_max = {}, {}\n# err_inh_onsets = {}\n# for expType, experiment in n:\n# for sqr in experiment:\n# for coord in experiment[sqr].coordwise:\n# if expType == 1:\n# exc_onsets[(sqr,coord)] = []\n# exc_max[(sqr,coord)] = []\n# for trial in experiment[sqr].coordwise[coord].trials:\n# onsetTime = findOnsetTime(trial)\n# if onsetTime:\n# exc_onsets[(sqr,coord)].append(onsetTime*1e3)\n# exc_max[(sqr,coord)].append(-trial.feature[5]*scalingFactor)\n# #exp[sqr].coordwise[coord].average_feature[5]\n# exc_onsets[(sqr,coord)] = numpy.nanmean(exc_onsets[(sqr,coord)])\n# exc_max[(sqr,coord)] = numpy.nanmean(exc_max[(sqr,coord)])\n \n# if expType == 2:\n# inh_onsets[(sqr,coord)] = []\n# inh_max[(sqr,coord)] = []\n# #list_inh_onset = []\n# for trial in experiment[sqr].coordwise[coord].trials:\n# onsetTime = findOnsetTime(trial)\n# if onsetTime:\n# inh_onsets[(sqr,coord)].append(onsetTime*1e3)\n# #list_inh_onset.append(onsetTime*1e3)\n# inh_max[(sqr,coord)].append(trial.feature[0]*scalingFactor)\n# #avg_onset = numpy.nanmean([onset for onset in list_inh_onset if onset])\n# #err_onset = numpy.nanstd([onset for onset in list_inh_onset if onset])\n# # for trial in exp[sqr].coordwise[coord].trials:\n# # avg_inh_onset[(sqr,trial.index)] = avg_onset\n# # err_inh_onsets[(sqr,trial.index)] = err_onset\n# inh_onsets[(sqr,coord)] = numpy.nanmean(inh_onsets[(sqr,coord)])\n# inh_max[(sqr,coord)] = numpy.nanmean(inh_max[(sqr,coord)])\n \n# delay, max_conductance, max_inh_conductance = [], [], []\n# # del_err, max_err= [], []\n# inhibOnset = []\n# conductanceConversion = 70e-3\n# for key in set(exc_onsets).intersection(set(inh_onsets)):\n# if inh_onsets[key] and exc_onsets[key]:\n# # print (\"Doing {}\".format(index))\n# # print (inh_onsets[key], exc_onsets[key], exc_max[key])\n# if not numpy.isnan(inh_onsets[key]) and not numpy.isnan (exc_onsets[key]) and not numpy.isnan (exc_max[key]) and not numpy.isnan (inh_max[key]):\n# # print (\"Delay is {}\".format(inh_onsets[key]- exc_onsets[key]))\n# delay.append(inh_onsets[key]- exc_onsets[key])\n# max_conductance.append(exc_max[key]/conductanceConversion)\n# max_inh_conductance.append(inh_max[key]/conductanceConversion)\n# all_delays.append(delay)\n# all_conductances.append(max_conductance)\n# all_inh_conductances.append(max_inh_conductance)\n# print (\"Done {}\".format(index))",
"_____no_output_____"
],
[
"fig, ax = plt.subplots()\ncmap = matplotlib.cm.viridis\n\ncolors = matplotlib.cm.viridis(numpy.linspace(0, 1, len(all_inh_conductances)))\n\n# norm = matplotlib.colors.Normalize(vmin=1, vmax=6)\nslopeArr = []\nfor i, (g, gi, d, c) in enumerate(zip(all_conductances, all_inh_conductances, all_delays, colors)):\n g, gi, d = numpy.array(g), numpy.array(gi), numpy.array(d)\n indices = numpy.argsort(g)\n #slope, intercept, rval, pvalue, err = ss.linregress(g[indices], gi[indices])\n #cbar = ax.scatter(g,d, c= [slope]*len(g), s= 10, cmap='viridis', vmin=1.5, vmax=3.2)\n slope, intercept, lowConf, upperConf = ss.mstats.theilslopes(x=gi[indices], y=d[indices])\n #slope, intercept, rval, pvalue, err = ss.linregress(g[indices], d[indices])\n cbar = ax.scatter(gi,d, s=4, c=c, alpha=0.4, cmap=cmap)\n ax.plot(gi, slope*gi + intercept,'--', color='gray', linewidth=0.1)\n slopeArr.append(slope)\n \nflattened_g = numpy.array([g for sublist in all_conductances for g in sublist])\nflattened_d = numpy.array([d for sublist in all_delays for d in sublist])\n\nax.set_xlabel(\"$g_e$ (nS)\")\nax.set_ylabel(\"$\\\\delta_i$ (ms)\")\n# plt.colorbar(cbar)\nax.set_ylim(ymin=-5)\nsimpleaxis(ax)\nfig.set_figwidth(1.5)\nfig.set_figheight(1.5)\ndump(fig,file('figures/fig6/6l_1.pkl','wb'))\nplt.show()",
"_____no_output_____"
],
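[
"# Aside (illustrative): why ss.mstats.theilslopes above rather than ss.linregress.\n# On toy data with a single outlier, the Theil-Sen median-of-pairwise-slopes\n# estimator stays near the true line (slope 2, intercept 1) while OLS is pulled away.\n_x = numpy.arange(10.)\n_y = 2.*_x + 1.\n_y[-1] += 30.  # one outlier\nprint (ss.linregress(_x, _y)[:2])\nprint (ss.mstats.theilslopes(_y, x=_x)[:2])",
"_____no_output_____"
],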
[
"fig, (ax1, ax2, ax3) = plt.subplots(ncols=3)\ncmap = matplotlib.cm.viridis\n\ncolors = matplotlib.cm.viridis(numpy.linspace(0, 1, len(all_inh_conductances)))\n\n# norm = matplotlib.colors.Normalize(vmin=1, vmax=6)\nslopeArr = []\nfor i, (g, gi, d, c) in enumerate(zip(all_conductances, all_inh_conductances, all_delays, colors)):\n g, gi, d = numpy.array(g), numpy.array(gi), numpy.array(d)\n ax1.scatter(gi/g,d,s=.1,color='k')\n ax2.scatter(g,d,s=.1,color='k')\n ax3.scatter(gi,d,s=.1,color='k')\n \nflattened_g = numpy.array([g for sublist in all_conductances for g in sublist])\nflattened_gi = numpy.array([g for sublist in all_inh_conductances for g in sublist])\nflattened_gi_by_g = numpy.array([g for sublist in zip(all_conductances,all_inh_conductances) for g in sublist])\nflattened_d = numpy.array([d for sublist in all_delays for d in sublist])\n\nslope, intercept, rval, pvalue, err = ss.linregress(flattened_gi,flattened_d)\nax1.plot(gi/g, slope*(gi/g) + intercept)\nslope, intercept, rval, pvalue, err = ss.linregress(g, d)\nax2.plot(gi/g, slope*(gi/g) + intercept)\nslope, intercept, rval, pvalue, err = ss.linregress(gi, d)\nax3.plot(gi/g, slope*(gi/g) + intercept)\n\nax.set_xlabel(\"I/E\")\nax.set_ylabel(\"$\\\\delta_i$ (ms)\")\n# plt.colorbar(cbar)\nax1.set_ylim(ymin=-5)\nax2.set_ylim(ymin=-5)\nax3.set_ylim(ymin=-5)\nsimpleaxis([ax1, ax2, ax3])\nfig.set_figwidth(4.5)\nfig.set_figheight(1.5)\nplt.show()",
"_____no_output_____"
],
[
"fig, ax = plt.subplots()\nbins = numpy.linspace(-3,0.25,13)\nprint(bins)\nax.hist(slopeArr,bins=bins,color='k')\nax.vlines(x=0,ymin=0,ymax=7.,color='r')\nsimpleaxis(ax)\nfig.set_figwidth(1.5)\nfig.set_figheight(1.5)\ndump(fig,file('figures/fig6/6l_2.pkl','wb'))\nplt.show()",
"_____no_output_____"
]
],
[
[
"### Fitting through all cells",
"_____no_output_____"
]
],
[
[
"cmap = matplotlib.cm.viridis\ncolors = matplotlib.cm.viridis(numpy.linspace(0, 1, len(all_inh_conductances)))\nfig, ax = plt.subplots()\n# norm = matplotlib.colors.Normalize(vmin=1, vmax=6)\nslopeArr = []\nadist, bdist = [],[]\nflattened_g, flattened_d = [], []\nfor i, (g, gi, d, c) in enumerate(zip(all_conductances, all_inh_conductances, all_delays, colors)):\n g, gi, d = numpy.array(g), numpy.array(gi), numpy.array(d)\n indices = numpy.where(d>0)\n g, gi, d = g[indices], gi[indices], d[indices]\n flattened_g += list(g)\n flattened_d += list(d)\n indices = numpy.argsort(g)\n# delay_Model = lmfit.Model(delay_excitation)\n# delay_pars = delay_Model.make_params()\n \n# delay_result = delay_Model.fit(d, delay_pars, x=g)\n# indices = numpy.argsort(g)\n# # ax.scatter(g[indices], 1./d[indices], s=30, facecolor='k', edgecolor='k')\n ax.scatter(g[indices],1./d[indices], s=5, facecolor=colors[i], edgecolor=colors[i])\n# ax.plot(g[indices], delay_result.best_fit[indices], '--', color=colors[i], linewidth=1)\n# print (\"{:.2f} + {:.2f}g_e^{:.2f}\".format(delay_result.params['a'].value, delay_result.params['b'].value, delay_result.params['c'].value))\n# adist.append(delay_result.params['a'].value)\n# bdist.append(delay_result.params['b'].value)\n# print(delay_result.fit_report())\n# ax.set_xlabel(\"$g_e$ (nS)\")\n# ax.set_ylabel(\"$\\\\delta_i$ (ms)\")\n # dump(fig,file('figures/fig6/6k.pkl','wb'))\n# flattened_g = numpy.array([g for sublist in all_conductances for g in sublist])\n# flattened_d = 1./numpy.array([d for sublist in all_delays for d in sublist])\n# delay_Model = lmfit.Model(delay_excitation)\n# delay_pars = delay_Model.make_params()\n\nflattened_d_nonInv = flattened_d[:]\nflattened_g = numpy.array(flattened_g)\nflattened_d = 1./numpy.array(flattened_d)\n\n#delay_result = delay_Model.fit(flattened_d, delay_pars, x=flattened_g)\nslope, intercept, lowerr, higherr = ss.mstats.theilslopes(y=flattened_d,x=flattened_g)\nindices = numpy.argsort(flattened_g)\nax.plot(flattened_g[indices], slope*flattened_g[indices] + intercept, '-',color='k')\n# ax.scatter(g[indices], d[indices], s=30, facecolor='k', edgecolor='k')\n#ax.plot(flattened_g[indices], delay_result.best_fit[indices], '-',color='k')\n#print (\"{:.2f} * g_e^{:.2f}\".format(delay_result.params['a'].value, delay_result.params['b'].value))#, delay_result.params['c'].value))\n#print(delay_result.fit_report())\nax.set_xlabel(\"$g_e$ (nS)\")\nax.set_ylabel(\"$\\\\delta_i$ (ms)\")\nfig.set_figwidth(4.5)\nfig.set_figheight(4.5)\nax.set_xlim(0,4.5)\nax.set_ylim(0,15)\nsimpleaxis(ax)\nplt.show()\n# ax.set_xticks(range(4))\n# ax.set_yticks(range(0,12,2))\n\n\n\n# print (\"{:.2f} + {:.2f}e^-{:.2f}E\".format(delay_result.params['a'].value, delay_result.params['b'].value, delay_result.params['c'].value))",
"_____no_output_____"
],
[
"from scipy.optimize import curve_fit\nfig, ax = plt.subplots()\nxArr = numpy.linspace(0.01,12,100)\nax.scatter(flattened_g,flattened_d_nonInv,s=8)\npopt, pcov = curve_fit(delay_excitation, flattened_g, flattened_d_nonInv, bounds=(0, [2., 2, 1]))\nax.plot(xArr, delay_excitation(xArr, *popt), 'r-')\n# ax.plot(xArr, (1.5/xArr) + 1.5, 'k--')\nax.set_xlim(-1,15)\nax.set_ylim(-1,15)\nplt.show()\nprint (popt, pcov)",
"_____no_output_____"
],
[
"print (slope, intercept)\nprint(lowerr,higherr)\n\nresiduals = (flattened_d - 1.5*flattened_g[indices]+1.5)\nplt.hist(residuals,bins=30)\nplt.vlines(x=0,ymin=0,ymax=200)\nplt.show()",
"_____no_output_____"
],
[
"residualArr = []\nfor i, (g, gi, d, c) in enumerate(zip(all_conductances, all_inh_conductances, all_delays, colors)):\n g, gi, d = numpy.array(g), numpy.array(gi), numpy.array(d)\n indices = numpy.where(d>0)\n g, gi, d = g[indices], gi[indices], d[indices]\n residuals = (d - 1.5*g+1.5)\n residualArr.append(residuals)\nplt.hist(residualArr,stacked=True)\nplt.show()",
"_____no_output_____"
],
[
"fig, ax = plt.subplots(3,1)\nax[0].hist(adist)\nax[1].hist(bdist,bins=5)\nplt.show()\nprint(numpy.mean(adist))\nprint(numpy.mean(bdist))",
"_____no_output_____"
],
[
"slopeArr",
"_____no_output_____"
],
[
"fig, ax = plt.subplots()\ncmap = matplotlib.cm.viridis\n\ncolors = matplotlib.cm.viridis(numpy.linspace(0, 1, len(all_inh_conductances)))\n\n# norm = matplotlib.colors.Normalize(vmin=1, vmax=6)\nfor i, (g, gi, d, c) in enumerate(zip(all_conductances, all_inh_conductances, all_delays, colors)):\n g, gi, d = numpy.array(g), numpy.array(gi), numpy.array(d)\n indices = numpy.argsort(g)\n \n# slope, intercept, rval, pvalue, err = ss.linregress(g[indices], gi[indices])\n# print(slope)\n #cbar = ax.scatter(g,d, c= [slope]*len(g), s= 10, cmap='viridis', vmin=1.5, vmax=3.2)\n# print(g)\n gmax = numpy.ceil(max(g))\n gmin = numpy.floor(min(g))\n print(gmin, min(g), gmax, max(g))\n# bins = numpy.linspace(gmin,gmax,(gmax - gmin) +1)\n print (gmin, gmax)\n bins = numpy.arange(gmin,gmax,1)\n indices = numpy.argsort(g)\n \n digitized = numpy.digitize(g[indices], bins)\n# bins = range(8)\n g_means = numpy.array([g[indices][digitized == i].mean() for i in bins])\n g_err = numpy.array([g[indices][digitized == i].std() for i in bins])\n d_means = numpy.array([d[indices][digitized == i].mean() for i in bins])\n d_err = numpy.array([d[indices][digitized == i].std() for i in bins])\n \n finiteYmask = numpy.isfinite(g_means)\n d_means = d_means[finiteYmask]\n g_means = g_means[finiteYmask]\n d_err = d_err[finiteYmask]\n g_err = g_err[finiteYmask]\n \n slope, intercept, rval, pvalue, err = ss.linregress(g_means, d_means)\n \n ax.errorbar(g_means, d_means, xerr = g_err, yerr = d_err, linestyle='')\n cbar = ax.scatter(g_means, d_means, s=10, c=c, alpha=0.5, cmap='viridis')\n# indices = numpy.argsort(g_means)\n print(g_means, d_means, intercept, slope)\n ax.plot(g_means, intercept + slope*g_means, c=c)\nplt.show()",
"_____no_output_____"
],
[
"delay_Model = lmfit.Model(delay_excitation)\ndelay_pars = delay_Model.make_params()\n\nindices = numpy.argsort(flattened_g)\nflattened_g = flattened_g[indices]\nflattened_d_fit = delay_result.eval(x=flattened_g)\n\ndelay_result = delay_Model.fit(flattened_d, delay_pars, x=flattened_g)\n\n\nfig, ax = plt.subplots()\nax.scatter(flattened_g, flattened_d, s=10, alpha=0.2,c='k')\n\nprint(delay_result.fit_report())\nax.plot(flattened_g, flattened_d_fit)\nplt.show()\n# slope, intercept, rval, pvalue, err = ss.linregress(flattened_g[indices], flattened_d[indices])\n# x_axis = numpy.linspace(numpy.min(flattened_g), numpy.max(flattened_g), 100)\n# y_axis = slope * x_axis + intercept\n\n# ax.set_xlim(0,6)\n# ax.set_ylim(-3,10)\n\n# ax.plot(x_axis, y_axis, '--')\nprint ( delay_result.params['a'],delay_result.params['b'],delay_result.params['c'])",
"_____no_output_____"
],
[
"keySet = set(inh_onsets).intersection(exc_onsets)",
"_____no_output_____"
],
[
"for key in keySet:\n print (inh_onsets[key], exc_onsets[key])",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e7fa990b19934b8cbf80f255f882dda5bae3251f | 103,386 | ipynb | Jupyter Notebook | OPC_Sensor/Models With Decompositions/Models with SparsePCA/CNN/CNN_tanh_binary.ipynb | utkarshkanswal/Machine-Learning-application-on-Air-quality-dataset | 12d0aca165fe0faf503ca38bd6a391452b480565 | [
"MIT"
] | 5 | 2021-10-18T07:36:05.000Z | 2022-02-09T06:46:58.000Z | OPC_Sensor/Models With Decompositions/Models with SparsePCA/CNN/CNN_tanh_binary.ipynb | utkarshkanswal/Machine-Learning-application-on-Air-quality-dataset | 12d0aca165fe0faf503ca38bd6a391452b480565 | [
"MIT"
] | null | null | null | OPC_Sensor/Models With Decompositions/Models with SparsePCA/CNN/CNN_tanh_binary.ipynb | utkarshkanswal/Machine-Learning-application-on-Air-quality-dataset | 12d0aca165fe0faf503ca38bd6a391452b480565 | [
"MIT"
] | null | null | null | 65.600254 | 21,216 | 0.664249 | [
[
[
"import tensorflow as tf\ntf.config.experimental.list_physical_devices()",
"_____no_output_____"
],
[
"tf.test.is_built_with_cuda()",
"_____no_output_____"
]
],
[
[
"# Importing Libraries",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport pandas as pd\nfrom matplotlib import pyplot as plt\nimport os.path as op\nimport pickle",
"_____no_output_____"
],
[
"import tensorflow as tf\nfrom tensorflow import keras\nfrom keras.models import Model,Sequential,load_model\nfrom keras.layers import Input, Embedding\nfrom keras.layers import Dense, Bidirectional\nfrom keras.layers.recurrent import LSTM\nimport keras.metrics as metrics\nimport itertools\nfrom tensorflow.python.keras.utils.data_utils import Sequence\nfrom decimal import Decimal\nfrom keras import backend as K\nfrom keras.layers import Conv1D,MaxPooling1D,Flatten,Dense",
"_____no_output_____"
]
],
[
[
"# Data Fetching",
"_____no_output_____"
]
],
[
[
"A1=np.empty((0,5),dtype='float32')\nU1=np.empty((0,7),dtype='float32')\nnode=['150','149','147','144','142','140','136','61']\nmon=['Apr','Mar','Aug','Jun','Jul','Sep','May','Oct']\nfor j in node:\n for i in mon:\n inp= pd.read_csv('../../../data_gkv/AT510_Node_'+str(j)+'_'+str(i)+'19_OutputFile.csv',usecols=[1,2,3,15,16])\n out= pd.read_csv('../../../data_gkv/AT510_Node_'+str(j)+'_'+str(i)+'19_OutputFile.csv',usecols=[5,6,7,8,17,18,19])\n \n inp=np.array(inp,dtype='float32')\n out=np.array(out,dtype='float32')\n \n A1=np.append(A1, inp, axis=0)\n U1=np.append(U1, out, axis=0)\n\nprint(A1)\nprint(U1)\n",
"[[1.50000e+02 1.90401e+05 7.25000e+02 2.75500e+01 8.03900e+01]\n [1.50000e+02 1.90401e+05 8.25000e+02 2.75600e+01 8.03300e+01]\n [1.50000e+02 1.90401e+05 9.25000e+02 2.75800e+01 8.02400e+01]\n ...\n [6.10000e+01 1.91020e+05 1.94532e+05 2.93700e+01 7.52100e+01]\n [6.10000e+01 1.91020e+05 1.94632e+05 2.93500e+01 7.52700e+01]\n [6.10000e+01 1.91020e+05 1.94732e+05 2.93400e+01 7.53000e+01]]\n[[ 28. 3. -52. ... 16.97 19.63 20.06]\n [ 28. 15. -53. ... 16.63 19.57 23.06]\n [ 31. 16. -55. ... 17.24 19.98 20.24]\n ...\n [ 76. 12. -76. ... 3.47 3.95 4.35]\n [ 75. 13. -76. ... 3.88 4.33 4.42]\n [ 76. 12. -75. ... 3.46 4.07 4.28]]\n"
]
],
[
[
"# Min Max Scaler",
"_____no_output_____"
]
],
[
[
"from sklearn.decomposition import SparsePCA\nimport warnings\nscaler_obj1=SparsePCA()\nscaler_obj2=SparsePCA()\nX1=scaler_obj1.fit_transform(A1)\nY1=scaler_obj2.fit_transform(U1)\n\nwarnings.filterwarnings(action='ignore', category=UserWarning)\n\n\nX1=X1[:,np.newaxis,:]\nY1=Y1[:,np.newaxis,:]",
"_____no_output_____"
],
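[
"# Illustrative check, not part of the original pipeline: SparsePCA learns sparse\n# component loadings, so a sizeable fraction of entries should be exactly zero.\n# Shapes assume the 5-column input block A1 and 7-column output block U1 above.\nprint(scaler_obj1.components_.shape)\nprint(np.mean(scaler_obj1.components_ == 0))\nprint(X1.shape, Y1.shape)",
"_____no_output_____"
],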
[
"def rmse(y_true, y_pred):\n return K.sqrt(K.mean(K.square(y_pred - y_true), axis=-1))\n\ndef coeff_determination(y_true, y_pred):\n SS_res = K.sum(K.square( y_true-y_pred )) \n SS_tot = K.sum(K.square( y_true - K.mean(y_true) ) ) \n return ( 1 - SS_res/(SS_tot + K.epsilon()) )",
"_____no_output_____"
]
],
[
[
"# Model",
"_____no_output_____"
]
],
[
[
"inp=keras.Input(shape=(1,5))\n\nl=keras.layers.Conv1D(16,1,padding=\"same\",activation=\"tanh\",kernel_initializer=\"glorot_uniform\")(inp)\n \noutput = keras.layers.Conv1D(7,4,padding=\"same\",activation='sigmoid')(l)\n\nmodel1=keras.Model(inputs=inp,outputs=output)\nmodel1.compile(optimizer=keras.optimizers.Adam(learning_rate=1e-5), loss='binary_crossentropy',metrics=['accuracy','mse','mae',rmse])\nmodel1.summary()",
"Model: \"model\"\n_________________________________________________________________\n Layer (type) Output Shape Param # \n=================================================================\n input_1 (InputLayer) [(None, 1, 5)] 0 \n \n conv1d (Conv1D) (None, 1, 16) 96 \n \n conv1d_1 (Conv1D) (None, 1, 7) 455 \n \n=================================================================\nTotal params: 551\nTrainable params: 551\nNon-trainable params: 0\n_________________________________________________________________\n"
],
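[
"# Quick illustrative check of the custom metrics on toy tensors (assumes TF2 eager\n# execution): rmse reduces over the last axis, coeff_determination over the batch.\ny_true_demo = tf.constant([[1.0, 2.0], [3.0, 4.0]])\ny_pred_demo = tf.constant([[1.5, 2.0], [2.0, 4.0]])\nprint(rmse(y_true_demo, y_pred_demo).numpy())\nprint(coeff_determination(y_true_demo, y_pred_demo).numpy())",
"_____no_output_____"
],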
[
"from sklearn.model_selection import train_test_split\nx_train, x_test, y_train, y_test = train_test_split(X1, Y1, test_size=0.25, random_state=42)\n\nhistory1 = model1.fit(x_train,y_train,batch_size=256,epochs=50, validation_data=(x_test, y_test),verbose = 2, shuffle= False)",
"Epoch 1/50\n5070/5070 - 132s - loss: -7.6008e+00 - accuracy: 0.1323 - mse: 2026000128.0000 - mae: 93.4407 - rmse: 189.2681 - val_loss: -7.7411e-02 - val_accuracy: 0.1347 - val_mse: 1403856.8750 - val_mae: 73.6462 - val_rmse: 139.6765 - 132s/epoch - 26ms/step\nEpoch 2/50\n5070/5070 - 80s - loss: -1.1697e+01 - accuracy: 0.1344 - mse: 2026000128.0000 - mae: 93.4300 - rmse: 189.2609 - val_loss: -3.4081e+00 - val_accuracy: 0.1352 - val_mse: 1403855.3750 - val_mae: 73.6374 - val_rmse: 139.6711 - 80s/epoch - 16ms/step\nEpoch 3/50\n5070/5070 - 80s - loss: -1.4041e+01 - accuracy: 0.1349 - mse: 2026000128.0000 - mae: 93.4219 - rmse: 189.2561 - val_loss: -6.3163e+00 - val_accuracy: 0.1354 - val_mse: 1403854.1250 - val_mae: 73.6305 - val_rmse: 139.6671 - 80s/epoch - 16ms/step\nEpoch 4/50\n5070/5070 - 80s - loss: -1.6811e+01 - accuracy: 0.1332 - mse: 2026000128.0000 - mae: 93.4158 - rmse: 189.2522 - val_loss: -9.1781e+00 - val_accuracy: 0.1325 - val_mse: 1403853.3750 - val_mae: 73.6249 - val_rmse: 139.6635 - 80s/epoch - 16ms/step\nEpoch 5/50\n5070/5070 - 83s - loss: -1.9563e+01 - accuracy: 0.1307 - mse: 2026000128.0000 - mae: 93.4107 - rmse: 189.2490 - val_loss: -1.2023e+01 - val_accuracy: 0.1277 - val_mse: 1403851.8750 - val_mae: 73.6201 - val_rmse: 139.6604 - 83s/epoch - 16ms/step\nEpoch 6/50\n5070/5070 - 117s - loss: -2.2306e+01 - accuracy: 0.1266 - mse: 2026000128.0000 - mae: 93.4063 - rmse: 189.2459 - val_loss: -1.4872e+01 - val_accuracy: 0.1251 - val_mse: 1403851.1250 - val_mae: 73.6159 - val_rmse: 139.6572 - 117s/epoch - 23ms/step\nEpoch 7/50\n5070/5070 - 123s - loss: -2.5049e+01 - accuracy: 0.1246 - mse: 2026000128.0000 - mae: 93.4022 - rmse: 189.2430 - val_loss: -1.7700e+01 - val_accuracy: 0.1243 - val_mse: 1403850.6250 - val_mae: 73.6119 - val_rmse: 139.6542 - 123s/epoch - 24ms/step\nEpoch 8/50\n5070/5070 - 94s - loss: -2.7787e+01 - accuracy: 0.1239 - mse: 2026000128.0000 - mae: 93.3984 - rmse: 189.2403 - val_loss: -2.0545e+01 - val_accuracy: 0.1235 - val_mse: 1403850.1250 - val_mae: 73.6084 - val_rmse: 139.6515 - 94s/epoch - 18ms/step\nEpoch 9/50\n5070/5070 - 56s - loss: -3.1235e+01 - accuracy: 0.2064 - mse: 2026000128.0000 - mae: 93.3951 - rmse: 189.2377 - val_loss: -2.3380e+01 - val_accuracy: 0.2884 - val_mse: 1403849.7500 - val_mae: 73.6052 - val_rmse: 139.6491 - 56s/epoch - 11ms/step\nEpoch 10/50\n5070/5070 - 108s - loss: -3.3999e+01 - accuracy: 0.2937 - mse: 2026000128.0000 - mae: 93.3919 - rmse: 189.2353 - val_loss: -2.6278e+01 - val_accuracy: 0.2999 - val_mse: 1403849.1250 - val_mae: 73.6021 - val_rmse: 139.6469 - 108s/epoch - 21ms/step\nEpoch 11/50\n5070/5070 - 53s - loss: -3.6875e+01 - accuracy: 0.3004 - mse: 2026000128.0000 - mae: 93.3889 - rmse: 189.2330 - val_loss: -2.9147e+01 - val_accuracy: 0.3000 - val_mse: 1403849.0000 - val_mae: 73.5993 - val_rmse: 139.6446 - 53s/epoch - 10ms/step\nEpoch 12/50\n5070/5070 - 52s - loss: -3.9692e+01 - accuracy: 0.3004 - mse: 2026000128.0000 - mae: 93.3860 - rmse: 189.2310 - val_loss: -3.1999e+01 - val_accuracy: 0.3000 - val_mse: 1403848.7500 - val_mae: 73.5963 - val_rmse: 139.6427 - 52s/epoch - 10ms/step\nEpoch 13/50\n5070/5070 - 50s - loss: -4.2429e+01 - accuracy: 0.3005 - mse: 2026000128.0000 - mae: 93.3834 - rmse: 189.2290 - val_loss: -3.4914e+01 - val_accuracy: 0.3000 - val_mse: 1403848.3750 - val_mae: 73.5937 - val_rmse: 139.6407 - 50s/epoch - 10ms/step\nEpoch 14/50\n5070/5070 - 51s - loss: -4.5255e+01 - accuracy: 0.3005 - mse: 2026000128.0000 - mae: 93.3808 - rmse: 189.2274 - val_loss: -3.7801e+01 - val_accuracy: 0.3001 - val_mse: 
1403848.1250 - val_mae: 73.5913 - val_rmse: 139.6392 - 51s/epoch - 10ms/step\nEpoch 15/50\n5070/5070 - 52s - loss: -4.8030e+01 - accuracy: 0.3006 - mse: 2026000128.0000 - mae: 93.3783 - rmse: 189.2259 - val_loss: -4.0653e+01 - val_accuracy: 0.3001 - val_mse: 1403847.3750 - val_mae: 73.5890 - val_rmse: 139.6378 - 52s/epoch - 10ms/step\nEpoch 16/50\n5070/5070 - 50s - loss: -5.0786e+01 - accuracy: 0.3007 - mse: 2026000128.0000 - mae: 93.3761 - rmse: 189.2245 - val_loss: -4.3499e+01 - val_accuracy: 0.3002 - val_mse: 1403847.0000 - val_mae: 73.5866 - val_rmse: 139.6365 - 50s/epoch - 10ms/step\nEpoch 17/50\n5070/5070 - 50s - loss: -5.3541e+01 - accuracy: 0.3007 - mse: 2026000128.0000 - mae: 93.3739 - rmse: 189.2234 - val_loss: -4.6345e+01 - val_accuracy: 0.3002 - val_mse: 1403846.5000 - val_mae: 73.5845 - val_rmse: 139.6354 - 50s/epoch - 10ms/step\nEpoch 18/50\n5070/5070 - 52s - loss: -5.6295e+01 - accuracy: 0.3008 - mse: 2026000128.0000 - mae: 93.3718 - rmse: 189.2225 - val_loss: -4.9187e+01 - val_accuracy: 0.3003 - val_mse: 1403846.1250 - val_mae: 73.5826 - val_rmse: 139.6344 - 52s/epoch - 10ms/step\nEpoch 19/50\n5070/5070 - 50s - loss: -5.9048e+01 - accuracy: 0.3008 - mse: 2026000128.0000 - mae: 93.3698 - rmse: 189.2214 - val_loss: -5.2035e+01 - val_accuracy: 0.3003 - val_mse: 1403846.2500 - val_mae: 73.5805 - val_rmse: 139.6335 - 50s/epoch - 10ms/step\nEpoch 20/50\n5070/5070 - 51s - loss: -6.1799e+01 - accuracy: 0.3008 - mse: 2026000128.0000 - mae: 93.3679 - rmse: 189.2205 - val_loss: -5.4879e+01 - val_accuracy: 0.3003 - val_mse: 1403845.8750 - val_mae: 73.5786 - val_rmse: 139.6327 - 51s/epoch - 10ms/step\nEpoch 21/50\n5070/5070 - 53s - loss: -6.4549e+01 - accuracy: 0.3008 - mse: 2026000128.0000 - mae: 93.3660 - rmse: 189.2197 - val_loss: -5.7716e+01 - val_accuracy: 0.3003 - val_mse: 1403845.3750 - val_mae: 73.5769 - val_rmse: 139.6319 - 53s/epoch - 11ms/step\nEpoch 22/50\n5070/5070 - 51s - loss: -6.7300e+01 - accuracy: 0.3008 - mse: 2026000128.0000 - mae: 93.3643 - rmse: 189.2189 - val_loss: -6.0554e+01 - val_accuracy: 0.3003 - val_mse: 1403845.2500 - val_mae: 73.5751 - val_rmse: 139.6312 - 51s/epoch - 10ms/step\nEpoch 23/50\n5070/5070 - 50s - loss: -7.0048e+01 - accuracy: 0.3009 - mse: 2026000128.0000 - mae: 93.3625 - rmse: 189.2183 - val_loss: -6.3400e+01 - val_accuracy: 0.3003 - val_mse: 1403845.2500 - val_mae: 73.5735 - val_rmse: 139.6306 - 50s/epoch - 10ms/step\nEpoch 24/50\n5070/5070 - 50s - loss: -7.2797e+01 - accuracy: 0.3009 - mse: 2026000128.0000 - mae: 93.3609 - rmse: 189.2177 - val_loss: -6.6233e+01 - val_accuracy: 0.3004 - val_mse: 1403844.8750 - val_mae: 73.5717 - val_rmse: 139.6301 - 50s/epoch - 10ms/step\nEpoch 25/50\n5070/5070 - 50s - loss: -7.5545e+01 - accuracy: 0.3009 - mse: 2026000128.0000 - mae: 93.3594 - rmse: 189.2171 - val_loss: -6.9085e+01 - val_accuracy: 0.3004 - val_mse: 1403844.7500 - val_mae: 73.5703 - val_rmse: 139.6295 - 50s/epoch - 10ms/step\nEpoch 26/50\n5070/5070 - 49s - loss: -7.8296e+01 - accuracy: 0.3078 - mse: 2026000128.0000 - mae: 93.3578 - rmse: 189.2166 - val_loss: -7.1915e+01 - val_accuracy: 0.3117 - val_mse: 1403844.5000 - val_mae: 73.5689 - val_rmse: 139.6290 - 49s/epoch - 10ms/step\nEpoch 27/50\n5070/5070 - 49s - loss: -8.1046e+01 - accuracy: 0.3122 - mse: 2026000128.0000 - mae: 93.3564 - rmse: 189.2162 - val_loss: -7.4768e+01 - val_accuracy: 0.3118 - val_mse: 1403844.3750 - val_mae: 73.5674 - val_rmse: 139.6285 - 49s/epoch - 10ms/step\nEpoch 28/50\n5070/5070 - 50s - loss: -8.3796e+01 - accuracy: 0.3123 - mse: 2026000128.0000 - mae: 93.3549 - 
rmse: 189.2158 - val_loss: -7.7607e+01 - val_accuracy: 0.3119 - val_mse: 1403844.3750 - val_mae: 73.5661 - val_rmse: 139.6281 - 50s/epoch - 10ms/step\nEpoch 29/50\n5070/5070 - 50s - loss: -8.6542e+01 - accuracy: 0.3123 - mse: 2026000128.0000 - mae: 93.3537 - rmse: 189.2153 - val_loss: -8.0443e+01 - val_accuracy: 0.3119 - val_mse: 1403844.0000 - val_mae: 73.5648 - val_rmse: 139.6277 - 50s/epoch - 10ms/step\nEpoch 30/50\n5070/5070 - 50s - loss: -8.9291e+01 - accuracy: 0.3123 - mse: 2026000128.0000 - mae: 93.3524 - rmse: 189.2150 - val_loss: -8.3282e+01 - val_accuracy: 0.3119 - val_mse: 1403844.0000 - val_mae: 73.5636 - val_rmse: 139.6273 - 50s/epoch - 10ms/step\nEpoch 31/50\n5070/5070 - 50s - loss: -9.2038e+01 - accuracy: 0.3123 - mse: 2026000128.0000 - mae: 93.3513 - rmse: 189.2147 - val_loss: -8.6123e+01 - val_accuracy: 0.3119 - val_mse: 1403843.7500 - val_mae: 73.5624 - val_rmse: 139.6270 - 50s/epoch - 10ms/step\nEpoch 32/50\n5070/5070 - 50s - loss: -9.4784e+01 - accuracy: 0.3126 - mse: 2026000128.0000 - mae: 93.3500 - rmse: 189.2144 - val_loss: -8.8961e+01 - val_accuracy: 0.3128 - val_mse: 1403843.6250 - val_mae: 73.5611 - val_rmse: 139.6267 - 50s/epoch - 10ms/step\n"
],
[
"model1.evaluate(x_test,y_test)",
"13518/13518 [==============================] - 99s 7ms/step - loss: -140.0141 - accuracy: 0.3129 - mse: 1403845.8750 - mae: 73.5453 - rmse: 139.6234\n"
]
],
[
[
"# Saving Model as File",
"_____no_output_____"
]
],
[
[
"model1.evaluate(x_train,y_train)",
"40554/40554 [==============================] - 306s 8ms/step - loss: -145.5569 - accuracy: 0.3133 - mse: 2025999232.0000 - mae: 93.3334 - rmse: 189.2106\n"
],
[
"df1=pd.DataFrame(history1.history['loss'],columns=[\"Loss\"])\ndf1=df1.join(pd.DataFrame(history1.history[\"val_loss\"],columns=[\"Val Loss\"]))\ndf1=df1.join(pd.DataFrame(history1.history[\"accuracy\"],columns=['Accuracy']))\ndf1=df1.join(pd.DataFrame(history1.history[\"val_accuracy\"],columns=['Val Accuracy']))\ndf1=df1.join(pd.DataFrame(history1.history[\"mse\"],columns=['MSE']))\ndf1=df1.join(pd.DataFrame(history1.history[\"val_mse\"],columns=['Val MSE']))\ndf1=df1.join(pd.DataFrame(history1.history[\"mae\"],columns=['MAE']))\ndf1=df1.join(pd.DataFrame(history1.history[\"val_mae\"],columns=['Val MAE']))\ndf1=df1.join(pd.DataFrame(history1.history[\"rmse\"],columns=['RMSE']))\ndf1=df1.join(pd.DataFrame(history1.history[\"val_mse\"],columns=['Val RMSE']))\ndf1",
"_____no_output_____"
],
[
"df1.to_excel(\"GRU_tanh_mse.xlsx\")",
"_____no_output_____"
],
[
"model_json = model1.to_json()\nwith open(\"cnn_relu.json\", \"w\") as json_file:\n json_file.write(model_json)\n# serialize weights to HDF5\nmodel1.save_weights(\"cnn_relu.h5\")\nprint(\"Saved model to disk\")",
"Saved model to disk\n"
],
[
"from sklearn.model_selection import train_test_split\nx_train, x_test, y_train, y_test = train_test_split(X1, Y1, test_size=0.25, random_state=42)\n\nfrom keras.models import model_from_json\njson_file = open('cnn_relu.json', 'r')\nloaded_model_json = json_file.read()\njson_file.close()\nloaded_model = model_from_json(loaded_model_json)\n# load weights into new model\nloaded_model.load_weights(\"cnn_relu.h5\")\nprint(\"Loaded model from disk\")\nloaded_model.compile(optimizer=keras.optimizers.Adam(learning_rate=1e-5), loss='mse',metrics=['accuracy','mse','mae',rmse])",
"Loaded model from disk\n"
],
[
"loaded_model.evaluate(x_train, y_train, verbose=0)",
"_____no_output_____"
],
[
"loaded_model.evaluate(x_test, y_test, verbose=0)",
"_____no_output_____"
]
],
[
[
"# Error Analysis",
"_____no_output_____"
]
],
[
[
"# summarize history for loss\nplt.plot(history1.history['loss'])\nplt.plot(history1.history['val_loss'])\nplt.title('Model Loss',fontweight ='bold',fontsize = 15)\nplt.ylabel('Loss',fontweight ='bold',fontsize = 15)\nplt.xlabel('Epoch',fontweight ='bold',fontsize = 15)\nplt.legend(['Train', 'Test'], loc='upper left')\nplt.show()\n\n# summarize history for accuracy\nplt.plot(history1.history['accuracy'])\nplt.plot(history1.history['val_accuracy'])\nplt.title('Model accuracy',fontweight ='bold',fontsize = 15)\nplt.ylabel('Accuracy',fontweight ='bold',fontsize = 15)\nplt.xlabel('Epoch',fontweight ='bold',fontsize = 15)\nplt.legend(['Train', 'Test'], loc='upper left')\nplt.show()",
"_____no_output_____"
],
[
"from sklearn.model_selection import train_test_split\nx_train, x_test, y_train, y_test = train_test_split(X1, Y1, test_size=0.25, random_state=42)\n\ny_test_pred=loaded_model.predict(x_test)\ny_test_pred",
"_____no_output_____"
],
[
"y_test",
"_____no_output_____"
],
[
"y_test=y_test[:,0]\ny_test_pred=y_test_pred[:,0]",
"_____no_output_____"
],
[
"from numpy import savetxt\nsavetxt('cnn_y_test_pred.csv', y_test_pred[:1001], delimiter=',')",
"_____no_output_____"
],
[
"from numpy import savetxt\nsavetxt('cnn_y_test.csv', y_test[:1001], delimiter=',')",
"_____no_output_____"
],
[
"#completed",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e7faa4c1166961994e97698a29a0373a6755c926 | 633,369 | ipynb | Jupyter Notebook | notebook/safa.ipynb | alineu/elastic_beams_in_shear_flow | 297a6080f61fdca2ce1fa0f953f9cf871811efcc | [
"MIT"
] | null | null | null | notebook/safa.ipynb | alineu/elastic_beams_in_shear_flow | 297a6080f61fdca2ce1fa0f953f9cf871811efcc | [
"MIT"
] | null | null | null | notebook/safa.ipynb | alineu/elastic_beams_in_shear_flow | 297a6080f61fdca2ce1fa0f953f9cf871811efcc | [
"MIT"
] | null | null | null | 1,671.158311 | 124,474 | 0.943202 | [
[
[
"import numpy as np\nimport pandas as pd\nimport warnings\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom matplotlib import rc, rcParams\nwarnings.filterwarnings('ignore')\npd.set_option('display.float_format', lambda x: '%.5f' % x)\nmatplotlib.rcParams['font.family'] = 'serif'\nrc('font',**{'family':'serif','serif':['Times']})\nrc('text', usetex=True)\nwarnings.filterwarnings('ignore')\nfigPath = \"/Users/Ali/Dropbox/paper/figures\"",
"_____no_output_____"
]
],
[
[
"## Carreau-Yassuda",
"_____no_output_____"
]
],
[
[
"(nu*x).shape",
"_____no_output_____"
],
[
"import pylab\nimport matplotlib.pyplot as plt\nimport numpy\n\ndef get_cmap(n,name='viridis'):\n return plt.cm.get_cmap(name,n)\n\na = 2\nnu0=5e-3\nnuinf=1e-3\nh_channel = 1.2 #m\nshear_rate_order = 10\nx = numpy.logspace(-shear_rate_order,shear_rate_order,10000)\nk=[5e-1,5e2,5e8]\nn=[0.8]\nfig, ax = plt.subplots(figsize=(6,4),dpi=250,facecolor='w')\nvelocity = 2.0 #m/s\nshear_rate = velocity/h_channel #1/s\ndim=len(k)*len(n)\ncmap=get_cmap(dim)\nright, bottom, width, height = [0.6, 0.6, 0.28, 0.25]\n#ax2 = fig.add_axes([right, bottom, width, height])\nfor i_k in range(len(k)):\n for i_n in range(len(n)):\n index = i_k*len(n)+i_n\n nu = nuinf+(nu0-nuinf)*(1+(k[i_k]*x)**a)**((n[i_n]-1)/a)\n ax.loglog(x,nu,':',c=cmap(int(index)),mfc='w',markersize=2,label=\"k=%2.1e, n=%2.1e\" % (k[i_k],n[i_n]))\n ax.set_ylabel(r'$\\mu$'+' (Pa.s)')\n ax.set_xlabel(r'$\\dot{\\gamma} \\ (\\frac{1}{s})$')\n \nax.axvline(x=(shear_rate),c='r',lw=0.5,linestyle='--',label=(r'$\\dot{\\gamma} = %2.2f $')%shear_rate)\nax.legend(loc='center',bbox_to_anchor=(1.2,0.5))\nax.set_title('Carreau-Yasuda model parameters \\n ' + \n r'$\\mu=\\mu_{\\,\\infty}+(\\mu_{\\,0}-\\mu_{\\,\\infty})\\big[{1+(k\\dot{\\gamma})^a}\\big]^{\\frac{{(n-1)}_{ }}{a}}$')\npylab.show()\nk=[1,30,1e5]\nn=[0.5]\nf, ax = plt.subplots(figsize=(6,4),dpi=250,facecolor='w')\nvelocity = 0.5 #m/s\nshear_rate = velocity/h_channel #1/s\ndim=len(k)*len(n)\ncmap=get_cmap(dim)\nfor i_k in range(len(k)):\n for i_n in range(len(n)):\n index = i_k*len(n)+i_n\n nu = nuinf+(nu0-nuinf)*(1+(k[i_k]*x)**a)**((n[i_n]-1)/a)\n ax.loglog(x,nu,':',c=cmap(int(index)),mfc='w',markersize=2,label=\"k=%2.1e, n=%2.1e\" % (k[i_k],n[i_n]))\n ax.set_ylabel(r'$\\mu$'+' (Pa.s)')\n ax.set_xlabel(r'$\\dot{\\gamma} \\ (\\frac{1}{s})$')\nax.axvline(x=(shear_rate),c='g',lw=0.5,linestyle='--',label=(r'$\\dot{\\gamma} = %2.2f $')%shear_rate)\nax.legend(loc='center',bbox_to_anchor=(1.2,0.5))\n\npylab.show()\n\n\nk=[2,30,1e3]\nn=[0.2]\nf, ax = plt.subplots(figsize=(6,4),dpi=250,facecolor='w')\nvelocity = 0.2 #m/s\nshear_rate = velocity/h_channel #1/s\ndim=len(k)*len(n)\ncmap=get_cmap(dim)\nfor i_k in range(len(k)):\n for i_n in range(len(n)):\n index = i_k*len(n)+i_n\n nu = nuinf+(nu0-nuinf)*(1+(k[i_k]*x)**a)**((n[i_n]-1)/a)\n ax.loglog(x,nu,':',c=cmap(int(index)),mfc='w',markersize=2,label=\"k=%2.1e, n=%2.1e\" % (k[i_k],n[i_n]))\n ax.set_ylabel(r'$\\mu$'+' (Pa.s)')\n ax.set_xlabel(r'$\\dot{\\gamma} (\\frac{1}{s})$')\nax.axvline(x=shear_rate,c='b',lw=0.5,linestyle='--',label=(r'$\\dot{\\gamma} = %2.2f $')%shear_rate)\nax.legend(loc='center',bbox_to_anchor=(1.2,0.5))\npylab.show()\n",
"_____no_output_____"
]
],
[
[
"## Herschel-Bulkley",
"_____no_output_____"
]
],
[
[
"import pylab\nimport matplotlib.pyplot as plt\nimport numpy\n\ndef get_cmap(n,name='viridis'):\n return plt.cm.get_cmap(name,n)\n\na = 2\nnpoints = 10000\nh_channel = 1.2 #m\nvelocity = 2.0 #m/s\nshear_rate = velocity/h_channel #1/s\nnu0=5e-3*numpy.ones(npoints)\nnuinf=1e-3 # Newtonian regime\ntau0=shear_rate*nuinf*numpy.ones(npoints)\nshear_rate_order = 10\nx = numpy.logspace(-shear_rate_order,shear_rate_order,npoints)\n\nf, ax = plt.subplots(figsize=(6,4),dpi=250,facecolor='w')\nk=[4.4e-3,1.5e-3,1e-5]\nn=[0.8]\ndim=len(k)*len(n)\ncmap=get_cmap(dim)\nfor i_k in range(len(k)):\n for i_n in range(len(n)):\n index = i_k*len(n)+i_n\n nu = numpy.minimum(nu0,(tau0/shear_rate)+k[i_k]*(x**(n[i_n]-1)))\n ax.loglog(x,nu,':',c=cmap(int(index)),mfc='w',markersize=2,label=\"k=%2.1e, n=%2.1e\" % (k[i_k],n[i_n]))\n ax.set_ylabel(r'$\\mu$'+' (Pa.s)')\n ax.set_xlabel(r'$\\dot{\\gamma} \\ (\\frac{1}{s})$')\nax.axvline(x=(shear_rate),c='r',lw=0.5,linestyle='--',label=(r'$\\dot{\\gamma} = %2.2f $')%shear_rate)\nax.set_title(\"Herschel-Bulkley model parameters \\n \" + \n r'$\\mu = \\mathrm{min}(\\mu_0,\\frac{\\tau_0}{\\dot{\\gamma}}+k\\,\\dot{\\gamma}^{\\,n-1})$')\nax.legend(loc='center',bbox_to_anchor=(1.2,0.5))\npylab.show()\n\nf, ax = plt.subplots(figsize=(6,4),dpi=250,facecolor='w')\nk=[2.5e-3,8e-4,1e-5]\nn=[0.5]\nvelocity = 0.5 #m/s\nshear_rate = velocity/h_channel #1/s\ntau0=shear_rate*nuinf*numpy.ones(npoints)\ndim=len(k)*len(n)\ncmap=get_cmap(dim)\nfor i_k in range(len(k)):\n for i_n in range(len(n)):\n index = i_k*len(n)+i_n\n nu = numpy.minimum(nu0,(tau0/shear_rate)+k[i_k]*(x**(n[i_n]-1)))\n ax.loglog(x,nu,':',c=cmap(int(index)),mfc='w',markersize=2,label=\"k=%2.1e, n=%2.1e\" % (k[i_k],n[i_n]))\n ax.set_ylabel(r'$\\mu$'+' (Pa.s)')\n ax.set_xlabel(r'$\\dot{\\gamma} \\ (\\frac{1}{s})$')\nax.axvline(x=(shear_rate),c='g',lw=0.5,linestyle='--',label=(r'$\\dot{\\gamma} = %2.2f $')%shear_rate)\nax.legend(loc='center',bbox_to_anchor=(1.2,0.5))\npylab.show()\n\nf, ax = plt.subplots(figsize=(6,4),dpi=250,facecolor='w')\nk=[9e-4,3e-4,1e-5]\nn=[0.2]\nvelocity = 0.2 #m/s\nshear_rate = velocity/h_channel #1/s\ntau0=shear_rate*nuinf*numpy.ones(npoints)\nshear_rate = velocity/h_channel #1/s\ndim=len(k)*len(n)\ncmap=get_cmap(dim)\nfor i_k in range(len(k)):\n for i_n in range(len(n)):\n index = i_k*len(n)+i_n\n nu = numpy.minimum(nu0,(tau0/shear_rate)+k[i_k]*(x**(n[i_n]-1)))\n ax.loglog(x,nu,':',c=cmap(int(index)),mfc='w',markersize=2,label=\"k=%2.1e, n=%2.1e\" % (k[i_k],n[i_n]))\n ax.set_ylabel(r'$\\mu$'+' (Pa.s)')\n ax.set_xlabel(r'$\\dot{\\gamma} \\ (\\frac{1}{s})$')\nax.axvline(x=(shear_rate),c='b',lw=0.5,linestyle='--',label=(r'$\\dot{\\gamma} = %2.2f $')%shear_rate)\nax.legend(loc='center',bbox_to_anchor=(1.2,0.5))\npylab.show()\n",
"_____no_output_____"
],
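[
"# Illustrative helper (not in the original script): the Herschel-Bulkley law from\n# the cell title as a reusable function. numpy.minimum applies the mu_0 cap\n# elementwise, which the builtin min() in the scratch cell below cannot do on\n# arrays. Parameter values here are made up.\ndef herschel_bulkley(gamma_dot, nu0, tau0, k, n):\n    return numpy.minimum(nu0, tau0/gamma_dot + k*gamma_dot**(n - 1))\n\nprint (herschel_bulkley(numpy.logspace(-2, 2, 5), 5e-3, 4.2e-4, 2.5e-3, 0.5))",
"_____no_output_____"
],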
[
"(tau0/shear_rate)+k[i_k]*(x**(n[i_n]-1))",
"_____no_output_____"
],
[
"min(nu0,k[i_k]*(x**(n[i_n]-1)))",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
]
] |
e7faabc977e6223bcaaafd9c91a26db880d59408 | 110,050 | ipynb | Jupyter Notebook | [python_datavisualization]Ex2_Plotly.ipynb | KeeLee-BNU/Python-_wanglab- | 5f869ec6f114f91a825ef14087fbfc768e180183 | [
"Apache-2.0"
] | null | null | null | [python_datavisualization]Ex2_Plotly.ipynb | KeeLee-BNU/Python-_wanglab- | 5f869ec6f114f91a825ef14087fbfc768e180183 | [
"Apache-2.0"
] | null | null | null | [python_datavisualization]Ex2_Plotly.ipynb | KeeLee-BNU/Python-_wanglab- | 5f869ec6f114f91a825ef14087fbfc768e180183 | [
"Apache-2.0"
] | null | null | null | 31.88007 | 10,988 | 0.353539 | [
[
[
"import os\nprint(os.getcwd())\nos.chdir(r'C:\\Users\\王浣清\\desktop')",
"C:\\Users\\王浣清\\Desktop\n"
],
[
"import pandas as pd\ndf = pd.read_csv('py_vislz_data.csv',sep=',',header=0)\ndf",
"_____no_output_____"
],
[
"import plotly.offline as of\nof.offline.init_notebook_mode(connected=True)",
"_____no_output_____"
],
[
"import plotly.graph_objects as go\n\nfig = go.Figure(data=[\n go.Bar(name='time_1', x=df.group, y=df.time_1,marker=dict(color='darkgreen')),\n go.Bar(name='time_2', x=df.group, y=df.time_2,marker=dict(color='rgb(122,230,180)')),\n go.Bar(name='time_3', x=df.group, y=df.time_3,marker=dict(color='#59606D')),\n \n])\n# Change the bar mode\nfig.update_layout(barmode='group')\nfig.show()\n# of.plot(fig)",
"_____no_output_____"
],
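[
"# Variant (illustrative): the same three series stacked instead of grouped, by\n# switching barmode; everything else is unchanged.\nfig = go.Figure(data=[\n    go.Bar(name='time_1', x=df.group, y=df.time_1),\n    go.Bar(name='time_2', x=df.group, y=df.time_2),\n    go.Bar(name='time_3', x=df.group, y=df.time_3),\n])\nfig.update_layout(barmode='stack')\nfig.show()",
"_____no_output_____"
],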
[
"# # https://plotly.com/python/heatmaps/\nimport plotly.express as px\nfig = px.imshow([\n df.time_1,df.time_2,df.time_3,\n])\nfig.show()",
"_____no_output_____"
],
[
"fig = px.bar(df, x=\"name\", y=[\"time_1\",\"time_2\"],color=\"age\")\nfig.show()",
"_____no_output_____"
],
[
"import inspect\ninspect.signature(go.Bar)",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e7fab13d956f5c2ec2f7491a27daab6a38ba773f | 65,226 | ipynb | Jupyter Notebook | notebooks/community/gapic/automl/showcase_automl_video_action_recognition_batch.ipynb | shenzhimo2/vertex-ai-samples | 06fcfbff4800e4aa9a69266dd9b1d3e51a618b47 | [
"Apache-2.0"
] | 2 | 2021-10-02T02:17:20.000Z | 2021-11-17T10:35:01.000Z | notebooks/community/gapic/automl/showcase_automl_video_action_recognition_batch.ipynb | shenzhimo2/vertex-ai-samples | 06fcfbff4800e4aa9a69266dd9b1d3e51a618b47 | [
"Apache-2.0"
] | 4 | 2021-08-18T18:58:26.000Z | 2022-02-10T07:03:36.000Z | notebooks/community/gapic/automl/showcase_automl_video_action_recognition_batch.ipynb | shenzhimo2/vertex-ai-samples | 06fcfbff4800e4aa9a69266dd9b1d3e51a618b47 | [
"Apache-2.0"
] | null | null | null | 36.809255 | 370 | 0.610508 | [
[
[
"# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.",
"_____no_output_____"
]
],
[
[
"# Vertex client library: AutoML video action recognition model for batch prediction\n\n<table align=\"left\">\n <td>\n <a href=\"https://colab.research.google.com/github/GoogleCloudPlatform/vertex-ai-samples/blob/master/notebooks/community/gapic/automl/showcase_automl_video_action_recognition_batch.ipynb\">\n <img src=\"https://cloud.google.com/ml-engine/images/colab-logo-32px.png\" alt=\"Colab logo\"> Run in Colab\n </a>\n </td>\n <td>\n <a href=\"https://github.com/GoogleCloudPlatform/vertex-ai-samples/blob/master/notebooks/community/gapic/automl/showcase_automl_video_action_recognition_batch.ipynb\">\n <img src=\"https://cloud.google.com/ml-engine/images/github-logo-32px.png\" alt=\"GitHub logo\">\n View on GitHub\n </a>\n </td>\n</table>\n<br/><br/><br/>",
"_____no_output_____"
],
[
"## Overview\n\n\nThis tutorial demonstrates how to use the Vertex client library for Python to create video action recognition models and do batch prediction using Google Cloud's [AutoML](https://cloud.google.com/vertex-ai/docs/start/automl-users).",
"_____no_output_____"
],
[
"### Dataset\n\nThe dataset used for this tutorial is the golf swing recognition portion of the [Human Motion dataset](https://todo) from [MIT](http://cbcl.mit.edu/publications/ps/Kuehne_etal_iccv11.pdf). The version of the dataset you will use in this tutorial is stored in a public Cloud Storage bucket. The trained model will predict the start frame where a golf swing begins.",
"_____no_output_____"
],
[
"### Objective\n\nIn this tutorial, you create an AutoML video action recognition model from a Python script, and then do a batch prediction using the Vertex client library. You can alternatively create and deploy models using the `gcloud` command-line tool or online using the Google Cloud Console.\n\nThe steps performed include:\n\n- Create a Vertex `Dataset` resource.\n- Train the model.\n- View the model evaluation.\n- Make a batch prediction.\n\nThere is one key difference between using batch prediction and using online prediction:\n\n* Prediction Service: Does an on-demand prediction for the entire set of instances (i.e., one or more data items) and returns the results in real-time.\n\n* Batch Prediction Service: Does a queued (batch) prediction for the entire set of instances in the background and stores the results in a Cloud Storage bucket when ready.",
"_____no_output_____"
],
[
"### Costs\n\nThis tutorial uses billable components of Google Cloud (GCP):\n\n* Vertex AI\n* Cloud Storage\n\nLearn about [Vertex AI\npricing](https://cloud.google.com/vertex-ai/pricing) and [Cloud Storage\npricing](https://cloud.google.com/storage/pricing), and use the [Pricing\nCalculator](https://cloud.google.com/products/calculator/)\nto generate a cost estimate based on your projected usage.",
"_____no_output_____"
],
[
"## Installation\n\nInstall the latest version of Vertex client library.",
"_____no_output_____"
]
],
[
[
"import os\nimport sys\n\n# Google Cloud Notebook\nif os.path.exists(\"/opt/deeplearning/metadata/env_version\"):\n USER_FLAG = '--user'\nelse:\n USER_FLAG = ''\n\n! pip3 install -U google-cloud-aiplatform $USER_FLAG",
"_____no_output_____"
]
],
[
[
"Install the latest GA version of *google-cloud-storage* library as well.",
"_____no_output_____"
]
],
[
[
"! pip3 install -U google-cloud-storage $USER_FLAG",
"_____no_output_____"
]
],
[
[
"### Restart the kernel\n\nOnce you've installed the Vertex client library and Google *cloud-storage*, you need to restart the notebook kernel so it can find the packages.",
"_____no_output_____"
]
],
[
[
"if not os.getenv(\"IS_TESTING\"):\n # Automatically restart kernel after installs\n import IPython\n app = IPython.Application.instance()\n app.kernel.do_shutdown(True)",
"_____no_output_____"
]
],
[
[
"## Before you begin\n\n### GPU runtime\n\n*Make sure you're running this notebook in a GPU runtime if you have that option. In Colab, select* **Runtime > Change Runtime Type > GPU**\n\n### Set up your Google Cloud project\n\n**The following steps are required, regardless of your notebook environment.**\n\n1. [Select or create a Google Cloud project](https://console.cloud.google.com/cloud-resource-manager). When you first create an account, you get a $300 free credit towards your compute/storage costs.\n\n2. [Make sure that billing is enabled for your project.](https://cloud.google.com/billing/docs/how-to/modify-project)\n\n3. [Enable the Vertex APIs and Compute Engine APIs.](https://console.cloud.google.com/flows/enableapi?apiid=ml.googleapis.com,compute_component)\n\n4. [The Google Cloud SDK](https://cloud.google.com/sdk) is already installed in Google Cloud Notebook.\n\n5. Enter your project ID in the cell below. Then run the cell to make sure the\nCloud SDK uses the right project for all the commands in this notebook.\n\n**Note**: Jupyter runs lines prefixed with `!` as shell commands, and it interpolates Python variables prefixed with `$` into these commands.",
"_____no_output_____"
]
],
[
[
"PROJECT_ID = \"[your-project-id]\" #@param {type:\"string\"}",
"_____no_output_____"
],
[
"if PROJECT_ID == \"\" or PROJECT_ID is None or PROJECT_ID == \"[your-project-id]\":\n # Get your GCP project id from gcloud\n shell_output = !gcloud config list --format 'value(core.project)' 2>/dev/null\n PROJECT_ID = shell_output[0]\n print(\"Project ID:\", PROJECT_ID)",
"_____no_output_____"
],
[
"! gcloud config set project $PROJECT_ID",
"_____no_output_____"
]
],
[
[
"#### Region\n\nYou can also change the `REGION` variable, which is used for operations\nthroughout the rest of this notebook. Below are regions supported for Vertex. We recommend that you choose the region closest to you.\n\n- Americas: `us-central1`\n- Europe: `europe-west4`\n- Asia Pacific: `asia-east1`\n\nYou may not use a multi-regional bucket for training with Vertex. Not all regions provide support for all Vertex services. For the latest support per region, see the [Vertex locations documentation](https://cloud.google.com/vertex-ai/docs/general/locations)",
"_____no_output_____"
]
],
[
[
"REGION = 'us-central1' #@param {type: \"string\"}",
"_____no_output_____"
]
],
[
[
"#### Timestamp\n\nIf you are in a live tutorial session, you might be using a shared test account or project. To avoid name collisions between users on resources created, you create a timestamp for each instance session, and append onto the name of resources which will be created in this tutorial.",
"_____no_output_____"
]
],
[
[
"from datetime import datetime\n\nTIMESTAMP = datetime.now().strftime(\"%Y%m%d%H%M%S\")",
"_____no_output_____"
]
],
[
[
"### Authenticate your Google Cloud account\n\n**If you are using Google Cloud Notebook**, your environment is already authenticated. Skip this step.\n\n**If you are using Colab**, run the cell below and follow the instructions when prompted to authenticate your account via oAuth.\n\n**Otherwise**, follow these steps:\n\nIn the Cloud Console, go to the [Create service account key](https://console.cloud.google.com/apis/credentials/serviceaccountkey) page.\n\n**Click Create service account**.\n\nIn the **Service account name** field, enter a name, and click **Create**.\n\nIn the **Grant this service account access to project** section, click the Role drop-down list. Type \"Vertex\" into the filter box, and select **Vertex Administrator**. Type \"Storage Object Admin\" into the filter box, and select **Storage Object Admin**.\n\nClick Create. A JSON file that contains your key downloads to your local environment.\n\nEnter the path to your service account key as the GOOGLE_APPLICATION_CREDENTIALS variable in the cell below and run the cell.",
"_____no_output_____"
]
],
[
[
"# If you are running this notebook in Colab, run this cell and follow the\n# instructions to authenticate your GCP account. This provides access to your\n# Cloud Storage bucket and lets you submit training jobs and prediction\n# requests.\n\n# If on Google Cloud Notebook, then don't execute this code\nif not os.path.exists(\"/opt/deeplearning/metadata/env_version\"):\n if \"google.colab\" in sys.modules:\n from google.colab import auth as google_auth\n\n google_auth.authenticate_user()\n\n # If you are running this notebook locally, replace the string below with the\n # path to your service account key and run this cell to authenticate your GCP\n # account.\n elif not os.getenv(\"IS_TESTING\"):\n %env GOOGLE_APPLICATION_CREDENTIALS ''",
"_____no_output_____"
]
],
[
[
"### Create a Cloud Storage bucket\n\n**The following steps are required, regardless of your notebook environment.**\n\nThis tutorial is designed to use training data that is in a public Cloud Storage bucket and a local Cloud Storage bucket for your batch predictions. You may alternatively use your own training data that you have stored in a local Cloud Storage bucket.\n\nSet the name of your Cloud Storage bucket below. Bucket names must be globally unique across all Google Cloud projects, including those outside of your organization.",
"_____no_output_____"
]
],
[
[
"BUCKET_NAME = \"gs://[your-bucket-name]\" #@param {type:\"string\"}",
"_____no_output_____"
],
[
"if BUCKET_NAME == \"\" or BUCKET_NAME is None or BUCKET_NAME == \"gs://[your-bucket-name]\":\n BUCKET_NAME = \"gs://\" + PROJECT_ID + \"aip-\" + TIMESTAMP",
"_____no_output_____"
]
],
[
[
"**Only if your bucket doesn't already exist**: Run the following cell to create your Cloud Storage bucket.",
"_____no_output_____"
]
],
[
[
"! gsutil mb -l $REGION $BUCKET_NAME",
"_____no_output_____"
]
],
[
[
"Finally, validate access to your Cloud Storage bucket by examining its contents:",
"_____no_output_____"
]
],
[
[
"! gsutil ls -al $BUCKET_NAME",
"_____no_output_____"
]
],
[
[
"### Set up variables\n\nNext, set up some variables used throughout the tutorial.\n### Import libraries and define constants",
"_____no_output_____"
],
[
"#### Import Vertex client library\n\nImport the Vertex client library into our Python environment.",
"_____no_output_____"
]
],
[
[
"import time\n\nfrom google.cloud.aiplatform import gapic as aip\nfrom google.protobuf import json_format\nfrom google.protobuf.json_format import MessageToJson, ParseDict\nfrom google.protobuf.struct_pb2 import Struct, Value",
"_____no_output_____"
]
],
[
[
"#### Vertex constants\n\nSetup up the following constants for Vertex:\n\n- `API_ENDPOINT`: The Vertex API service endpoint for dataset, model, job, pipeline and endpoint services.\n- `PARENT`: The Vertex location root path for dataset, model, job, pipeline and endpoint resources.",
"_____no_output_____"
]
],
[
[
"# API service endpoint\nAPI_ENDPOINT = \"{}-aiplatform.googleapis.com\".format(REGION)\n\n# Vertex location root path for your dataset, model and endpoint resources\nPARENT = \"projects/\" + PROJECT_ID + \"/locations/\" + REGION",
"_____no_output_____"
]
],
[
[
"#### AutoML constants\n\nSet constants unique to AutoML datasets and training:\n\n- Dataset Schemas: Tells the `Dataset` resource service which type of dataset it is.\n- Data Labeling (Annotations) Schemas: Tells the `Dataset` resource service how the data is labeled (annotated).\n- Dataset Training Schemas: Tells the `Pipeline` resource service the task (e.g., classification) to train the model for.",
"_____no_output_____"
]
],
[
[
"# Video Dataset type\nDATA_SCHEMA = 'gs://google-cloud-aiplatform/schema/dataset/metadata/video_1.0.0.yaml'\n# Video Labeling type\nLABEL_SCHEMA = \"gs://google-cloud-aiplatform/schema/dataset/ioformat/video_action_recognition_io_format_1.0.0.yaml\"\n# Video Training task\nTRAINING_SCHEMA = \"gs://google-cloud-aiplatform/schema/trainingjob/definition/automl_video_action_recognition_1.0.0.yaml\"",
"_____no_output_____"
]
],
[
[
"#### Hardware Accelerators\n\nSet the hardware accelerators (e.g., GPU), if any, for prediction.\n\nSet the variable `DEPLOY_GPU/DEPLOY_NGPU` to use a container image supporting a GPU and the number of GPUs allocated to the virtual machine (VM) instance. For example, to use a GPU container image with 4 Nvidia Telsa K80 GPUs allocated to each VM, you would specify:\n\n (aip.AcceleratorType.NVIDIA_TESLA_K80, 4)\n\nFor GPU, available accelerators include:\n - aip.AcceleratorType.NVIDIA_TESLA_K80\n - aip.AcceleratorType.NVIDIA_TESLA_P100\n - aip.AcceleratorType.NVIDIA_TESLA_P4\n - aip.AcceleratorType.NVIDIA_TESLA_T4\n - aip.AcceleratorType.NVIDIA_TESLA_V100\n\nOtherwise specify `(None, None)` to use a container image to run on a CPU.",
"_____no_output_____"
]
],
[
[
"if os.getenv(\"IS_TESTING_DEPOLY_GPU\"):\n DEPLOY_GPU, DEPLOY_NGPU = (aip.AcceleratorType.NVIDIA_TESLA_K80, int(os.getenv(\"IS_TESTING_DEPOLY_GPU\")))\nelse:\n DEPLOY_GPU, DEPLOY_NGPU = (aip.AcceleratorType.NVIDIA_TESLA_K80, 1)",
"_____no_output_____"
]
],
[
[
"#### Container (Docker) image\n\nFor AutoML batch prediction, the container image for the serving binary is pre-determined by the Vertex prediction service. More specifically, the service will pick the appropriate container for the model depending on the hardware accelerator you selected.",
"_____no_output_____"
],
[
"#### Machine Type\n\nNext, set the machine type to use for prediction.\n\n- Set the variable `DEPLOY_COMPUTE` to configure the compute resources for the VM you will use for prediction.\n - `machine type`\n - `n1-standard`: 3.75GB of memory per vCPU.\n - `n1-highmem`: 6.5GB of memory per vCPU\n - `n1-highcpu`: 0.9 GB of memory per vCPU\n - `vCPUs`: number of \\[2, 4, 8, 16, 32, 64, 96 \\]\n\n*Note: You may also use n2 and e2 machine types for training and deployment, but they do not support GPUs*",
"_____no_output_____"
]
],
[
[
"if os.getenv(\"IS_TESTING_DEPLOY_MACHINE\"):\n MACHINE_TYPE = os.getenv(\"IS_TESTING_DEPLOY_MACHINE\")\nelse:\n MACHINE_TYPE = 'n1-standard'\n\nVCPU = '4'\nDEPLOY_COMPUTE = MACHINE_TYPE + '-' + VCPU\nprint('Deploy machine type', DEPLOY_COMPUTE)",
"_____no_output_____"
]
],
[
[
"# Tutorial\n\nNow you are ready to start creating your own AutoML video action recognition model.",
"_____no_output_____"
],
[
"## Set up clients\n\nThe Vertex client library works as a client/server model. On your side (the Python script) you will create a client that sends requests and receives responses from the Vertex server.\n\nYou will use different clients in this tutorial for different steps in the workflow. So set them all up upfront.\n\n- Dataset Service for `Dataset` resources.\n- Model Service for `Model` resources.\n- Pipeline Service for training.\n- Job Service for batch prediction and custom training.",
"_____no_output_____"
]
],
[
[
"# client options same for all services\nclient_options = {\"api_endpoint\": API_ENDPOINT}\n\n\ndef create_dataset_client():\n client = aip.DatasetServiceClient(\n client_options=client_options\n )\n return client\n\n\ndef create_model_client():\n client = aip.ModelServiceClient(\n client_options=client_options\n )\n return client\n\n\ndef create_pipeline_client():\n client = aip.PipelineServiceClient(\n client_options=client_options\n )\n return client\n\n\ndef create_job_client():\n client = aip.JobServiceClient(\n client_options=client_options\n )\n return client\n\n\nclients = {}\nclients['dataset'] = create_dataset_client()\nclients['model'] = create_model_client()\nclients['pipeline'] = create_pipeline_client()\nclients['job'] = create_job_client()\n\nfor client in clients.items():\n print(client)",
"_____no_output_____"
]
],
[
[
"## Dataset\n\nNow that your clients are ready, your first step in training a model is to create a managed dataset instance, and then upload your labeled data to it.\n\n### Create `Dataset` resource instance\n\nUse the helper function `create_dataset` to create the instance of a `Dataset` resource. This function does the following:\n\n1. Uses the dataset client service.\n2. Creates an Vertex `Dataset` resource (`aip.Dataset`), with the following parameters:\n - `display_name`: The human-readable name you choose to give it.\n - `metadata_schema_uri`: The schema for the dataset type.\n3. Calls the client dataset service method `create_dataset`, with the following parameters:\n - `parent`: The Vertex location root path for your `Database`, `Model` and `Endpoint` resources.\n - `dataset`: The Vertex dataset object instance you created.\n4. The method returns an `operation` object.\n\nAn `operation` object is how Vertex handles asynchronous calls for long running operations. While this step usually goes fast, when you first use it in your project, there is a longer delay due to provisioning.\n\nYou can use the `operation` object to get status on the operation (e.g., create `Dataset` resource) or to cancel the operation, by invoking an operation method:\n\n| Method | Description |\n| ----------- | ----------- |\n| result() | Waits for the operation to complete and returns a result object in JSON format. |\n| running() | Returns True/False on whether the operation is still running. |\n| done() | Returns True/False on whether the operation is completed. |\n| canceled() | Returns True/False on whether the operation was canceled. |\n| cancel() | Cancels the operation (this may take up to 30 seconds). |",
"_____no_output_____"
]
],
[
[
"TIMEOUT = 90\n\ndef create_dataset(name, schema, labels=None, timeout=TIMEOUT):\n start_time = time.time()\n try:\n dataset = aip.Dataset(display_name=name, metadata_schema_uri=schema, labels=labels)\n\n operation = clients['dataset'].create_dataset(parent=PARENT, dataset=dataset)\n print(\"Long running operation:\", operation.operation.name)\n result = operation.result(timeout=TIMEOUT)\n print(\"time:\", time.time() - start_time)\n print(\"response\")\n print(\" name:\", result.name)\n print(\" display_name:\", result.display_name)\n print(\" metadata_schema_uri:\", result.metadata_schema_uri)\n print(\" metadata:\", dict(result.metadata))\n print(\" create_time:\", result.create_time)\n print(\" update_time:\", result.update_time)\n print(\" etag:\", result.etag)\n print(\" labels:\", dict(result.labels))\n return result\n except Exception as e:\n print(\"exception:\", e)\n return None\n\n\nresult = create_dataset(\"golf-\" + TIMESTAMP, DATA_SCHEMA)",
"_____no_output_____"
],
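[
"# A minimal sketch (added for illustration, not part of the original tutorial): a polling\n# helper built from the operation methods tabulated above. It works with any long running\n# operation object returned by the Vertex clients; `time` was imported in an earlier cell.\ndef wait_for_operation(operation, poll_secs=10):\n    # done() flips to True once the operation finishes; result() then returns the response\n    while not operation.done():\n        print('operation running:', operation.running())\n        time.sleep(poll_secs)\n    return operation.result()",
"_____no_output_____"
]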
],
[
[
"Now save the unique dataset identifier for the `Dataset` resource instance you created.",
"_____no_output_____"
]
],
[
[
"# The full unique ID for the dataset\ndataset_id = result.name\n# The short numeric ID for the dataset\ndataset_short_id = dataset_id.split('/')[-1]\n\nprint(dataset_id)",
"_____no_output_____"
]
],
[
[
"### Data preparation\n\nThe Vertex `Dataset` resource for video has some requirements for your data.\n\n- Videos must be stored in a Cloud Storage bucket.\n- Each video file must be in a video format (MPG, AVI, ...).\n- There must be an index file stored in your Cloud Storage bucket that contains the path and label for each video.\n- The index file must be either CSV or JSONL.",
"_____no_output_____"
],
[
"#### CSV\n\nFor video action recognition, the CSV index file has a few requirements:\n\n- No heading.\n- First column is the Cloud Storage path to the video.\n- Second column is the time offset for the start of the video segment to analyze.\n- Third column is the time offset for the end of the video segment to analyze.\n- Fourth column is label for the action (e.g., swing).\n- Fifth column is the time offset for the recognized action.",
"_____no_output_____"
],
[
"#### Location of Cloud Storage training data.\n\nNow set the variable `IMPORT_FILE` to the location of the CSV index file in Cloud Storage.",
"_____no_output_____"
]
],
[
[
"IMPORT_FILES = ['gs://automl-video-demo-data/hmdb_golf_swing_train.csv', 'gs://automl-video-demo-data/hmdb_golf_swing_test.csv']",
"_____no_output_____"
],
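[
"# Illustrative only (not a real row from the golf dataset): the five-column layout\n# described above -- video path, segment start, segment end, action label, and the\n# time offset of the recognized action. The path below is hypothetical.\nexample_row = 'gs://my-bucket/videos/swing_001.avi,0.0,5.0,swing,2.1'\nprint(example_row.split(','))",
"_____no_output_____"
]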
],
[
[
"#### Quick peek at your data\n\nYou will use a version of the Golf Swings dataset that is stored in a public Cloud Storage bucket, using a CSV index file.\n\nStart by doing a quick peek at the data. You count the number of examples by counting the number of rows in the CSV index file (`wc -l`) and then peek at the first few rows.",
"_____no_output_____"
]
],
[
[
"if 'IMPORT_FILES' in globals():\n FILE = IMPORT_FILES[0]\nelse:\n FILE = IMPORT_FILE\n\ncount = ! gsutil cat $FILE | wc -l\nprint(\"Number of Examples\", int(count[0]))\n\nprint(\"First 10 rows\")\n! gsutil cat $FILE | head",
"_____no_output_____"
]
],
[
[
"### Import data\n\nNow, import the data into your Vertex Dataset resource. Use this helper function `import_data` to import the data. The function does the following:\n\n- Uses the `Dataset` client.\n- Calls the client method `import_data`, with the following parameters:\n - `name`: The human readable name you give to the `Dataset` resource (e.g., golf).\n - `import_configs`: The import configuration.\n\n- `import_configs`: A Python list containing a dictionary, with the key/value entries:\n - `gcs_sources`: A list of URIs to the paths of the one or more index files.\n - `import_schema_uri`: The schema identifying the labeling type.\n\nThe `import_data()` method returns a long running `operation` object. This will take a few minutes to complete. If you are in a live tutorial, this would be a good time to ask questions, or take a personal break.",
"_____no_output_____"
]
],
[
[
"def import_data(dataset, gcs_sources, schema):\n config = [{\n 'gcs_source': {'uris': gcs_sources},\n 'import_schema_uri': schema\n }]\n print(\"dataset:\", dataset_id)\n start_time = time.time()\n try:\n operation = clients['dataset'].import_data(name=dataset_id, import_configs=config)\n print(\"Long running operation:\", operation.operation.name)\n\n result = operation.result()\n print(\"result:\", result)\n print(\"time:\", int(time.time() - start_time), \"secs\")\n print(\"error:\", operation.exception())\n print(\"meta :\", operation.metadata)\n print(\"after: running:\", operation.running(), \"done:\", operation.done(), \"cancelled:\", operation.cancelled())\n\n return operation\n except Exception as e:\n print(\"exception:\", e)\n return None\n\n\nimport_data(dataset_id, IMPORT_FILES, LABEL_SCHEMA)",
"_____no_output_____"
]
],
[
[
"## Train the model\n\nNow train an AutoML video action recognition model using your Vertex `Dataset` resource. To train the model, do the following steps:\n\n1. Create an Vertex training pipeline for the `Dataset` resource.\n2. Execute the pipeline to start the training.",
"_____no_output_____"
],
[
"### Create a training pipeline\n\nYou may ask, what do we use a pipeline for? You typically use pipelines when the job (such as training) has multiple steps, generally in sequential order: do step A, do step B, etc. By putting the steps into a pipeline, we gain the benefits of:\n\n1. Being reusable for subsequent training jobs.\n2. Can be containerized and ran as a batch job.\n3. Can be distributed.\n4. All the steps are associated with the same pipeline job for tracking progress.\n\nUse this helper function `create_pipeline`, which takes the following parameters:\n\n- `pipeline_name`: A human readable name for the pipeline job.\n- `model_name`: A human readable name for the model.\n- `dataset`: The Vertex fully qualified dataset identifier.\n- `schema`: The dataset labeling (annotation) training schema.\n- `task`: A dictionary describing the requirements for the training job.\n\nThe helper function calls the `Pipeline` client service'smethod `create_pipeline`, which takes the following parameters:\n\n- `parent`: The Vertex location root path for your `Dataset`, `Model` and `Endpoint` resources.\n- `training_pipeline`: the full specification for the pipeline training job.\n\nLet's look now deeper into the *minimal* requirements for constructing a `training_pipeline` specification:\n\n- `display_name`: A human readable name for the pipeline job.\n- `training_task_definition`: The dataset labeling (annotation) training schema.\n- `training_task_inputs`: A dictionary describing the requirements for the training job.\n- `model_to_upload`: A human readable name for the model.\n- `input_data_config`: The dataset specification.\n - `dataset_id`: The Vertex dataset identifier only (non-fully qualified) -- this is the last part of the fully-qualified identifier.\n - `fraction_split`: If specified, the percentages of the dataset to use for training, test and validation. Otherwise, the percentages are automatically selected by AutoML.\n - Note for video, validation split is not supported -- only training and test.",
"_____no_output_____"
]
],
[
[
" def create_pipeline(pipeline_name, model_name, dataset, schema, task):\n\n dataset_id = dataset.split('/')[-1]\n\n input_config = {'dataset_id': dataset_id,\n 'fraction_split': {\n 'training_fraction': 0.8,\n 'test_fraction': 0.2\n }}\n\n training_pipeline = {\n \"display_name\": pipeline_name,\n \"training_task_definition\": schema,\n \"training_task_inputs\": task,\n \"input_data_config\": input_config,\n \"model_to_upload\": {\"display_name\": model_name},\n }\n\n try:\n pipeline = clients['pipeline'].create_training_pipeline(parent=PARENT, training_pipeline=training_pipeline)\n print(pipeline)\n except Exception as e:\n print(\"exception:\", e)\n return None\n return pipeline",
"_____no_output_____"
]
],
[
[
"### Construct the task requirements\n\nNext, construct the task requirements. Unlike other parameters which take a Python (JSON-like) dictionary, the `task` field takes a Google protobuf Struct, which is very similar to a Python dictionary. Use the `json_format.ParseDict` method for the conversion.\n\nThe minimal fields you need to specify are:\n\n- `model_type`: The type of deployed model, ex. CLOUD for deploying to Google Cloud.\n\nFinally, create the pipeline by calling the helper function `create_pipeline`, which returns an instance of a training pipeline object.",
"_____no_output_____"
]
],
[
[
"PIPE_NAME = \"golf_pipe-\" + TIMESTAMP\nMODEL_NAME = \"golf_model-\" + TIMESTAMP\n\ntask = json_format.ParseDict({'model_type': \"CLOUD\",\n }, Value())\n\nresponse = create_pipeline(PIPE_NAME, MODEL_NAME, dataset_id, TRAINING_SCHEMA, task)",
"_____no_output_____"
]
],
[
[
"Now save the unique identifier of the training pipeline you created.",
"_____no_output_____"
]
],
[
[
"# The full unique ID for the pipeline\npipeline_id = response.name\n# The short numeric ID for the pipeline\npipeline_short_id = pipeline_id.split('/')[-1]\n\nprint(pipeline_id)",
"_____no_output_____"
]
],
[
[
"### Get information on a training pipeline\n\nNow get pipeline information for just this training pipeline instance. The helper function gets the job information for just this job by calling the the job client service's `get_training_pipeline` method, with the following parameter:\n\n- `name`: The Vertex fully qualified pipeline identifier.\n\nWhen the model is done training, the pipeline state will be `PIPELINE_STATE_SUCCEEDED`.",
"_____no_output_____"
]
],
[
[
"def get_training_pipeline(name, silent=False):\n response = clients['pipeline'].get_training_pipeline(name=name)\n if silent:\n return response\n\n print(\"pipeline\")\n print(\" name:\", response.name)\n print(\" display_name:\", response.display_name)\n print(\" state:\", response.state)\n print(\" training_task_definition:\", response.training_task_definition)\n print(\" training_task_inputs:\", dict(response.training_task_inputs))\n print(\" create_time:\", response.create_time)\n print(\" start_time:\", response.start_time)\n print(\" end_time:\", response.end_time)\n print(\" update_time:\", response.update_time)\n print(\" labels:\", dict(response.labels))\n return response\n\n\nresponse = get_training_pipeline(pipeline_id)",
"_____no_output_____"
]
],
[
[
"# Deployment\n\nTraining the above model may take upwards of 240 minutes time.\n\nOnce your model is done training, you can calculate the actual time it took to train the model by subtracting `end_time` from `start_time`. For your model, you will need to know the fully qualified Vertex Model resource identifier, which the pipeline service assigned to it. You can get this from the returned pipeline instance as the field `model_to_deploy.name`.",
"_____no_output_____"
]
],
[
[
"while True:\n response = get_training_pipeline(pipeline_id, True)\n if response.state != aip.PipelineState.PIPELINE_STATE_SUCCEEDED:\n print(\"Training job has not completed:\", response.state)\n model_to_deploy_id = None\n if response.state == aip.PipelineState.PIPELINE_STATE_FAILED:\n raise Exception(\"Training Job Failed\")\n else:\n model_to_deploy = response.model_to_upload\n model_to_deploy_id = model_to_deploy.name\n print(\"Training Time:\", response.end_time - response.start_time)\n break\n time.sleep(60)\n\nprint(\"model to deploy:\", model_to_deploy_id)",
"_____no_output_____"
]
],
[
[
"## Model information\n\nNow that your model is trained, you can get some information on your model.",
"_____no_output_____"
],
[
"## Evaluate the Model resource\n\nNow find out how good the model service believes your model is. As part of training, some portion of the dataset was set aside as the test (holdout) data, which is used by the pipeline service to evaluate the model.",
"_____no_output_____"
],
[
"### List evaluations for all slices\n\nUse this helper function `list_model_evaluations`, which takes the following parameter:\n\n- `name`: The Vertex fully qualified model identifier for the `Model` resource.\n\nThis helper function uses the model client service's `list_model_evaluations` method, which takes the same parameter. The response object from the call is a list, where each element is an evaluation metric.\n\nFor each evaluation -- you probably only have one, you then print all the key names for each metric in the evaluation, and for a small set (`videoActionMetrics`) you will print the result.",
"_____no_output_____"
]
],
[
[
"def list_model_evaluations(name):\n response = clients['model'].list_model_evaluations(parent=name)\n for evaluation in response:\n print(\"model_evaluation\")\n print(\" name:\", evaluation.name)\n print(\" metrics_schema_uri:\", evaluation.metrics_schema_uri)\n metrics = json_format.MessageToDict(evaluation._pb.metrics)\n for metric in metrics.keys():\n print(metric)\n print('videoActionMetrics', metrics['videoActionMetrics'])\n\n\n return evaluation.name\n\n\nlast_evaluation = list_model_evaluations(model_to_deploy_id)",
"_____no_output_____"
]
],
[
[
"## Model deployment for batch prediction\n\nNow deploy the trained Vertex `Model` resource you created for batch prediction. This differs from deploying a `Model` resource for on-demand prediction.\n\nFor online prediction, you:\n\n1. Create an `Endpoint` resource for deploying the `Model` resource to.\n\n2. Deploy the `Model` resource to the `Endpoint` resource.\n\n3. Make online prediction requests to the `Endpoint` resource.\n\nFor batch-prediction, you:\n\n1. Create a batch prediction job.\n\n2. The job service will provision resources for the batch prediction request.\n\n3. The results of the batch prediction request are returned to the caller.\n\n4. The job service will unprovision the resoures for the batch prediction request.",
"_____no_output_____"
],
[
"## Make a batch prediction request\n\nNow do a batch prediction to your deployed model.",
"_____no_output_____"
],
[
"### Get test item(s)\n\nNow do a batch prediction to your Vertex model. You will use arbitrary examples out of the dataset as a test items. Don't be concerned that the examples were likely used in training the model -- we just want to demonstrate how to make a prediction.",
"_____no_output_____"
]
],
[
[
"import json\n\nimport_file = IMPORT_FILES[0]\ntest_items = ! gsutil cat $import_file | head -n2\n\ncols = str(test_items[0]).split(',')\ntest_item_1 = str(cols[0])\ntest_label_1 = str(cols[-1])\n\ncols = str(test_items[1]).split(',')\ntest_item_2 = str(cols[0])\ntest_label_2 = str(cols[-1])\n\nprint(test_item_1, test_label_1)\nprint(test_item_2, test_label_2)",
"_____no_output_____"
]
],
[
[
"### Make a batch input file\n\nNow make a batch input file, which you store in your local Cloud Storage bucket. The batch input file can be either CSV or JSONL. You will use JSONL in this tutorial. For JSONL file, you make one dictionary entry per line for each video. The dictionary contains the key/value pairs:\n\n- `content`: The Cloud Storage path to the video.\n- `mimeType`: The content type. In our example, it is an `avi` file.\n- `timeSegmentStart`: The start timestamp in the video to do prediction on. *Note*, the timestamp must be specified as a string and followed by s (second), m (minute) or h (hour).\n- `timeSegmentEnd`: The end timestamp in the video to do prediction on.",
"_____no_output_____"
]
],
[
[
"import json\n\nimport tensorflow as tf\n\ngcs_input_uri = BUCKET_NAME + '/test.jsonl'\nwith tf.io.gfile.GFile(gcs_input_uri, 'w') as f:\n data = { \"content\": test_item_1, \"mimeType\": \"video/avi\", \"timeSegmentStart\": \"0.0s\", 'timeSegmentEnd': '5.0s' }\n f.write(json.dumps(data) + '\\n')\n data = { \"content\": test_item_2, \"mimeType\": \"video/avi\", \"timeSegmentStart\": \"0.0s\", 'timeSegmentEnd': '5.0s' }\n f.write(json.dumps(data) + '\\n')\n\nprint(gcs_input_uri)\n! gsutil cat $gcs_input_uri",
"_____no_output_____"
]
],
[
[
"### Compute instance scaling\n\nYou have several choices on scaling the compute instances for handling your batch prediction requests:\n\n- Single Instance: The batch prediction requests are processed on a single compute instance.\n - Set the minimum (`MIN_NODES`) and maximum (`MAX_NODES`) number of compute instances to one.\n\n- Manual Scaling: The batch prediction requests are split across a fixed number of compute instances that you manually specified.\n - Set the minimum (`MIN_NODES`) and maximum (`MAX_NODES`) number of compute instances to the same number of nodes. When a model is first deployed to the instance, the fixed number of compute instances are provisioned and batch prediction requests are evenly distributed across them.\n\n- Auto Scaling: The batch prediction requests are split across a scaleable number of compute instances.\n - Set the minimum (`MIN_NODES`) number of compute instances to provision when a model is first deployed and to de-provision, and set the maximum (`MAX_NODES) number of compute instances to provision, depending on load conditions.\n\nThe minimum number of compute instances corresponds to the field `min_replica_count` and the maximum number of compute instances corresponds to the field `max_replica_count`, in your subsequent deployment request.",
"_____no_output_____"
]
],
[
[
"MIN_NODES = 1\nMAX_NODES = 1",
"_____no_output_____"
]
],
[
[
"### Make batch prediction request\n\nNow that your batch of two test items is ready, let's do the batch request. Use this helper function `create_batch_prediction_job`, with the following parameters:\n\n- `display_name`: The human readable name for the prediction job.\n- `model_name`: The Vertex fully qualified identifier for the `Model` resource.\n- `gcs_source_uri`: The Cloud Storage path to the input file -- which you created above.\n- `gcs_destination_output_uri_prefix`: The Cloud Storage path that the service will write the predictions to.\n- `parameters`: Additional filtering parameters for serving prediction results.\n\nThe helper function calls the job client service's `create_batch_prediction_job` metho, with the following parameters:\n\n- `parent`: The Vertex location root path for Dataset, Model and Pipeline resources.\n- `batch_prediction_job`: The specification for the batch prediction job.\n\nLet's now dive into the specification for the `batch_prediction_job`:\n\n- `display_name`: The human readable name for the prediction batch job.\n- `model`: The Vertex fully qualified identifier for the `Model` resource.\n- `dedicated_resources`: The compute resources to provision for the batch prediction job.\n - `machine_spec`: The compute instance to provision. Use the variable you set earlier `DEPLOY_GPU != None` to use a GPU; otherwise only a CPU is allocated.\n - `starting_replica_count`: The number of compute instances to initially provision, which you set earlier as the variable `MIN_NODES`.\n - `max_replica_count`: The maximum number of compute instances to scale to, which you set earlier as the variable `MAX_NODES`.\n- `model_parameters`: Additional filtering parameters for serving prediction results.\n - `confidenceThreshold`: The minimum confidence threshold on doing a prediction.\n - `maxPredictions`: The maximum number of predictions to return per action, sorted by confidence.\n- `input_config`: The input source and format type for the instances to predict.\n - `instances_format`: The format of the batch prediction request file: `csv` or `jsonl`.\n - `gcs_source`: A list of one or more Cloud Storage paths to your batch prediction requests.\n- `output_config`: The output destination and format for the predictions.\n - `prediction_format`: The format of the batch prediction response file: `jsonl` only.\n - `gcs_destination`: The output destination for the predictions.\n\nYou might ask, how does confidence_threshold affect the model accuracy? The threshold won't change the accuracy. What it changes is recall and precision.\n\n - Precision: The higher the precision the more likely what is predicted is the correct prediction, but return fewer predictions. Increasing the confidence threshold increases precision.\n - Recall: The higher the recall the more likely a correct prediction is returned in the result, but return more prediction with incorrect prediction. Decreasing the confidence threshold increases recall.\n\nIn this example, you will predict for precision. You set the confidence threshold to 0.5 and the maximum number of predictions for an action to two. Since, all the confidence values across the classes must add up to one, there are only two possible outcomes:\n\n 1. There is a tie, both 0.5, and returns two predictions.\n 2. One value is above 0.5 and the rest are below 0.5, and returns one prediction.\n\nThis call is an asychronous operation. 
You will print from the response object a few select fields, including:\n\n- `name`: The Vertex fully qualified identifier assigned to the batch prediction job.\n- `display_name`: The human readable name for the prediction batch job.\n- `model`: The Vertex fully qualified identifier for the Model resource.\n- `generate_explanations`: Whether True/False explanations were provided with the predictions (explainability).\n- `state`: The state of the prediction job (pending, running, etc).\n\nSince this call will take a few moments to execute, you will likely get `JobState.JOB_STATE_PENDING` for `state`.",
"_____no_output_____"
]
],
[
[
"BATCH_MODEL = \"golf_batch-\" + TIMESTAMP\n\n\ndef create_batch_prediction_job(display_name, model_name, gcs_source_uri, gcs_destination_output_uri_prefix, parameters=None):\n\n if DEPLOY_GPU:\n machine_spec = {\n \"machine_type\": DEPLOY_COMPUTE,\n \"accelerator_type\": DEPLOY_GPU,\n \"accelerator_count\": DEPLOY_NGPU,\n }\n else:\n machine_spec = {\n \"machine_type\": DEPLOY_COMPUTE,\n \"accelerator_count\": 0,\n }\n\n batch_prediction_job = {\n \"display_name\": display_name,\n # Format: 'projects/{project}/locations/{location}/models/{model_id}'\n \"model\": model_name,\n \"model_parameters\": json_format.ParseDict(parameters, Value()),\n \"input_config\": {\n \"instances_format\": IN_FORMAT,\n \"gcs_source\": {\"uris\": [gcs_source_uri]},\n },\n \"output_config\": {\n \"predictions_format\": OUT_FORMAT,\n \"gcs_destination\": {\"output_uri_prefix\": gcs_destination_output_uri_prefix},\n },\n \"dedicated_resources\": {\n \"machine_spec\": machine_spec,\n \"starting_replica_count\": MIN_NODES,\n \"max_replica_count\": MAX_NODES\n }\n\n }\n response = clients['job'].create_batch_prediction_job(\n parent=PARENT, batch_prediction_job=batch_prediction_job\n )\n print(\"response\")\n print(\" name:\", response.name)\n print(\" display_name:\", response.display_name)\n print(\" model:\", response.model)\n try:\n print(\" generate_explanation:\", response.generate_explanation)\n except:\n pass\n print(\" state:\", response.state)\n print(\" create_time:\", response.create_time)\n print(\" start_time:\", response.start_time)\n print(\" end_time:\", response.end_time)\n print(\" update_time:\", response.update_time)\n print(\" labels:\", response.labels)\n return response\n\n\nIN_FORMAT = 'jsonl'\nOUT_FORMAT = 'jsonl' # [jsonl]\n\nresponse = create_batch_prediction_job(BATCH_MODEL, model_to_deploy_id, gcs_input_uri, BUCKET_NAME,\n {'confidenceThreshold': 0.5, 'maxPredictions': 2})",
"_____no_output_____"
]
],
[
[
"Now get the unique identifier for the batch prediction job you created.",
"_____no_output_____"
]
],
[
[
"# The full unique ID for the batch job\nbatch_job_id = response.name\n# The short numeric ID for the batch job\nbatch_job_short_id = batch_job_id.split('/')[-1]\n\nprint(batch_job_id)",
"_____no_output_____"
]
],
[
[
"### Get information on a batch prediction job\n\nUse this helper function `get_batch_prediction_job`, with the following paramter:\n\n- `job_name`: The Vertex fully qualified identifier for the batch prediction job.\n\nThe helper function calls the job client service's `get_batch_prediction_job` method, with the following paramter:\n\n- `name`: The Vertex fully qualified identifier for the batch prediction job. In this tutorial, you will pass it the Vertex fully qualified identifier for your batch prediction job -- `batch_job_id`\n\nThe helper function will return the Cloud Storage path to where the predictions are stored -- `gcs_destination`.",
"_____no_output_____"
]
],
[
[
"def get_batch_prediction_job(job_name, silent=False):\n response = clients['job'].get_batch_prediction_job(name=job_name)\n if silent:\n return response.output_config.gcs_destination.output_uri_prefix, response.state\n\n print(\"response\")\n print(\" name:\", response.name)\n print(\" display_name:\", response.display_name)\n print(\" model:\", response.model)\n try: # not all data types support explanations\n print(\" generate_explanation:\", response.generate_explanation)\n except:\n pass\n print(\" state:\", response.state)\n print(\" error:\", response.error)\n gcs_destination = response.output_config.gcs_destination\n print(\" gcs_destination\")\n print(\" output_uri_prefix:\", gcs_destination.output_uri_prefix)\n return gcs_destination.output_uri_prefix, response.state\n\n\npredictions, state = get_batch_prediction_job(batch_job_id)",
"_____no_output_____"
]
],
[
[
"### Get the predictions\n\nWhen the batch prediction is done processing, the job state will be `JOB_STATE_SUCCEEDED`.\n\nFinally you view the predictions stored at the Cloud Storage path you set as output. The predictions will be in a JSONL format, which you indicated at the time you made the batch prediction job, under a subfolder starting with the name `prediction`, and under that folder will be a file called `predictions*.jsonl`.\n\nNow display (cat) the contents. You will see multiple JSON objects, one for each prediction.\n\nFor each prediction:\n\n- `content`: The video that was input for the prediction request.\n- `displayName`: The prediction action.\n- `confidence`: The confidence in the prediction between 0 and 1.\n- `timeSegmentStart/timeSegmentEnd`: The time offset of the start and end of the predicted action.",
"_____no_output_____"
]
],
[
[
"def get_latest_predictions(gcs_out_dir):\n ''' Get the latest prediction subfolder using the timestamp in the subfolder name'''\n folders = !gsutil ls $gcs_out_dir\n latest = \"\"\n for folder in folders:\n subfolder = folder.split('/')[-2]\n if subfolder.startswith('prediction-'):\n if subfolder > latest:\n latest = folder[:-1]\n return latest\n\n\nwhile True:\n predictions, state = get_batch_prediction_job(batch_job_id, True)\n if state != aip.JobState.JOB_STATE_SUCCEEDED:\n print(\"The job has not completed:\", state)\n if state == aip.JobState.JOB_STATE_FAILED:\n raise Exception(\"Batch Job Failed\")\n else:\n folder = get_latest_predictions(predictions)\n ! gsutil ls $folder/prediction*.jsonl\n\n ! gsutil cat $folder/prediction*.jsonl\n break\n time.sleep(60)",
"_____no_output_____"
],
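[
"# Sketch (added for illustration): parsing a single prediction record with the fields\n# described above. The record below is hypothetical, not actual model output.\nimport json\n\nrecord = json.loads('{\"content\": \"gs://my-bucket/videos/swing_001.avi\", \"displayName\": \"swing\", \"confidence\": 0.87, \"timeSegmentStart\": \"2.0s\", \"timeSegmentEnd\": \"2.5s\"}')\nprint(record['displayName'], record['confidence'], record['timeSegmentStart'])",
"_____no_output_____"
]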
],
[
[
"# Cleaning up\n\nTo clean up all GCP resources used in this project, you can [delete the GCP\nproject](https://cloud.google.com/resource-manager/docs/creating-managing-projects#shutting_down_projects) you used for the tutorial.\n\nOtherwise, you can delete the individual resources you created in this tutorial:\n\n- Dataset\n- Pipeline\n- Model\n- Endpoint\n- Batch Job\n- Custom Job\n- Hyperparameter Tuning Job\n- Cloud Storage Bucket",
"_____no_output_____"
]
],
[
[
"delete_dataset = True\ndelete_pipeline = True\ndelete_model = True\ndelete_endpoint = True\ndelete_batchjob = True\ndelete_customjob = True\ndelete_hptjob = True\ndelete_bucket = True\n\n# Delete the dataset using the Vertex fully qualified identifier for the dataset\ntry:\n if delete_dataset and 'dataset_id' in globals():\n clients['dataset'].delete_dataset(name=dataset_id)\nexcept Exception as e:\n print(e)\n\n# Delete the training pipeline using the Vertex fully qualified identifier for the pipeline\ntry:\n if delete_pipeline and 'pipeline_id' in globals():\n clients['pipeline'].delete_training_pipeline(name=pipeline_id)\nexcept Exception as e:\n print(e)\n\n# Delete the model using the Vertex fully qualified identifier for the model\ntry:\n if delete_model and 'model_to_deploy_id' in globals():\n clients['model'].delete_model(name=model_to_deploy_id)\nexcept Exception as e:\n print(e)\n\n# Delete the endpoint using the Vertex fully qualified identifier for the endpoint\ntry:\n if delete_endpoint and 'endpoint_id' in globals():\n clients['endpoint'].delete_endpoint(name=endpoint_id)\nexcept Exception as e:\n print(e)\n\n# Delete the batch job using the Vertex fully qualified identifier for the batch job\ntry:\n if delete_batchjob and 'batch_job_id' in globals():\n clients['job'].delete_batch_prediction_job(name=batch_job_id)\nexcept Exception as e:\n print(e)\n\n# Delete the custom job using the Vertex fully qualified identifier for the custom job\ntry:\n if delete_customjob and 'job_id' in globals():\n clients['job'].delete_custom_job(name=job_id)\nexcept Exception as e:\n print(e)\n\n# Delete the hyperparameter tuning job using the Vertex fully qualified identifier for the hyperparameter tuning job\ntry:\n if delete_hptjob and 'hpt_job_id' in globals():\n clients['job'].delete_hyperparameter_tuning_job(name=hpt_job_id)\nexcept Exception as e:\n print(e)\n\nif delete_bucket and 'BUCKET_NAME' in globals():\n ! gsutil rm -r $BUCKET_NAME",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
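"code",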
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
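"code",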
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
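"code",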
"code"
],
[
"markdown"
],
[
"code"
]
] |
e7fac4b67f9b50bd90d726cfb0a9ca81a0ec5d39 | 36,047 | ipynb | Jupyter Notebook | sagemaker-debugger/mnist_tensor_analysis/mnist_tensor_analysis.ipynb | P15241328/amazon-sagemaker-examples | 00cba545be0822474f070321a62d22865187e09b | [
"Apache-2.0"
] | 5 | 2019-01-19T23:53:35.000Z | 2022-01-29T14:04:31.000Z | sagemaker-debugger/mnist_tensor_analysis/mnist_tensor_analysis.ipynb | P15241328/amazon-sagemaker-examples | 00cba545be0822474f070321a62d22865187e09b | [
"Apache-2.0"
] | 4 | 2020-09-26T01:30:01.000Z | 2022-02-10T02:20:35.000Z | sagemaker-debugger/mnist_tensor_analysis/mnist_tensor_analysis.ipynb | P15241328/amazon-sagemaker-examples | 00cba545be0822474f070321a62d22865187e09b | [
"Apache-2.0"
] | 7 | 2020-03-04T22:23:51.000Z | 2021-07-13T14:05:46.000Z | 31.290799 | 503 | 0.558715 | [
[
[
"# Tensor analysis using Amazon SageMaker Debugger\n\nLooking at the distributions of activation inputs/outputs, gradients and weights per layer can give useful insights. For instance, it helps to understand whether the model runs into problems like neuron saturation, whether there are layers in your model that are not learning at all or whether the network consists of too many layers etc. \n\nThe following animation shows the distribution of gradients of a convolutional layer from an example application as the training progresses. We can see that it starts as Gaussian distribution but then becomes more and more narrow. We can also see that the range of gradients starts very small (order of $1e-5$) and becomes even tinier as training progresses. If tiny gradients are observed from the start of training, it is an indication that we should check the hyperparameters of our model. \n\n![](images/example.gif)\n\nIn this notebook we will train a poorly configured neural network and use Amazon SageMaker Debugger with custom rules to aggregate and analyse specific tensors. Before we proceed let us install the smdebug binary which allows us to perform interactive analysis in this notebook. After installing it, please restart the kernel, and when you come back skip this cell.\n\n### Installing smdebug",
"_____no_output_____"
]
],
[
[
"! python -m pip install smdebug",
"_____no_output_____"
]
],
[
[
"### Configuring the inputs for the training job\n\nNow we'll call the Sagemaker MXNet Estimator to kick off a training job . The `entry_point_script` points to the MXNet training script. The users can create a custom *SessionHook* in their training script. If they chose not to create such hook in the training script (similar to the one we will be using in this example) Amazon SageMaker Debugger will create the appropriate *SessionHook* based on specified *DebugHookConfig* parameters.\n\nThe `hyperparameters` are the parameters that will be passed to the training script. We choose `Uniform(1)` as initializer and learning rate of `0.001`. This leads to the model not training well because the model is poorly initialized.\n\nThe goal of a good intialization is \n- to break the symmetry such that parameters do not receive same gradients and updates\n- to keep variance similar across layers\n\nA bad intialization may lead to vanishing or exploiding gradients and the model not training at all. Once the training is running we will look at the distirbutions of activation inputs/outputs, gradients and weights across the training to see how these hyperparameters influenced the training.\n",
"_____no_output_____"
]
],
[
[
"entry_point_script = 'mnist.py'\nbad_hyperparameters = {'initializer': 2, 'lr': 0.001}",
"_____no_output_____"
],
[
"import sagemaker\nfrom sagemaker.mxnet import MXNet\nfrom sagemaker.debugger import DebuggerHookConfig, CollectionConfig\nimport boto3\nimport os\n\nestimator = MXNet(role=sagemaker.get_execution_role(),\n base_job_name='mxnet',\n train_instance_count=1,\n train_instance_type='ml.m5.xlarge',\n train_volume_size=400,\n source_dir='src',\n entry_point=entry_point_script,\n hyperparameters=bad_hyperparameters,\n framework_version='1.6.0',\n py_version='py3',\n debugger_hook_config = DebuggerHookConfig(\n collection_configs=[\n CollectionConfig(\n name=\"all\",\n parameters={\n \"include_regex\": \".*\",\n \"save_interval\": \"100\"\n }\n )\n ]\n )\n )",
"_____no_output_____"
]
],
[
[
"Start the training job",
"_____no_output_____"
]
],
[
[
"estimator.fit(wait=False)",
"_____no_output_____"
]
],
[
[
"### Get S3 location of tensors\n\nWe can get information related to the training job:",
"_____no_output_____"
]
],
[
[
"job_name = estimator.latest_training_job.name\nclient = estimator.sagemaker_session.sagemaker_client\ndescription = client.describe_training_job(TrainingJobName=job_name)\ndescription",
"_____no_output_____"
]
],
[
[
"We can retrieve the S3 location of the tensors:",
"_____no_output_____"
]
],
[
[
"path = estimator.latest_job_debugger_artifacts_path()\nprint('Tensors are stored in: ', path)",
"_____no_output_____"
]
],
[
[
"We can check the status of our training job, by executing `describe_training_job`:",
"_____no_output_____"
]
],
[
[
"job_name = estimator.latest_training_job.name\nprint('Training job name: {}'.format(job_name))\n\nclient = estimator.sagemaker_session.sagemaker_client\n\ndescription = client.describe_training_job(TrainingJobName=job_name)",
"_____no_output_____"
]
],
[
[
"We can access the tensors from S3 once the training job is in status `Training` or `Completed`. In the following code cell we check the job status:",
"_____no_output_____"
]
],
[
[
"import time\n\nif description['TrainingJobStatus'] != 'Completed':\n while description['SecondaryStatus'] not in {'Training', 'Completed'}:\n description = client.describe_training_job(TrainingJobName=job_name)\n primary_status = description['TrainingJobStatus']\n secondary_status = description['SecondaryStatus']\n print('Current job status: [PrimaryStatus: {}, SecondaryStatus: {}]'.format(primary_status, secondary_status))\n time.sleep(15)",
"_____no_output_____"
]
],
[
[
"Once the job is in status `Training` or `Completed`, we can create the trial that allows us to access the tensors in Amazon S3. ",
"_____no_output_____"
]
],
[
[
"from smdebug.trials import create_trial\n\ntrial1 = create_trial(path)",
"_____no_output_____"
]
],
[
[
"We can check the available steps. A step presents one forward and backward pass.",
"_____no_output_____"
]
],
[
[
"trial1.steps()",
"_____no_output_____"
],
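[
"# A quick inspection sketch (not in the original notebook): list a few of the saved\n# tensors and their shapes before writing rules. tensor_names() and value() are the\n# same smdebug Trial accessors used by the custom rules below.\nfirst_step = trial1.steps()[0]\nprint(len(trial1.tensor_names()), 'tensors saved')\nfor tname in trial1.tensor_names(regex='.*relu_output')[:3]:\n    print(tname, trial1.tensor(tname).value(first_step).shape)",
"_____no_output_____"
]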
],
[
[
"As training progresses more steps will become available. \n\nNext we will access specific tensors like weights, gradients and activation outputs and plot their distributions. We will use Amazon SageMaker Debugger and define custom rules to retrieve certain tensors. Rules are supposed to return True or False. However in this notebook we will use custom rules to store dictionaries of aggregated tensors per layer and step, which we then plot afterwards.\n\nA custom rule inherits from the smdebug Rule class and implements the function `invoke_at_step`. This function is called everytime tensors of a new step become available:\n\n```\n\nfrom smdebug.rules.rule import Rule\n\nclass MyCustomRule(Rule):\n def __init__(self, base_trial):\n super().__init__(base_trial)\n \n def invoke_at_step(self, step): \n if np.max(self.base_trial.tensor('conv0_relu_output_0').value(step) < 0.001:\n return True\n return False\n\n``` \n\nAbove example rule checks if the first convolutional layer outputs only small values. If so the rule returns `True` which corresponds to an `Issue found`, otherwise False `No Issue found`.\n\n\n### Activation outputs\nThis rule will use Amazon SageMaker Debugger to retrieve tensors from the ReLU output layers. It sums the activations across batch and steps. If there is a large fraction of ReLUs outputing 0 across many steps it means that the neuron is dying.",
"_____no_output_____"
]
],
[
[
"from smdebug.trials import create_trial\nfrom smdebug.rules.rule_invoker import invoke_rule\nfrom smdebug.exceptions import NoMoreData\nfrom smdebug.rules.rule import Rule\nimport numpy as np\nimport utils\nimport collections\nimport os\nfrom IPython.display import Image",
"_____no_output_____"
],
[
"class ActivationOutputs(Rule):\n def __init__(self, base_trial):\n super().__init__(base_trial) \n self.tensors = collections.OrderedDict() \n \n def invoke_at_step(self, step):\n for tname in self.base_trial.tensor_names(regex='.*relu_output'):\n if \"gradients\" not in tname:\n try:\n tensor = self.base_trial.tensor(tname).value(step)\n if tname not in self.tensors:\n self.tensors[tname] = collections.OrderedDict()\n if step not in self.tensors[tname]:\n self.tensors[tname][step] = 0\n neg_values = np.where(tensor <= 0)[0]\n if len(neg_values) > 0:\n self.logger.info(f\" Step {step} tensor {tname} has {len(neg_values)/tensor.size*100}% activation outputs which are smaller than 0 \")\n batch_over_sum = np.sum(tensor, axis=0)/tensor.shape[0]\n self.tensors[tname][step] += batch_over_sum\n except:\n self.logger.warning(f\"Can not fetch tensor {tname}\")\n return False\n\nrule = ActivationOutputs(trial1)\ntry:\n invoke_rule(rule)\nexcept NoMoreData:\n print('The training has ended and there is no more data to be analyzed. This is expected behavior.')\n",
"_____no_output_____"
]
],
[
[
"Plot the histograms",
"_____no_output_____"
]
],
[
[
"utils.create_interactive_matplotlib_histogram(rule.tensors, filename='images/activation_outputs.gif')",
"_____no_output_____"
],
[
"Image(url='images/activation_outputs.gif')",
"_____no_output_____"
]
],
[
[
"### Activation Inputs\nIn this rule we look at the inputs into activation function, rather than the output. This can be helpful to understand if there are extreme negative or positive values that saturate the activation functions. ",
"_____no_output_____"
]
],
[
[
"class ActivationInputs(Rule):\n def __init__(self, base_trial):\n super().__init__(base_trial) \n self.tensors = collections.OrderedDict() \n \n def invoke_at_step(self, step):\n for tname in self.base_trial.tensor_names(regex='.*relu_input'):\n if \"gradients\" not in tname:\n try:\n tensor = self.base_trial.tensor(tname).value(step)\n if tname not in self.tensors:\n self.tensors[tname] = {}\n if step not in self.tensors[tname]:\n self.tensors[tname][step] = 0\n neg_values = np.where(tensor <= 0)[0]\n if len(neg_values) > 0:\n self.logger.info(f\" Tensor {tname} has {len(neg_values)/tensor.size*100}% activation inputs which are smaller than 0 \")\n batch_over_sum = np.sum(tensor, axis=0)/tensor.shape[0]\n self.tensors[tname][step] += batch_over_sum\n except:\n self.logger.warning(f\"Can not fetch tensor {tname}\")\n return False\n\nrule = ActivationInputs(trial1)\ntry:\n invoke_rule(rule)\nexcept NoMoreData:\n print('The training has ended and there is no more data to be analyzed. This is expected behavior.')\n",
"_____no_output_____"
]
],
[
[
"Plot the histograms",
"_____no_output_____"
]
],
[
[
"utils.create_interactive_matplotlib_histogram(rule.tensors, filename='images/activation_inputs.gif')",
"_____no_output_____"
]
],
[
[
"We can see that second convolutional layer `conv1_relu_input_0` receives only negative input values, which means that all ReLUs in this layer output 0.",
"_____no_output_____"
]
],
[
[
"Image(url='images/activation_inputs.gif')",
"_____no_output_____"
]
],
[
[
"### Gradients\nThe following code retrieves the gradients and plots their distribution. If variance is tiny, that means that the model parameters do not get updated effectively with each training step or that the training has converged to a minimum.",
"_____no_output_____"
]
],
[
[
"class GradientsLayer(Rule):\n def __init__(self, base_trial):\n super().__init__(base_trial) \n self.tensors = collections.OrderedDict() \n \n def invoke_at_step(self, step):\n for tname in self.base_trial.tensor_names(regex='.*gradient'):\n try:\n tensor = self.base_trial.tensor(tname).value(step)\n if tname not in self.tensors:\n self.tensors[tname] = {}\n\n self.logger.info(f\" Tensor {tname} has gradients range: {np.min(tensor)} {np.max(tensor)} \")\n self.tensors[tname][step] = tensor\n except:\n self.logger.warning(f\"Can not fetch tensor {tname}\")\n return False\n\nrule = GradientsLayer(trial1)\ntry:\n invoke_rule(rule)\nexcept NoMoreData:\n print('The training has ended and there is no more data to be analyzed. This is expected behavior.')",
"_____no_output_____"
]
],
[
[
"Plot the histograms",
"_____no_output_____"
]
],
[
[
"utils.create_interactive_matplotlib_histogram(rule.tensors, filename='images/gradients.gif')",
"_____no_output_____"
],
[
"Image(url='images/gradients.gif')",
"_____no_output_____"
]
],
[
[
"### Check variance across layers\nThe rule retrieves gradients, but this time we compare variance of gradient distribution across layers. We want to identify if there is a large difference between the min and max variance per training step. For instance, very deep neural networks may suffer from vanishing gradients the deeper we go. By checking this ratio we can determine if we run into such a situation.",
"_____no_output_____"
]
],
[
[
"class GradientsAcrossLayers(Rule):\n def __init__(self, base_trial, ):\n super().__init__(base_trial) \n self.tensors = collections.OrderedDict() \n \n def invoke_at_step(self, step):\n for tname in self.base_trial.tensor_names(regex='.*gradient'):\n try:\n tensor = self.base_trial.tensor(tname).value(step)\n if step not in self.tensors:\n self.tensors[step] = [np.inf, 0]\n variance = np.var(tensor.flatten())\n if variance < self.tensors[step][0]:\n self.tensors[step][0] = variance\n elif variance > self.tensors[step][1]:\n self.tensors[step][1] = variance \n self.logger.info(f\" Step {step} current ratio: {self.tensors[step][0]} {self.tensors[step][1]} Ratio: {self.tensors[step][1] / self.tensors[step][0]}\") \n except:\n self.logger.warning(f\"Can not fetch tensor {tname}\")\n return False\n\nrule = GradientsAcrossLayers(trial1)\ntry:\n invoke_rule(rule)\nexcept NoMoreData:\n print('The training has ended and there is no more data to be analyzed. This is expected behavior.')",
"_____no_output_____"
]
],
[
[
"Let's check min and max values of the gradients across layers:",
"_____no_output_____"
]
],
[
[
"for step in rule.tensors:\n print(\"Step\", step, \"variance of gradients: \", rule.tensors[step][0], \" to \", rule.tensors[step][1])",
"_____no_output_____"
]
],
[
[
"### Distribution of weights\nThis rule retrieves the weight tensors and checks the variance. If the distribution does not change much across steps it may indicate that the learning rate is too low, that gradients are too small or that the training has converged to a minimum.",
"_____no_output_____"
]
],
[
[
"class WeightRatio(Rule):\n def __init__(self, base_trial, ):\n super().__init__(base_trial) \n self.tensors = collections.OrderedDict() \n \n def invoke_at_step(self, step):\n for tname in self.base_trial.tensor_names(regex='.*weight'):\n if \"gradient\" not in tname:\n try:\n tensor = self.base_trial.tensor(tname).value(step)\n if tname not in self.tensors:\n self.tensors[tname] = {}\n \n self.logger.info(f\" Tensor {tname} has weights with variance: {np.var(tensor.flatten())} \")\n self.tensors[tname][step] = tensor\n except:\n self.logger.warning(f\"Can not fetch tensor {tname}\")\n return False\n\nrule = WeightRatio(trial1)\ntry:\n invoke_rule(rule)\nexcept NoMoreData:\n print('The training has ended and there is no more data to be analyzed. This is expected behavior.')\n",
"_____no_output_____"
]
],
[
[
"Plot the histograms",
"_____no_output_____"
]
],
[
[
"utils.create_interactive_matplotlib_histogram(rule.tensors, filename='images/weights.gif')",
"_____no_output_____"
],
[
"Image(url='images/weights.gif')",
"_____no_output_____"
]
],
[
[
"### Inputs\n\nThis rule retrieves layer inputs excluding activation inputs.",
"_____no_output_____"
]
],
[
[
"class Inputs(Rule):\n def __init__(self, base_trial, ):\n super().__init__(base_trial) \n self.tensors = collections.OrderedDict() \n \n def invoke_at_step(self, step):\n for tname in self.base_trial.tensor_names(regex='.*input'):\n if \"relu\" not in tname:\n try:\n tensor = self.base_trial.tensor(tname).value(step)\n if tname not in self.tensors:\n self.tensors[tname] = {}\n \n self.logger.info(f\" Tensor {tname} has inputs with variance: {np.var(tensor.flatten())} \")\n self.tensors[tname][step] = tensor\n except:\n self.logger.warning(f\"Can not fetch tensor {tname}\")\n return False\n\nrule = Inputs(trial1)\ntry:\n invoke_rule(rule)\nexcept NoMoreData:\n print('The training has ended and there is no more data to be analyzed. This is expected behavior.')\n",
"_____no_output_____"
]
],
[
[
"Plot the histograms",
"_____no_output_____"
]
],
[
[
"utils.create_interactive_matplotlib_histogram(rule.tensors, filename='images/layer_inputs.gif')",
"_____no_output_____"
],
[
"Image(url='images/layer_inputs.gif')",
"_____no_output_____"
]
],
[
[
"### Layer outputs\nThis rule retrieves outputs of layers excluding activation outputs.",
"_____no_output_____"
]
],
[
[
"class Outputs(Rule):\n def __init__(self, base_trial, ):\n super().__init__(base_trial) \n self.tensors = collections.OrderedDict() \n \n def invoke_at_step(self, step):\n for tname in self.base_trial.tensor_names(regex='.*output'):\n if \"relu\" not in tname:\n try:\n tensor = self.base_trial.tensor(tname).value(step)\n if tname not in self.tensors:\n self.tensors[tname] = {}\n \n self.logger.info(f\" Tensor {tname} has inputs with variance: {np.var(tensor.flatten())} \")\n self.tensors[tname][step] = tensor\n except:\n self.logger.warning(f\"Can not fetch tensor {tname}\")\n return False\n\nrule = Outputs(trial1)\ntry:\n invoke_rule(rule)\nexcept NoMoreData:\n print('The training has ended and there is no more data to be analyzed. This is expected behavior.')\n",
"_____no_output_____"
]
],
[
[
"Plot the histograms",
"_____no_output_____"
]
],
[
[
"utils.create_interactive_matplotlib_histogram(rule.tensors, filename='images/layer_outputs.gif')",
"_____no_output_____"
],
[
"Image(url='images/layer_outputs.gif')",
"_____no_output_____"
]
],
[
[
"### Comparison \nIn the previous section we have looked at the distribution of gradients, activation outputs and weights of a model that has not trained well due to poor initialization. Now we will compare some of these distributions with a model that has been well intialized.",
"_____no_output_____"
]
],
[
[
"entry_point_script = 'mnist.py'\nhyperparameters = {'lr': 0.01}",
"_____no_output_____"
],
[
"estimator = MXNet(role=sagemaker.get_execution_role(),\n base_job_name='mxnet',\n train_instance_count=1,\n train_instance_type='ml.m5.xlarge',\n train_volume_size=400,\n source_dir='src',\n entry_point=entry_point_script,\n hyperparameters=hyperparameters,\n framework_version='1.6.0',\n py_version='py3',\n debugger_hook_config = DebuggerHookConfig(\n collection_configs=[\n CollectionConfig(\n name=\"all\",\n parameters={\n \"include_regex\": \".*\",\n \"save_interval\": \"100\"\n }\n )\n ]\n )\n )\n ",
"_____no_output_____"
]
],
[
[
"Start the training job",
"_____no_output_____"
]
],
[
[
"estimator.fit(wait=False)",
"_____no_output_____"
]
],
[
[
"Get S3 path where tensors have been stored",
"_____no_output_____"
]
],
[
[
"path = estimator.latest_job_debugger_artifacts_path()\nprint('Tensors are stored in: ', path)",
"_____no_output_____"
]
],
[
[
"Check the status of the training job:",
"_____no_output_____"
]
],
[
[
"job_name = estimator.latest_training_job.name\nprint('Training job name: {}'.format(job_name))\n\nclient = estimator.sagemaker_session.sagemaker_client\n\ndescription = client.describe_training_job(TrainingJobName=job_name)\n\nif description['TrainingJobStatus'] != 'Completed':\n while description['SecondaryStatus'] not in {'Training', 'Completed'}:\n description = client.describe_training_job(TrainingJobName=job_name)\n primary_status = description['TrainingJobStatus']\n secondary_status = description['SecondaryStatus']\n print('Current job status: [PrimaryStatus: {}, SecondaryStatus: {}]'.format(primary_status, secondary_status))\n time.sleep(15)",
"_____no_output_____"
]
],
[
[
"Now we create a new trial object `trial2`:",
"_____no_output_____"
]
],
[
[
"from smdebug.trials import create_trial\n\ntrial2 = create_trial(path)",
"_____no_output_____"
]
],
[
[
"#### Gradients\n\nLets compare distribution of gradients of the convolutional layers of both trials. `trial` is the trial object of the first training job, `trial2` is the trial object of second training job. We can now easily compare tensors from both training jobs.",
"_____no_output_____"
]
],
[
[
"rule = GradientsLayer(trial1)\ntry:\n invoke_rule(rule)\nexcept NoMoreData:\n print('The training has ended and there is no more data to be analyzed. This is expected behavior.')\n",
"_____no_output_____"
],
[
"dict_gradients = {}\ndict_gradients['gradient/conv0_weight_bad_hyperparameters'] = rule.tensors['gradient/conv0_weight']\ndict_gradients['gradient/conv1_weight_bad_hyperparameters'] = rule.tensors['gradient/conv1_weight']",
"_____no_output_____"
]
],
[
[
"Second trial:",
"_____no_output_____"
]
],
[
[
"rule = GradientsLayer(trial2)\ntry:\n invoke_rule(rule)\nexcept NoMoreData:\n print('The training has ended and there is no more data to be analyzed. This is expected behavior.')\n",
"_____no_output_____"
],
[
"dict_gradients['gradient/conv0_weight_good_hyperparameters'] = rule.tensors['gradient/conv0_weight']\ndict_gradients['gradient/conv1_weight_good_hyperparameters'] = rule.tensors['gradient/conv1_weight']",
"_____no_output_____"
]
],
[
[
"Plot the histograms",
"_____no_output_____"
]
],
[
[
"utils.create_interactive_matplotlib_histogram(dict_gradients, filename='images/gradients_comparison.gif')",
"_____no_output_____"
]
],
[
[
"In the case of the poorly initalized model, gradients are fluctuating a lot leading to very high variance. ",
"_____no_output_____"
]
],
[
[
"Image(url='images/gradients_comparison.gif')",
"_____no_output_____"
]
],
[
[
"#### Activation inputs\n\nLets compare distribution of activation inputs of both trials.",
"_____no_output_____"
]
],
[
[
"rule = ActivationInputs(trial1)\ntry:\n invoke_rule(rule)\nexcept NoMoreData:\n print('The training has ended and there is no more data to be analyzed. This is expected behavior.')\n",
"_____no_output_____"
],
[
"dict_activation_inputs = {}\ndict_activation_inputs['conv0_relu_input_0_bad_hyperparameters'] = rule.tensors['conv0_relu_input_0']\ndict_activation_inputs['conv1_relu_input_0_bad_hyperparameters'] = rule.tensors['conv1_relu_input_0']",
"_____no_output_____"
]
],
[
[
"Second trial",
"_____no_output_____"
]
],
[
[
"rule = ActivationInputs(trial2)\ntry:\n invoke_rule(rule)\nexcept NoMoreData:\n print('The training has ended and there is no more data to be analyzed. This is expected behavior.')\n",
"_____no_output_____"
],
[
"dict_activation_inputs['conv0_relu_input_0_good_hyperparameters'] = rule.tensors['conv0_relu_input_0']\ndict_activation_inputs['conv1_relu_input_0_good_hyperparameters'] = rule.tensors['conv1_relu_input_0']",
"_____no_output_____"
]
],
[
[
"Plot the histograms",
"_____no_output_____"
]
],
[
[
"utils.create_interactive_matplotlib_histogram(dict_activation_inputs, filename='images/activation_inputs_comparison.gif')",
"_____no_output_____"
]
],
[
[
"The distribution of activation inputs into first activation layer `conv0_relu_input_0` look quite similar in both trials. However in the case of the second layer they drastically differ. ",
"_____no_output_____"
]
],
[
[
"Image(url='images/activation_inputs_comparison.gif')",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
e7fad06b9068bf385b4515650ba22feffa990492 | 22,320 | ipynb | Jupyter Notebook | site/ja/tutorials/text/word_embeddings.ipynb | mulka/docs | c285b476d3ca3ff9e031abe9c922fb5a69da9424 | [
"Apache-2.0"
] | null | null | null | site/ja/tutorials/text/word_embeddings.ipynb | mulka/docs | c285b476d3ca3ff9e031abe9c922fb5a69da9424 | [
"Apache-2.0"
] | null | null | null | site/ja/tutorials/text/word_embeddings.ipynb | mulka/docs | c285b476d3ca3ff9e031abe9c922fb5a69da9424 | [
"Apache-2.0"
] | null | null | null | 32.536443 | 429 | 0.546192 | [
[
[
"##### Copyright 2019 The TensorFlow Authors.",
"_____no_output_____"
]
],
[
[
"#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.",
"_____no_output_____"
]
],
[
[
"# 単語埋め込み (Word embeddings)",
"_____no_output_____"
],
[
"<table class=\"tfo-notebook-buttons\" align=\"left\">\n <td>\n <a target=\"_blank\" href=\"https://www.tensorflow.org/tutorials/text/word_embeddings\">\n <img src=\"https://www.tensorflow.org/images/tf_logo_32px.png\" />\n View on TensorFlow.org</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://colab.research.google.com/github/tensorflow/docs/blob/master/site/ja/tutorials/text/word_embeddings.ipynb\">\n <img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" />\n Run in Google Colab</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://github.com/tensorflow/docs/blob/master/site/ja/tutorials/text/word_embeddings.ipynb\">\n <img src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" />\n View source on GitHub</a>\n </td>\n <td>\n <a href=\"https://storage.googleapis.com/tensorflow_docs/docs/site/ja/tutorials/text/word_embeddings.ipynb\"><img src=\"https://www.tensorflow.org/images/download_logo_32px.png\" />Download notebook</a>\n </td>\n</table>",
"_____no_output_____"
],
[
"Note: これらのドキュメントは私たちTensorFlowコミュニティが翻訳したものです。コミュニティによる 翻訳は**ベストエフォート**であるため、この翻訳が正確であることや[英語の公式ドキュメント](https://www.tensorflow.org/?hl=en)の 最新の状態を反映したものであることを保証することはできません。 この翻訳の品質を向上させるためのご意見をお持ちの方は、GitHubリポジトリ[tensorflow/docs](https://github.com/tensorflow/docs)にプルリクエストをお送りください。 コミュニティによる翻訳やレビューに参加していただける方は、 [[email protected] メーリングリスト](https://groups.google.com/a/tensorflow.org/forum/#!forum/docs-ja)にご連絡ください。",
"_____no_output_____"
],
[
"このチュートリアルでは、単語埋め込みを紹介します。このチュートリアルには、小さいデータセットを使って単語埋め込みを最初から学習させ、その埋め込みベクトルを [Embedding Projector](http://projector.tensorflow.org) (下図参照)を使って可視化するためのプログラムがすべて含まれています。\n\n<img src=\"https://github.com/tensorflow/docs/blob/master/site/en/tutorials/text/images/embedding.jpg?raw=1\" alt=\"Screenshot of the embedding projector\" width=\"400\"/>\n\n## テキストを数値で表す\n\n機械学習モデルは、ベクトル(数値の配列)を入力として受け取ります。テキストを扱う際、最初に決めなければならないのは、文字列を機械学習モデルに入力する前に、数値に変換する(あるいはテキストを「ベクトル化」する)ための戦略です。このセクションでは、これを行う3つの戦略を見てみます。\n\n### ワンホット・エンコーディング\n\n最初のアイデアとして、ボキャブラリの中の単語それぞれを「ワンホット」エンコードするというのがあります。 \"The cat sat on the mat\" という文を考えてみましょう。この文に含まれるボキャブラリ(ユニークな単語)は、 (cat, mat, on, sat, the) です。それぞれの単語を表現するため、ボキャブラリの長さに等しいゼロベクトルを作り、その単語に対応するインデックスの場所に 1 を立てます。これを下図で示します。 \n\n<img src=\"https://github.com/tensorflow/docs/blob/master/site/en/tutorials/text/images/one-hot.png?raw=1\" alt=\"Diagram of one-hot encodings\" width=\"400\" />\n\n文をエンコードしたベクトルを作成するには、その後、それぞれの単語のワンホット・ベクトルをつなげればよいのです。\n\nKey point: この手法は非効率です。ワンホット・エンコードされたベクトルは疎(つまり、ほとんどのインデックスではゼロ)です。ボキャブラリに 10,000 の単語があると考えてみましょう。単語をすべてワンホット・エンコードするということは、要素の 99.99% がゼロであるベクトルを作ることになります。\n\n### それぞれの単語をユニークな数値としてエンコードする\n\n2つ目のアプローチとして、それぞれの単語をユニークな数値でエンコードするというのがあります。上記の例をとれば、\"cat\" に 1、\"mat\" に 2、というふうに番号を割り当てることができます。そうすれば、 \"The cat sat on the mat\" という文は、 [5, 1, 4, 3, 5, 2] という密なベクトルで表すことができます。この手法は効率的です。疎なベクトルの代わりに、密な(すべての要素が入っている)ベクトルが得られます。\n\nしかしながら、このアプローチには 2つの欠点があります。\n\n* 整数エンコーディングは勝手に決めたものです(単語間のいかなる関係性も含んでいません)。\n\n* 整数エンコーディングはモデルにとっては解釈しにくいものです。たとえば、線形分類器はそれぞれの特徴量について単一の重みしか学習しません。したがって、2つの単語が似かよっていることと、それらのエンコーディングが似かよっていることの間には、なんの関係もありません。この特徴と重みの組み合わせには意味がありません。\n\n### 単語埋め込み\n\n単語埋め込みを使うと、似たような単語が似たようにエンコードされる、効率的で密な表現が得られます。重要なのは、このエンコーディングを手動で行う必要がないということです。埋め込みは浮動小数点数の密なベクトルです(そのベクトルの長さはあなたが指定するパラメータです)。埋め込みベクトルの値は指定するものではなく、学習されるパラメータです(モデルが密結合レイヤーの重みを学習するように、訓練をとおしてモデルが学習する重みです)。一般的には、(小さいデータセットの場合の)8次元の埋め込みベクトルから、大きなデータセットを扱う 1024次元のものまで見られます。高次元の埋め込みは単語間の細かな関係を取得できますが、学習にはよりたくさんのデータが必要です。\n\n<img src=\"https://github.com/tensorflow/docs/blob/master/site/en/tutorials/text/images/embedding2.png?raw=1\" alt=\"Diagram of an embedding\" width=\"400\"/>\n\n上図は単語埋め込みを図示したものです。それぞれの単語が 4次元の浮動小数点数のベクトルで表されています。埋め込みは「参照テーブル」と考えることもできます。重みが学習された後では、テーブルを参照して、それぞれの単語を対応する密ベクトルにエンコードできます。",
"_____no_output_____"
],
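[
"As a concrete illustration of the first two strategies, here is a minimal NumPy sketch (added for clarity; the variable names are our own and this is not part of the training code below):\n\n```python\nimport numpy as np\n\nsentence = \"the cat sat on the mat\".split()\nvocab = sorted(set(sentence))            # ['cat', 'mat', 'on', 'sat', 'the']\nword_to_index = {w: i for i, w in enumerate(vocab)}\n\n# Integer encoding: one arbitrary id per word.\ninteger_encoded = [word_to_index[w] for w in sentence]\n\n# One-hot encoding: one mostly-zero row of length len(vocab) per word.\none_hot = np.zeros((len(sentence), len(vocab)))\none_hot[np.arange(len(sentence)), integer_encoded] = 1\n\nprint(integer_encoded)  # [4, 0, 3, 2, 4, 1]\nprint(one_hot.shape)    # (6, 5) - sparse and wasteful for a large vocabulary\n```",
"_____no_output_____"
],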
[
"## 設定",
"_____no_output_____"
]
],
[
[
"from __future__ import absolute_import, division, print_function, unicode_literals\n\ntry:\n # %tensorflow_version は Colab 中でのみ使用できます\n !pip install tf-nightly\nexcept Exception:\n pass\nimport tensorflow as tf",
"_____no_output_____"
],
[
"from tensorflow import keras\nfrom tensorflow.keras import layers\n\nimport tensorflow_datasets as tfds\ntfds.disable_progress_bar()",
"_____no_output_____"
]
],
[
[
"## Embedding レイヤーを使う\n\nKeras では単語埋め込みを使うのも簡単です。[Embedding](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Embedding) レイヤーを見てみましょう。",
"_____no_output_____"
],
[
"Embedding レイヤーは、(特定の単語を示す)整数のインデックスに(その埋め込みである)密なベクトルを対応させる参照テーブルとして理解することができます。埋め込みの次元数(あるいはその幅)は、取り組んでいる問題に適した値を実験して求めるパラメータです。これは、Dense レイヤーの中のニューロンの数を実験で求めるのとまったくおなじです。",
"_____no_output_____"
]
],
[
[
"embedding_layer = layers.Embedding(1000, 5)",
"_____no_output_____"
]
],
[
[
"Embedding レイヤーを作成するとき、埋め込みの重みは(ほかのレイヤーとおなじように)ランダムに初期化されます。訓練を通じて、これらの重みはバックプロパゲーションによって徐々に調整されます。いったん訓練が行われると、学習された単語埋め込みは、(モデルを訓練した特定の問題のために学習された結果)単語の間の類似性をおおまかにコード化しています。\n\nEmbedding レイヤーに整数を渡すと、結果はそれぞれの整数が埋め込みテーブルのベクトルに置き換えられます。",
"_____no_output_____"
]
],
[
[
"result = embedding_layer(tf.constant([1,2,3]))\nresult.numpy()",
"_____no_output_____"
]
],
[
[
"テキストあるいはシーケンスの問題では、入力として、Embedding レイヤーは shape が `(samples, sequence_length)` の2次元整数テンソルを取ります。ここで、各エントリは整数のシーケンスです。このレイヤーは、可変長のシーケンスを埋め込みベクトルにすることができます。上記のバッチでは、 `(32, 10)` (長さ10のシーケンス32個のバッチ)や、 `(64, 15)` (長さ15のシーケンス64個のバッチ)を埋め込みレイヤーに投入可能です。\n\n返されたテンソルは入力より 1つ軸が多くなっており、埋め込みベクトルはその最後の新しい軸に沿って並べられます。`(2, 3)` の入力バッチを渡すと、出力は `(2, 3, N)` となります。",
"_____no_output_____"
]
],
[
[
"result = embedding_layer(tf.constant([[0,1,2],[3,4,5]]))\nresult.shape",
"_____no_output_____"
]
],
[
[
"シーケンスのバッチを入力されると、Embedding レイヤーは shape が `(samples, sequence_length, embedding_dimensionality)` の3次元浮動小数点数テンソルを返します。この可変長のシーケンスを、固定長の表現に変換するには、さまざまな標準的なアプローチが存在します。Dense レイヤーに渡す前に、RNNやアテンション、プーリングレイヤーを使うことができます。ここでは、一番単純なのでプーリングを使用します。[RNN\n を使ったテキスト分類](https://github.com/tensorflow/docs/blob/master/site/ja/tutorials/text/text_classification_rnn.ipynb) は次のステップとしてよいチュートリアルでしょう。",
"_____no_output_____"
],
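[
"As a quick illustration of the pooling option (a minimal sketch, assuming only TensorFlow itself):\n\n```python\nimport tensorflow as tf\n\n# A batch of 2 sequences of length 3, embedded in 5 dimensions.\nembedded = tf.random.normal((2, 3, 5))\n\n# Average over the sequence axis: (2, 3, 5) -> (2, 5).\npooled = tf.keras.layers.GlobalAveragePooling1D()(embedded)\nprint(pooled.shape)\n```",
"_____no_output_____"
],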
[
"## 埋め込みを最初から学習する",
"_____no_output_____"
],
[
"IMDB の映画レビューの感情分析器を訓練しようと思います。そのプロセスを通じて、埋め込みを最初から学習します。ここでは、前処理済みのデータセットを使用します。\n\nテキストデータセットを最初からロードする方法については、[テキスト読み込みのチュートリアル](../load_data/text.ipynb)を参照してください。",
"_____no_output_____"
]
],
[
[
"(train_data, test_data), info = tfds.load(\n 'imdb_reviews/subwords8k', \n split = (tfds.Split.TRAIN, tfds.Split.TEST), \n with_info=True, as_supervised=True)",
"_____no_output_____"
]
],
[
[
"エンコーダー(`tfds.features.text.SubwordTextEncoder`)を取得し、すこしボキャブラリを見てみましょう。\n\nボキャブラリ中の \"\\_\" は空白を表しています。ボキャブラリの中にどんなふうに(\"\\_\")で終わる単語全体と、長い単語を構成する単語の一部が含まれているかに注目してください。",
"_____no_output_____"
]
],
[
[
"encoder = info.features['text'].encoder\nencoder.subwords[:20]",
"_____no_output_____"
]
],
[
[
"映画のレビューはそれぞれ長さが異なっているはずです。`padded_batch` メソッドを使ってレビューの長さを標準化します。",
"_____no_output_____"
]
],
[
[
"train_batches = train_data.shuffle(1000).padded_batch(10)\ntest_batches = test_data.shuffle(1000).padded_batch(10)",
"_____no_output_____"
]
],
[
[
"インポートした状態では、レビューのテキストは整数エンコードされています(それぞれの整数がボキャブラリ中の特定の単語あるいは部分単語を表しています)。\n\nあとの方のゼロに注目してください。これは、バッチが一番長いサンプルに合わせてパディングされた結果です。",
"_____no_output_____"
]
],
[
[
"train_batch, train_labels = next(iter(train_batches))\ntrain_batch.numpy()",
"_____no_output_____"
]
],
[
[
"### 単純なモデルの構築\n\n[Keras Sequential API](../../guide/keras) を使ってモデルを定義することにします。今回の場合、モデルは「連続した Bag of Words」スタイルのモデルです。\n\n* 次のレイヤーは Embedding レイヤーです。このレイヤーは整数エンコードされた語彙を受け取り、それぞれの単語のインデックスに対応する埋め込みベクトルをみつけて取り出します。これらのベクトルはモデルの訓練により学習されます。このベクトルは出力配列に次元を追加します。その結果次元は `(batch, sequence, embedding)` となります。\n\n* 次に、GlobalAveragePooling1D レイヤーが、それぞれのサンプルについて、シーケンスの次元で平均を取り、固定長の出力ベクトルを返します。これにより、モデルは可変長の入力を最も簡単な方法で扱えるようになります。\n\n* この固定長のベクトルは、16個の隠れユニットを持つ全結合(Dense)レイヤーに接続されます。\n\n* 最後のレイヤーは、1個の出力ノードを持つ Dense レイヤーです。シグモイド活性化関数を使うことで、値は 0 と 1 の間の値を取り、レビューがポジティブ(好意的)であるかどうかの確率(または確信度)を表します。\n\nCaution: このモデルはマスキングを使用していません。このため、ゼロパディングが入力の一部として扱われ、結果としてパディングの長さが出力に影響を与える可能性があります。これを修正するには[マスキングとパディングのガイド](../../guide/keras/masking_and_padding)を参照してください。",
"_____no_output_____"
]
],
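[
[
"A hedged aside on the Caution above: one simple way to make the model ignore the zero-padding is to pass `mask_zero=True` to the Embedding layer, since `GlobalAveragePooling1D` respects masks. A one-line sketch (this is not the model trained below):\n\n```python\nmasked_embedding = layers.Embedding(encoder.vocab_size, 16, mask_zero=True)\n# Downstream mask-aware layers then exclude padded positions from the average.\n```",
"_____no_output_____"
]
],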
[
[
"embedding_dim=16\n\nmodel = keras.Sequential([\n layers.Embedding(encoder.vocab_size, embedding_dim),\n layers.Dense(16, activation='relu'),\n layers.GlobalAveragePooling1D(),\n layers.Dense(1, activation='sigmoid')\n])\n\nmodel.summary()",
"_____no_output_____"
]
],
[
[
"### モデルのコンパイルと訓練",
"_____no_output_____"
]
],
[
[
"model.compile(optimizer='adam',\n loss='binary_crossentropy',\n metrics=['accuracy'])\n\nhistory = model.fit(\n train_batches,\n epochs=10,\n validation_data=test_batches, validation_steps=20)",
"_____no_output_____"
]
],
[
[
"このアプローチにより、モデルの評価時の正解率は 88% 前後に達します(モデルは過学習しており、訓練時の正解率の方が際立って高いことに注意してください)。",
"_____no_output_____"
]
],
[
[
"import matplotlib.pyplot as plt\n\nhistory_dict = history.history\n\nacc = history_dict['accuracy']\nval_acc = history_dict['val_accuracy']\nloss = history_dict['loss']\nval_loss = history_dict['val_loss']\n\nepochs = range(1, len(acc) + 1)\n\nplt.figure(figsize=(12,9))\nplt.plot(epochs, loss, 'bo', label='Training loss')\nplt.plot(epochs, val_loss, 'b', label='Validation loss')\nplt.title('Training and validation loss')\nplt.xlabel('Epochs')\nplt.ylabel('Loss')\nplt.legend()\nplt.show()\n\nplt.figure(figsize=(12,9))\nplt.plot(epochs, acc, 'bo', label='Training acc')\nplt.plot(epochs, val_acc, 'b', label='Validation acc')\nplt.title('Training and validation accuracy')\nplt.xlabel('Epochs')\nplt.ylabel('Accuracy')\nplt.legend(loc='lower right')\nplt.ylim((0.5,1))\nplt.show()",
"_____no_output_____"
]
],
[
[
"## 学習した埋め込みの取得\n\n次に、訓練によって学習された単語埋め込みを取得してみます。これは、shape が `(vocab_size, embedding-dimension)` の行列になります。",
"_____no_output_____"
]
],
[
[
"e = model.layers[0]\nweights = e.get_weights()[0]\nprint(weights.shape) # shape: (vocab_size, embedding_dim)",
"_____no_output_____"
]
],
[
[
"この重みをディスクに出力します。[Embedding Projector](http://projector.tensorflow.org) を使うため、タブ区切り形式の2つのファイルをアップロードします。(埋め込みを含む)ベクトルのファイルと、(単語を含む)メタデータファイルです。",
"_____no_output_____"
]
],
[
[
"import io\n\nencoder = info.features['text'].encoder\n\nout_v = io.open('vecs.tsv', 'w', encoding='utf-8')\nout_m = io.open('meta.tsv', 'w', encoding='utf-8')\n\nfor num, word in enumerate(encoder.subwords):\n vec = weights[num+1] # 0 はパディングのためスキップ\n out_m.write(word + \"\\n\")\n out_v.write('\\t'.join([str(x) for x in vec]) + \"\\n\")\nout_v.close()\nout_m.close()",
"_____no_output_____"
]
],
[
[
"このチュートリアルを [Colaboratory](https://colab.research.google.com) で実行している場合には、下記のコードを使ってこれらのファイルをローカルマシンにダウンロードすることができます(あるいは、ファイルブラウザを使います。*表示 -> 目次 -> ファイル* )。",
"_____no_output_____"
]
],
[
[
"try:\n from google.colab import files\nexcept ImportError:\n pass\nelse:\n files.download('vecs.tsv')\n files.download('meta.tsv')",
"_____no_output_____"
]
],
[
[
"## 埋め込みを可視化する\n\n埋め込みを可視化するため、これらのファイルを Embedding Projector にアップロードします。\n\n[Embedding Projector](http://projector.tensorflow.org/) を開きます(あるいはローカルの TensorBoard でも実行できます)。\n\n* \"Load data\" をクリックします\n\n* 上記で作成した```vecs.tsv``` と ```meta.tsv``` の 2つのファイルをアップロードします\n\n\n学習させた埋め込みが表示されます。単語を探し、最も近い単語を見つけることができます。たとえば、\"beautiful\" という単語を探してみてください。近くに、 \"wonderful\" のような単語が見つかると思います。\n\nNote: 訓練の前に重みが乱数によってどのように初期化されたかによって、結果は少し異なるかもしれません。\n\nNote: 実験として、もっと単純なモデルを使い、より解釈しやすい埋め込みを作ることもできます。`Dense(16)` レイヤーを削除してみてください。このモデルを再度訓練して、埋め込みの可視化をもう一度行ってみましょう。\n\n<img src=\"https://github.com/tensorflow/docs/blob/master/site/en/tutorials/text/images/embedding.jpg?raw=1\" alt=\"Screenshot of the embedding projector\" width=\"400\"/>\n",
"_____no_output_____"
],
[
"## 次のステップ",
"_____no_output_____"
],
[
"このチュートリアルでは、小さなデータセットを使い、単語埋め込みを最初から訓練し、可視化する方法を見てきました。\n\n* リカレントネットワークについて学ぶには、[Keras RNN ガイド](../../guide/keras/rnn.ipynb) を参照してください。\n\n* テキスト分類について更に学ぶには、(全体のワークフローや、どういうときに埋め込みあるいはワンホットエンコーディングを使うべきかについて興味があれば)この実践的なテキスト分類の [ガイド](https://developers.google.com/machine-learning/guides/text-classification/step-2-5) を推奨します。",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
]
] |
e7faef8b6d1c1427272340e09456edaf255c29fe | 1,154 | ipynb | Jupyter Notebook | a.ipynb | justinms/mgd | 232b8637ee49bfe5765742de41c864794cb7ba07 | [
"MIT"
] | null | null | null | a.ipynb | justinms/mgd | 232b8637ee49bfe5765742de41c864794cb7ba07 | [
"MIT"
] | null | null | null | a.ipynb | justinms/mgd | 232b8637ee49bfe5765742de41c864794cb7ba07 | [
"MIT"
] | null | null | null | 18.918033 | 77 | 0.532062 | [
[
[
"from __future__ import print_function\nfrom ipywidgets import interact, interactive, fixed, interact_manual\nimport ipywidgets as widgets\n\ndef f(x):\n return x\n\ninteract(f, x=10);",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code"
]
] |
e7faf0401a80adc9a61f04fce0ae736d61b5b9ab | 101,770 | ipynb | Jupyter Notebook | pca/pca.ipynb | myusernameisuseless/python_for_data_analysis_mailru_mipt | bca30632f1b5c4608de6e1a68ffeb9e84cfc6135 | [
"Apache-2.0"
] | null | null | null | pca/pca.ipynb | myusernameisuseless/python_for_data_analysis_mailru_mipt | bca30632f1b5c4608de6e1a68ffeb9e84cfc6135 | [
"Apache-2.0"
] | null | null | null | pca/pca.ipynb | myusernameisuseless/python_for_data_analysis_mailru_mipt | bca30632f1b5c4608de6e1a68ffeb9e84cfc6135 | [
"Apache-2.0"
] | null | null | null | 102.384306 | 58,556 | 0.788543 | [
[
[
"# Методы обучения без учителя\n## Метод главных компонент",
"_____no_output_____"
],
[
"<font color = 'red'> Внимание! </font> Решение данной задачи предполагает, что у вас установлены библиотека numpy версии 1.16.4 и выше и библиотека scikit-learn версии 0.21.2 и выше. В следующей ячейке мы проверим это. Если у вас установлены более старые версии, обновите их пожалуйста, или воспользуйтесь бесплатным сервисом https://colab.research.google.com , в котором уже всё готово к работе. В архиве есть руководство по началу работы с colab.",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport sklearn",
"_____no_output_____"
]
],
[
[
"В этом задании мы применим метод главных компонент на многомерных данных и постараемся найти оптимальную размерность признаков для решения задачи классификации",
"_____no_output_____"
]
],
[
[
"import pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n%matplotlib inline",
"_____no_output_____"
]
],
[
[
"## Подготовка данных",
"_____no_output_____"
],
[
"Исходными [данными](http://archive.ics.uci.edu/ml/machine-learning-databases/auslan2-mld/auslan.data.html) являются показания различных сенсоров, установленных на руках человека, который умеет общаться на языке жестов.\n\nВ данном случае задача ставится следующим образом: по показаниям датчиков (по 11 сенсоров на каждую руку) определить слово, которое было показано человеком.\n\nКак можно решать такую задачу?\n\nПоказания датчиков представляются в виде временных рядов. Посмотрим на показания для одного из \"слов\"",
"_____no_output_____"
]
],
[
[
"# Загружаем данные сенсоров\ndf_database = pd.read_csv('sign_database.csv')\n\n# Загружаем метки классов\nsign_classes = pd.read_csv('sign_classes.csv', index_col=0, header=0, names=['id', 'class'])",
"_____no_output_____"
],
[
"# Столбец id - идентификаторы \"слов\"\n# Столбец time - метка времени\n# Остальные столбцы - показания серсоров для слова id в момент времени time\n\ndf_database.head()",
"_____no_output_____"
],
[
"# Выберем одно из слов с идентификатором = 0\nsign0 = df_database.query('id == 0').drop(['id'], axis=1).set_index('time')",
"_____no_output_____"
],
[
"sign0.plot()",
"_____no_output_____"
]
],
[
[
"Для каждого из \"слов\" у нас есть набор показаний сенсоров с разных частей руки в каждый момент времени.\n\nИдея нашего подхода будет заключаться в следующем – давайте для каждого сенсора составим набор характеристик (например, разброс значений, максимальное, минимальное, среднее значение, количество \"пиков\", и т.п.) и будем использовать эти новые \"признаки\" для решения задачи классификации.",
"_____no_output_____"
],
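[
"As a minimal hand-rolled sketch of this idea (for illustration only - the notebook itself uses tsfresh below, and `manual_features` is our own name):\n\n```python\n# Aggregate a few simple statistics per word id and per sensor column.\nmanual_features = (df_database\n                   .drop(columns=['time'])\n                   .groupby('id')\n                   .agg(['mean', 'std', 'min', 'max']))\n# Flatten the (sensor, statistic) column MultiIndex.\nmanual_features.columns = ['_'.join(col) for col in manual_features.columns]\nmanual_features.head()\n```",
"_____no_output_____"
],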
[
"## Расчет новых признаков",
"_____no_output_____"
],
[
"Признаки мы будем считать с помощью библиотеки [tsfresh](http://tsfresh.readthedocs.io/en/latest/index.html). Генерация новых признаков может занять много времени, поэтому мы сохранили посчитанные данные, но при желании вы можете повторить вычисления.",
"_____no_output_____"
]
],
[
[
"## Если не хотите долго ждать - не убирайте комментарии\n# from tsfresh.feature_extraction import extract_features\n# from tsfresh.feature_selection import select_features\n# from tsfresh.utilities.dataframe_functions import impute\n# from tsfresh.feature_extraction import ComprehensiveFCParameters, MinimalFCParameters, settings, EfficientFCParameters\n\n\n# sign_features = extract_features(df_database, column_id='id', column_sort='time',\n# default_fc_parameters=EfficientFCParameters(),\n# impute_function=impute)\n\n# sign_features_filtered = select_features(sign_features, s_classes.loc[:, 'target'])\n\n# filepath = './tsfresh_features_filt.csv.gz'\n# sign_features_filtered.to_csv(filepath, compression='gzip')",
"_____no_output_____"
],
[
"filepath = './tsfresh_features_filt.csv'\nsign_features_filtered = pd.read_csv(filepath)",
"_____no_output_____"
],
[
"sign_features_filtered.shape",
"_____no_output_____"
],
[
"sign_features_filtered.head()",
"_____no_output_____"
]
],
[
[
"## Базовая модель",
"_____no_output_____"
],
[
"В результате у нас получилось очень много признаков (аж 10865), давайте применим метод главных компонент, чтобы получить сжатое признаковое представление, сохранив при этом предиктивную силу в модели.",
"_____no_output_____"
]
],
[
[
"from sklearn.model_selection import cross_val_score\nfrom sklearn.model_selection import StratifiedKFold\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.decomposition import PCA\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.preprocessing import LabelEncoder",
"_____no_output_____"
]
],
[
[
"Создадим бейзлайн без уменьшения размерности. Гиперпараметры модели подбирались произвольно",
"_____no_output_____"
]
],
[
[
"# Подготовим данные на вход в модель\n\n# признаки\nX = sign_features_filtered.values\n\n# классы\nenc = LabelEncoder()\nenc.fit(sign_classes.loc[:, 'class'])\nsign_classes.loc[:, 'target'] = enc.transform(sign_classes.loc[:, 'class'])\ny = sign_classes.target.values",
"_____no_output_____"
],
[
"# Будем делать кросс-валидацию на 5 фолдов\ncv = StratifiedKFold(n_splits=5, shuffle=True, random_state=123)\n\nbase_model = Pipeline([\n ('scaler', StandardScaler()),\n ('clf', KNeighborsClassifier(n_neighbors=9))\n])\n\nbase_cv_scores = cross_val_score(base_model, X, y, cv=cv, scoring='accuracy')",
"_____no_output_____"
],
[
"base_cv_scores.mean()",
"_____no_output_____"
]
],
[
[
"Качество базовой модели должно быть в районе 92 процентов.",
"_____no_output_____"
],
[
"## Метод главных компонент",
"_____no_output_____"
],
[
"* Добавьте в пайплайн `base_model` шаг с методом главных компонент. Начиная с версии 0.18 в sklearn добавили разные солверы для PCA. Дополнитенльно задайте в модели следующие параметры: `svd_solder = \"randomized\"` и `random_state=123`.\n* Остальные гиперпараметры модели и способ кросс-валидации оставьте без изменений\n* Найдите такое наименьшее количество главных компонент, что качество нового пайплайна превыcит 80%\n* К качестве ответа укажите долю объяснённой дисперсии при найденной настройке PCA (для этого надо обучить PCA на всех данных). Формат ответа: число в интервале [0, 1] c точностью до сотых.",
"_____no_output_____"
],
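[
"For reference, the share of explained variance that sklearn reports (the sum of `pca.explained_variance_ratio_`) for $k$ retained components out of $d$ features is\n\n$$\\frac{\\sum_{i=1}^{k}\\lambda_i}{\\sum_{j=1}^{d}\\lambda_j},$$\n\nwhere $\\lambda_1 \\geq \\lambda_2 \\geq \\dots$ are the variances along the principal components (the eigenvalues of the data covariance matrix).",
"_____no_output_____"
],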
[
"### *РЕШЕНИЕ*",
"_____no_output_____"
]
],
[
[
"numbers",
"_____no_output_____"
],
[
"numbers = [i for i in range(9, 19)]\nscores = []\n\nfor n in numbers:\n base_model1 = Pipeline([\n ('scaler', StandardScaler()),\n ('pca', PCA(n_components=n, svd_solver='randomized', random_state=123)),\n ('clf', KNeighborsClassifier(n_neighbors=9))\n ])\n scores.append(cross_val_score(base_model1, X, y, cv=cv, scoring='accuracy').mean())",
"_____no_output_____"
],
[
"best_pca = 14",
"_____no_output_____"
],
[
"X = StandardScaler().fit_transform(X)\npca = PCA(n_components=best_pca, svd_solver='randomized', random_state=123)\npca.fit(X)",
"_____no_output_____"
],
[
"plt.plot(pca.explained_variance_ratio_)",
"_____no_output_____"
],
[
"expl = pca.explained_variance_ratio_.sum()",
"_____no_output_____"
]
],
[
[
"# Ответ",
"_____no_output_____"
]
],
[
[
"print('{:.2f}'.format(expl))",
"0.39\n"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
]
] |
e7faf20f3777b2c96c391a95f0dc12b5339f4569 | 33,062 | ipynb | Jupyter Notebook | my_notebooks/eval10_experiment5.ipynb | MichelML/ml-aging | b54470c00450da7d5b50e7be4a1f162f1c4b8531 | [
"Apache-2.0"
] | 7 | 2019-07-08T06:24:53.000Z | 2022-03-22T13:41:00.000Z | my_notebooks/eval10_experiment5.ipynb | MichelML/ml-aging | b54470c00450da7d5b50e7be4a1f162f1c4b8531 | [
"Apache-2.0"
] | null | null | null | my_notebooks/eval10_experiment5.ipynb | MichelML/ml-aging | b54470c00450da7d5b50e7be4a1f162f1c4b8531 | [
"Apache-2.0"
] | 2 | 2019-08-19T13:43:49.000Z | 2019-08-25T02:01:48.000Z | 57.599303 | 1,705 | 0.633235 | [
[
[
"## Load libraries",
"_____no_output_____"
]
],
[
[
"!pip install -q -r requirements.txt",
"\u001b[31mmenpo 0.8.1 has requirement matplotlib<2.0,>=1.4, but you'll have matplotlib 3.0.2 which is incompatible.\u001b[0m\n\u001b[31mmenpo 0.8.1 has requirement pillow<5.0,>=3.0, but you'll have pillow 5.4.1 which is incompatible.\u001b[0m\n\u001b[31mmenpo 0.8.1 has requirement scipy<1.0,>=0.16, but you'll have scipy 1.2.0 which is incompatible.\u001b[0m\n\u001b[33mYou are using pip version 10.0.1, however version 19.2.2 is available.\nYou should consider upgrading via the 'pip install --upgrade pip' command.\u001b[0m\n"
],
[
"import sys\nimport os\nimport numpy as np\nimport pandas as pd\n\nfrom PIL import Image\n\nimport torch\nimport torch.nn as nn\nimport torch.utils.data as D\nfrom torch.optim.lr_scheduler import ExponentialLR\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\n\nfrom torchvision import transforms\n\nfrom ignite.engine import Events\nfrom scripts.ignite import create_supervised_evaluator, create_supervised_trainer\nfrom ignite.metrics import Loss, Accuracy\nfrom ignite.contrib.handlers.tqdm_logger import ProgressBar\nfrom ignite.handlers import EarlyStopping, ModelCheckpoint\nfrom ignite.contrib.handlers import LinearCyclicalScheduler, CosineAnnealingScheduler\n\nfrom tqdm import tqdm_notebook\n\nfrom sklearn.model_selection import train_test_split\n\nfrom efficientnet_pytorch import EfficientNet\n\nfrom scripts.evaluate import eval_model, eval_model_10\n\nimport warnings\nwarnings.filterwarnings('ignore')",
"_____no_output_____"
]
],
[
[
"## Define dataset and model",
"_____no_output_____"
]
],
[
[
"img_dir = '../input/rxrxairgb512'\npath_data = '../input/rxrxaicsv'\ndevice = 'cuda'\nbatch_size = 32\ntorch.manual_seed(0)\nmodel_name = 'efficientnet-b3'",
"_____no_output_____"
],
[
"jitter = (0.6, 1.4)\nclass ImagesDS(D.Dataset):\n # taken textbook from https://arxiv.org/pdf/1812.01187.pdf\n transform_train = transforms.Compose([\n transforms.RandomResizedCrop(448),\n transforms.ColorJitter(brightness=jitter, contrast=jitter, saturation=jitter, hue=.1),\n transforms.RandomHorizontalFlip(p=0.5),\n # PCA Noise should go here,\n transforms.ToTensor(),\n transforms.Normalize(mean=(123.68, 116.779, 103.939), std=(58.393, 57.12, 57.375))\n ])\n \n transform_validation = transforms.Compose([\n transforms.CenterCrop(448),\n transforms.ToTensor(),\n transforms.Normalize(mean=(123.68, 116.779, 103.939), std=(58.393, 57.12, 57.375))\n ])\n\n def __init__(self, df, img_dir=img_dir, mode='train', validation=False, site=1):\n self.records = df.to_records(index=False)\n self.site = site\n self.mode = mode\n self.img_dir = img_dir\n self.len = df.shape[0]\n self.validation = validation\n \n @staticmethod\n def _load_img_as_tensor(file_name, validation):\n with Image.open(file_name) as img:\n if not validation:\n return ImagesDS.transform_train(img)\n else:\n return ImagesDS.transform_validation(img)\n\n def _get_img_path(self, index, site=1):\n experiment, well, plate = self.records[index].experiment, self.records[index].well, self.records[index].plate\n return f'{self.img_dir}/{self.mode}/{experiment}_{plate}_{well}_s{site}.jpeg'\n \n def __getitem__(self, index):\n img1, img2 = [self._load_img_as_tensor(self._get_img_path(index, site), self.validation) for site in [1,2]]\n if self.mode == 'train':\n return img1, img2, int(self.records[index].sirna)\n else:\n return img1, img2, self.records[index].id_code\n\n def __len__(self):\n return self.len\n\n\nclass TestImagesDS(D.Dataset):\n transform = transforms.Compose([\n transforms.RandomCrop(448),\n transforms.ToTensor(),\n transforms.Normalize(mean=(123.68, 116.779, 103.939), std=(58.393, 57.12, 57.375))\n ])\n\n def __init__(self, df, img_dir=img_dir, mode='test', validation=False, site=1):\n self.records = df.to_records(index=False)\n self.site = site\n self.mode = mode\n self.img_dir = img_dir\n self.len = df.shape[0]\n self.validation = validation\n \n @staticmethod\n def _load_img_as_tensor(file_name):\n with Image.open(file_name) as img:\n return TestImagesDS.transform(img)\n\n def _get_img_path(self, index, site=1):\n experiment, well, plate = self.records[index].experiment, self.records[index].well, self.records[index].plate\n return f'{self.img_dir}/{self.mode}/{experiment}_{plate}_{well}_s{site}.jpeg'\n \n def get_image_pair(self, index):\n return [self._load_img_as_tensor(self._get_img_path(index, site)) for site in [1,2]]\n \n def __getitem__(self, index):\n image_pairs = [self.get_image_pair(index) for _ in range(20)]\n \n return image_pairs, self.records[index].id_code\n\n def __len__(self):\n return self.len",
"_____no_output_____"
],
[
"# dataframes for training, cross-validation, and testing\ndf_test = pd.read_csv(path_data+'/test.csv')\n\n# pytorch test dataset & loader\nds_test = TestImagesDS(df_test, mode='test', validation=True)\ntloader = D.DataLoader(ds_test, batch_size=1, shuffle=False, num_workers=4)",
"_____no_output_____"
],
[
"class EfficientNetTwoInputs(nn.Module):\n def __init__(self):\n super(EfficientNetTwoInputs, self).__init__()\n self.classes = 1108\n \n model = model = EfficientNet.from_pretrained(model_name, num_classes=1108) \n num_ftrs = model._fc.in_features\n model._fc = nn.Identity()\n \n self.resnet = model\n self.fc = nn.Linear(num_ftrs * 2, self.classes)\n\n def forward(self, x1, x2):\n x1_out = self.resnet(x1)\n x2_out = self.resnet(x2)\n \n N, _, _, _ = x1.size()\n x1_out = x1_out.view(N, -1)\n x2_out = x2_out.view(N, -1)\n \n out = torch.cat((x1_out, x2_out), 1)\n out = self.fc(out)\n\n return out \n \nmodel = EfficientNetTwoInputs()",
"Loaded pretrained weights for efficientnet-b3\n"
]
],
[
[
"#### Evaluate",
"_____no_output_____"
]
],
[
[
"model.cuda()\neval_model_10(model, tloader, 'models/Model_efficientnet-b3_93.pth', path_data)",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
]
] |
e7fafaba15275aebab1b6617d1952ecedff094b0 | 265,282 | ipynb | Jupyter Notebook | Lab01/lmbaeza-lecture1.ipynb | lmbaeza/numerical-methods-2021 | 9e3d1ec7039067cf2a33a10328b307e7a27479c7 | [
"MIT"
] | null | null | null | Lab01/lmbaeza-lecture1.ipynb | lmbaeza/numerical-methods-2021 | 9e3d1ec7039067cf2a33a10328b307e7a27479c7 | [
"MIT"
] | null | null | null | Lab01/lmbaeza-lecture1.ipynb | lmbaeza/numerical-methods-2021 | 9e3d1ec7039067cf2a33a10328b307e7a27479c7 | [
"MIT"
] | null | null | null | 193.213401 | 54,086 | 0.883449 | [
[
[
"#Introduction to the Research Environment\n\nThe research environment is powered by IPython notebooks, which allow one to perform a great deal of data analysis and statistical validation. We'll demonstrate a few simple techniques here.",
"_____no_output_____"
],
[
"##Code Cells vs. Text Cells\n\nAs you can see, each cell can be either code or text. To select between them, choose from the 'Cell Type' dropdown menu on the top left.",
"_____no_output_____"
],
[
"##Executing a Command\n\nA code cell will be evaluated when you press play, or when you press the shortcut, shift-enter. Evaluating a cell evaluates each line of code in sequence, and prints the results of the last line below the cell.",
"_____no_output_____"
]
],
[
[
"2 + 2",
"_____no_output_____"
]
],
[
[
"Sometimes there is no result to be printed, as is the case with assignment.",
"_____no_output_____"
]
],
[
[
"X = 2",
"_____no_output_____"
]
],
[
[
"Remember that only the result from the last line is printed.",
"_____no_output_____"
]
],
[
[
"2 + 2\n3 + 3",
"_____no_output_____"
]
],
[
[
"However, you can print whichever lines you want using the `print` statement.",
"_____no_output_____"
]
],
[
[
"print(2 + 2)\n3 + 3",
"4\n"
]
],
[
[
"##Knowing When a Cell is Running\n\nWhile a cell is running, a `[*]` will display on the left. When a cell has yet to be executed, `[ ]` will display. When it has been run, a number will display indicating the order in which it was run during the execution of the notebook `[5]`. Try on this cell and note it happening.",
"_____no_output_____"
]
],
[
[
"#Take some time to run something\nc = 0\nfor i in range(10000000+1):\n c = c + i\nprint(c)",
"50000005000000\n"
]
],
[
[
"# Ejemplo 1:\n\n### Progresión Aritmética de Diferencia 1\n\n$\\frac{n\\cdot \\left(n+1\\right)}{2}=1+2+3+4+5+6+\\cdot \\cdot \\cdot +n$",
"_____no_output_____"
]
],
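[
[
"Why the formula holds (the classic pairing argument, included here for clarity): write the sum forwards and backwards and add the two lines term by term.\n\n$S=1+2+\\cdots +n$\n\n$S=n+\\left(n-1\\right)+\\cdots +1$\n\nEach of the $n$ columns sums to $n+1$, so $2S=n\\cdot \\left(n+1\\right)$ and therefore $S=\\frac{n\\cdot \\left(n+1\\right)}{2}$.",
"_____no_output_____"
]
],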
[
[
"n = 10000000\nprint(int(n*(n+1)/2))",
"50000005000000\n"
]
],
[
[
"##Importing Libraries\n\nThe vast majority of the time, you'll want to use functions from pre-built libraries. You can't import every library on Quantopian due to security issues, but you can import most of the common scientific ones. Here I import numpy and pandas, the two most common and useful libraries in quant finance. I recommend copying this import statement to every new notebook.\n\nNotice that you can rename libraries to whatever you want after importing. The `as` statement allows this. Here we use `np` and `pd` as aliases for `numpy` and `pandas`. This is a very common aliasing and will be found in most code snippets around the web. The point behind this is to allow you to type fewer characters when you are frequently accessing these libraries.",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport pandas as pd\n\n# This is a plotting library for pretty pictures.\nimport matplotlib.pyplot as plt",
"_____no_output_____"
]
],
[
[
"##Tab Autocomplete\n\nPressing tab will give you a list of IPython's best guesses for what you might want to type next. This is incredibly valuable and will save you a lot of time. If there is only one possible option for what you could type next, IPython will fill that in for you. Try pressing tab very frequently, it will seldom fill in anything you don't want, as if there is ambiguity a list will be shown. This is a great way to see what functions are available in a library.\n\nTry placing your cursor after the `.` and pressing tab.",
"_____no_output_____"
]
],
[
[
"np.random",
"_____no_output_____"
]
],
[
[
"##Getting Documentation Help\n\nPlacing a question mark after a function and executing that line of code will give you the documentation IPython has for that function. It's often best to do this in a new cell, as you avoid re-executing other code and running into bugs.",
"_____no_output_____"
]
],
[
[
"np.random.normal?",
"_____no_output_____"
]
],
[
[
"# Ejemplo 2\n# Obtener un numero primo entre 1 y 100",
"_____no_output_____"
]
],
[
[
"def is_prime(number):\n if number <= 1:\n return False\n elif number <= 3:\n return True\n \n if number%2==0 or number%3==0:\n return False\n i = 5\n while i*i <= number:\n if number % i == 0 or number % (i+2) == 0:\n return False;\n return True\n\nn = 0\n\nwhile True:\n n = np.random.randint(0, 100)\n if exist:\n break\n\nprint(n, \"Es un numero primo\")",
"49 Es un numero primo\n"
]
],
[
[
"##Sampling\n\nWe'll sample some random data using a function from `numpy`.",
"_____no_output_____"
]
],
[
[
"# Sample 100 points with a mean of 0 and an std of 1. This is a standard normal distribution.\nX = np.random.normal(0, 1, 100)\nX",
"_____no_output_____"
]
],
[
[
"##Plotting\n\nWe can use the plotting library we imported as follows.",
"_____no_output_____"
]
],
[
[
"plt.plot(X)",
"_____no_output_____"
]
],
[
[
"###Squelching Line Output\n\nYou might have noticed the annoying line of the form `[<matplotlib.lines.Line2D at 0x7f72fdbc1710>]` before the plots. This is because the `.plot` function actually produces output. Sometimes we wish not to display output, we can accomplish this with the semi-colon as follows.",
"_____no_output_____"
]
],
[
[
"plt.plot(X);",
"_____no_output_____"
]
],
[
[
"###Adding Axis Labels\n\nNo self-respecting quant leaves a graph without labeled axes. Here are some commands to help with that.",
"_____no_output_____"
]
],
[
[
"X = np.random.normal(0, 1, 100)\nX2 = np.random.normal(0, 1, 100)\n\nplt.plot(X);\nplt.plot(X2);\nplt.xlabel('Time') # The data we generated is unitless, but don't forget units in general.\nplt.ylabel('Returns')\nplt.legend(['X', 'X2']);",
"_____no_output_____"
]
],
[
[
"##Generating Statistics\n\nLet's use `numpy` to take some simple statistics.",
"_____no_output_____"
]
],
[
[
"Y = np.mean(X)\nY",
"_____no_output_____"
],
[
"Y = np.std(X)\nY",
"_____no_output_____"
]
],
[
[
"##Getting Real Pricing Data\n\nRandomly sampled data can be great for testing ideas, but let's get some real data. We can use `get_pricing` to do that. You can use the `?` syntax as discussed above to get more information on `get_pricing`'s arguments.",
"_____no_output_____"
]
],
[
[
"!pip install yfinance\n!pip install yahoofinancials",
"Collecting yfinance\n Downloading https://files.pythonhosted.org/packages/7a/e8/b9d7104d3a4bf39924799067592d9e59119fcfc900a425a12e80a3123ec8/yfinance-0.1.55.tar.gz\nRequirement already satisfied: pandas>=0.24 in /usr/local/lib/python3.7/dist-packages (from yfinance) (1.1.5)\nRequirement already satisfied: numpy>=1.15 in /usr/local/lib/python3.7/dist-packages (from yfinance) (1.19.5)\nRequirement already satisfied: requests>=2.20 in /usr/local/lib/python3.7/dist-packages (from yfinance) (2.23.0)\nRequirement already satisfied: multitasking>=0.0.7 in /usr/local/lib/python3.7/dist-packages (from yfinance) (0.0.9)\nCollecting lxml>=4.5.1\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/d2/88/b25778f17e5320c1c58f8c5060fb5b037288e162bd7554c30799e9ea90db/lxml-4.6.2-cp37-cp37m-manylinux1_x86_64.whl (5.5MB)\n\u001b[K |████████████████████████████████| 5.5MB 6.4MB/s \n\u001b[?25hRequirement already satisfied: python-dateutil>=2.7.3 in /usr/local/lib/python3.7/dist-packages (from pandas>=0.24->yfinance) (2.8.1)\nRequirement already satisfied: pytz>=2017.2 in /usr/local/lib/python3.7/dist-packages (from pandas>=0.24->yfinance) (2018.9)\nRequirement already satisfied: chardet<4,>=3.0.2 in /usr/local/lib/python3.7/dist-packages (from requests>=2.20->yfinance) (3.0.4)\nRequirement already satisfied: idna<3,>=2.5 in /usr/local/lib/python3.7/dist-packages (from requests>=2.20->yfinance) (2.10)\nRequirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.7/dist-packages (from requests>=2.20->yfinance) (2020.12.5)\nRequirement already satisfied: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in /usr/local/lib/python3.7/dist-packages (from requests>=2.20->yfinance) (1.24.3)\nRequirement already satisfied: six>=1.5 in /usr/local/lib/python3.7/dist-packages (from python-dateutil>=2.7.3->pandas>=0.24->yfinance) (1.15.0)\nBuilding wheels for collected packages: yfinance\n Building wheel for yfinance (setup.py) ... \u001b[?25l\u001b[?25hdone\n Created wheel for yfinance: filename=yfinance-0.1.55-py2.py3-none-any.whl size=22616 sha256=78ac0f3de68c7d18847d94804759fae3f2b9f9d7d2c15a96ffa32191883fb4ca\n Stored in directory: /root/.cache/pip/wheels/04/98/cc/2702a4242d60bdc14f48b4557c427ded1fe92aedf257d4565c\nSuccessfully built yfinance\nInstalling collected packages: lxml, yfinance\n Found existing installation: lxml 4.2.6\n Uninstalling lxml-4.2.6:\n Successfully uninstalled lxml-4.2.6\nSuccessfully installed lxml-4.6.2 yfinance-0.1.55\nCollecting yahoofinancials\n Downloading https://files.pythonhosted.org/packages/97/fe/be0f6ea704137848779fc61e7d1c9a901489aaf3423cd7b6f86a350c14c6/yahoofinancials-1.6.tar.gz\nRequirement already satisfied: beautifulsoup4 in /usr/local/lib/python3.7/dist-packages (from yahoofinancials) (4.6.3)\nRequirement already satisfied: pytz in /usr/local/lib/python3.7/dist-packages (from yahoofinancials) (2018.9)\nBuilding wheels for collected packages: yahoofinancials\n Building wheel for yahoofinancials (setup.py) ... \u001b[?25l\u001b[?25hdone\n Created wheel for yahoofinancials: filename=yahoofinancials-1.6-cp37-none-any.whl size=15192 sha256=80b2da5baec715b37852ee86addccc555aec36d52a5c8697b77c5991e5d0c4f0\n Stored in directory: /root/.cache/pip/wheels/d9/7e/cf/4977a8572d5247242a4b13018d1d36923024ba84236e0d28bc\nSuccessfully built yahoofinancials\nInstalling collected packages: yahoofinancials\nSuccessfully installed yahoofinancials-1.6\n"
],
[
"import yfinance as yf\nfrom yahoofinancials import YahooFinancials\n# Reference: https://towardsdatascience.com/a-comprehensive-guide-to-downloading-stock-prices-in-python-2cd93ff821d4",
"_____no_output_____"
],
[
"data = yf.download('MSFT', start='2012-01-01', end='2015-06-01', progress=False)",
"_____no_output_____"
]
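,
[
"# Added aside (not in the original notebook): yf.download also accepts a list\n# of tickers at once; the exact column layout may vary by yfinance version.\nmulti = yf.download(['MSFT', 'AAPL'], start='2012-01-01', end='2015-06-01', progress=False)\nmulti.head()",
"_____no_output_____"
]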
],
[
[
"Our data is now a dataframe. You can see the datetime index and the colums with different pricing data.",
"_____no_output_____"
]
],
[
[
"data",
"_____no_output_____"
]
],
[
[
"This is a pandas dataframe, so we can index in to just get price like this. For more info on pandas, please [click here](http://pandas.pydata.org/pandas-docs/stable/10min.html).",
"_____no_output_____"
]
],
[
[
"X = data['Open']\nX",
"_____no_output_____"
]
],
[
[
"Because there is now also date information in our data, we provide two series to `.plot`. `X.index` gives us the datetime index, and `X.values` gives us the pricing values. These are used as the X and Y coordinates to make a graph.",
"_____no_output_____"
]
],
[
[
"plt.plot(X.index, X.values)\nplt.ylabel('Price')\nplt.legend(['MSFT']);",
"_____no_output_____"
],
[
"np.mean(X)",
"_____no_output_____"
],
[
"np.std(X)",
"_____no_output_____"
]
],
[
[
"##Getting Returns from Prices\n\nWe can use the `pct_change` function to get returns. Notice how we drop the first element after doing this, as it will be `NaN` (nothing -> something results in a NaN percent change).",
"_____no_output_____"
]
],
[
[
"R = X.pct_change()[1:]",
"_____no_output_____"
]
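,
[
"# Quick sanity check (added example): the first percent change computed by\n# hand from the first two prices matches R's first element. X.pct_change()\n# leaves a NaN at index 0, which the [1:] slice above drops.\nmanual_first_return = (X.iloc[1] - X.iloc[0]) / X.iloc[0]\nprint(manual_first_return, R.iloc[0])",
"_____no_output_____"
]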
],
[
[
"We can plot the returns distribution as a histogram.",
"_____no_output_____"
]
],
[
[
"plt.hist(R, bins=20)\nplt.xlabel('Return')\nplt.ylabel('Frequency')\nplt.legend(['MSFT Returns']);",
"_____no_output_____"
]
],
[
[
"Get statistics again.",
"_____no_output_____"
]
],
[
[
"np.mean(R)",
"_____no_output_____"
],
[
"np.std(R)",
"_____no_output_____"
]
],
[
[
"Now let's go backwards and generate data out of a normal distribution using the statistics we estimated from Microsoft's returns. We'll see that we have good reason to suspect Microsoft's returns may not be normal, as the resulting normal distribution looks far different.",
"_____no_output_____"
]
],
[
[
"plt.hist(np.random.normal(np.mean(R), np.std(R), 10000), bins=20)\nplt.xlabel('Return')\nplt.ylabel('Frequency')\nplt.legend(['Normally Distributed Returns']);",
"_____no_output_____"
]
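,
[
"# Hedged example (not in the original notebook): a formal normality check.\n# The Jarque-Bera test rejects normality for small p-values; scipy is\n# assumed to be available in this environment.\nfrom scipy import stats\nstat, pvalue = stats.jarque_bera(R)\nprint('JB statistic: %.2f, p-value: %.4f' % (stat, pvalue))",
"_____no_output_____"
]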
],
[
[
"##Generating a Moving Average\n\n`pandas` has some nice tools to allow us to generate rolling statistics. Here's an example. Notice how there's no moving average for the first 60 days, as we don't have 60 days of data on which to generate the statistic.",
"_____no_output_____"
]
],
[
[
"# Take the average of the last 60 days at each timepoint.\nMAVG = X.rolling(60)\nplt.plot(X.index, X.values)\nplt.ylabel('Price')\nplt.legend(['MSFT', '60-day MAVG']);",
"_____no_output_____"
]
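,
[
"# Added follow-up example: rolling statistics generalize beyond the mean.\n# Here, a 60-day rolling standard deviation as a simple volatility proxy.\nMSTD = X.rolling(60).std()\nplt.plot(MSTD.index, MSTD.values)\nplt.ylabel('60-day Rolling Std');",
"_____no_output_____"
]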
],
[
[
"This presentation is for informational purposes only and does not constitute an offer to sell, a solicitation to buy, or a recommendation for any security; nor does it constitute an offer to provide investment advisory or other services by Quantopian, Inc. (\"Quantopian\"). Nothing contained herein constitutes investment advice or offers any opinion with respect to the suitability of any security, and any views expressed herein should not be taken as advice to buy, sell, or hold any security or as an endorsement of any security or company. In preparing the information contained herein, Quantopian, Inc. has not taken into account the investment needs, objectives, and financial circumstances of any particular investor. Any views expressed and data illustrated herein were prepared based upon information, believed to be reliable, available to Quantopian, Inc. at the time of publication. Quantopian makes no guarantees as to their accuracy or completeness. All information is subject to change and may quickly become unreliable for various reasons, including changes in market conditions or economic circumstances.",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
e7fafeff9fe7919ad9abdcc991d92ced34444a8c | 9,215 | ipynb | Jupyter Notebook | assignment_2/sudoku.ipynb | sandilya761/CSD311-Assignments | aaa59e8eb6db446611c9c637c6c0ebd8dd7c5573 | [
"MIT"
] | null | null | null | assignment_2/sudoku.ipynb | sandilya761/CSD311-Assignments | aaa59e8eb6db446611c9c637c6c0ebd8dd7c5573 | [
"MIT"
] | null | null | null | assignment_2/sudoku.ipynb | sandilya761/CSD311-Assignments | aaa59e8eb6db446611c9c637c6c0ebd8dd7c5573 | [
"MIT"
] | null | null | null | 30.716667 | 150 | 0.476831 | [
[
[
"def cartesian_product(x,y):\n \n return [a+b for a in x for b in y] \n# takes two iterable values and return the cartesian product in a list\n",
"_____no_output_____"
],
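[
"# Usage example (added): board coordinates are built from row letters and\n# column digits, e.g. cartesian_product('ABCDEFGHI', '123456789') yields the\n# 81 box labels 'A1' ... 'I9' of the sudoku grid.\nprint(cartesian_product('AB', '12')) # ['A1', 'A2', 'B1', 'B2']",
"_____no_output_____"
],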
[
"# displays the game board\n\ndef display_game_board(values):\n \n print('')\n \n rows = 'ABCDEFGHI'\n cols = '123456789'\n boxes = cartesian_product(rows, cols)\n \n width = 1+max(len(values[s]) for s in boxes)\n line = '+'.join(['-'*(width*3)]*3)\n for r in rows:\n print(''.join(values[r+c].center(width)+('|' if c in '36' else '')\n for c in cols))\n if r in 'CF': print(line)\n return",
"_____no_output_____"
],
[
"# elimination function eliminates the possible values according to the rules to get a simplified version of the puzzle.\ndef eliminate(Grid):\n for k,v in Grid.items():\n if len(v) != 1: # checks if the box needs elimination\n peers = peer_dict[k] # takes all the peers\n peer_values = set([Grid[p] for p in peers if len(Grid[p]) == 1])\n Grid[k] = ''.join(set(Grid[k]) - peer_values)\n return Grid",
"_____no_output_____"
],
[
"# checks the places the number if the there is only one choice to place it \ndef choice(Grid):\n for unit in unit_list:\n for num in '123456789':\n num_places = [box for box in unit if num in Grid[box]]\n if len(num_places) == 1:\n Grid[num_places[0]] = num\n return Grid",
"_____no_output_____"
],
[
"# Kindly check the documentation to know about naked_pairs\n\ndef naked_pairs(Grid):\n for unit in unit_list:\n \n # slice the Grid to contain only the boxes in the unit\n values = dict([[box, ''.join(sorted(Grid[box]))] for box in unit])\n \n # find all the items with 2-digit values\n double_digits = dict([[box, values[box]] for box in values if len(values[box])==2])\n \n # check if any of those 2-digit values occur exactly twice\n double_digits_occuring_twice = dict([[box, val] for box, val in double_digits.items() if list(double_digits.values()).count(val)==2])\n \n if len(double_digits_occuring_twice.items()) != 0:\n # reverse the dictionary to get the key-pairs easily\n reverse_dict = {}\n for k, v in double_digits_occuring_twice.items():\n reverse_dict.setdefault(v, []).append(k)\n\n # it is a list of 2 items(keys | boxes) only\n naked_pairs = list(reverse_dict.items())[0][1]\n\n # remove the naked_pairs digits from other boxes in the unit\n for k,v in values.items():\n if (k not in naked_pairs) and (len(v) > 1):\n values[k] = ''.join(set(values[k]) - set(values[naked_pairs[0]]))\n \n # replace the values in Grid with the updated values\n for k,v in values.items():\n Grid[k] = v\n \n return Grid\n",
"_____no_output_____"
],
[
"def run(Grid):\n stuck = False\n while not stuck:\n # Check how many boxes have a fixed value\n previous_solved = len([box for box in Grid.keys() if len(Grid[box]) == 1])\n \n \n Grid = eliminate(Grid)\n \n \n Grid = choice(Grid)\n \n \n Grid = naked_pairs(Grid)\n \n # Check how many boxes have a value, to compare\n post_solved_values = len([box for box in Grid.keys() if len(Grid[box]) == 1])\n \n # If no new values were added, stop the loop.\n stuck = previous_solved == post_solved_values\n \n \n # if the current sudoku board is cannot be solved then return False\n if len([box for box in Grid.keys() if len(Grid[box]) == 0]):\n return False \n return Grid\n",
"_____no_output_____"
],
[
"\ndef search(Grid):\n Grid = run(Grid)\n \n if Grid is False:\n return False\n \n if all(len(v) == 1 for k,v in Grid.items()): \n return Grid\n \n # Choose one of the unfilled squares with the fewest possibilities\n length,k = min((len(val), key) for key,val in Grid.items() if len(val) > 1)\n # print(k, length)\n \n # Now use recurrence to solve each one of the resulting sudoku\n for digit in Grid[k]:\n new_sudoku = dict(list(Grid.items()))\n new_sudoku[k] = digit\n attempt = search(new_sudoku)\n if attempt:\n return attempt",
"_____no_output_____"
],
[
"if __name__ == '__main__':\n \n \n start = '8xxxxxxxxxx36xxxxxx7xx9x2xxx5xxx7xxxxxxx457xxxxx1xxx3xxx1xxxx68xx85xxx1xx9xxxx4xx'\n #start = '..3.2.6..9..3.5..1..18.64....81.29..7.......8..67.82....26.95..8..2.3..9..5.1.3..'\n \n rows = 'ABCDEFGHI'\n cols = '123456789'\n boxes = cartesian_product(rows, cols)\n\n row_units = [cartesian_product(r, cols) for r in rows]\n col_units = [cartesian_product(rows, c) for c in cols]\n box_units = [cartesian_product(r,c) \n for r in ['ABC', 'DEF', 'GHI'] \n for c in ['123','456','789']]\n\n unit_list = row_units + col_units + box_units\n\n # each box(key) with its units(value)\n unit_dict = dict((box, [unit for unit in unit_list if box in unit]) for box in boxes)\n \n # each box with its peers\n peer_dict = dict((box, set(sum(unit_dict[box], [])) - set([box])) for box in boxes)\n\n # start string converted to dictionary\n assert len(start) == 81\n Grid = dict(zip(boxes, start))\n\n # replacing the x with '123456789' (possible values in the box)\n for k,v in Grid.items():\n if v == 'x':\n Grid[k] = '123456789'\n \n solved_grid = search(Grid)\n \n display_game_board(solved_grid)",
"\n8 1 4 |2 5 3 |6 9 7 \n9 2 3 |6 7 8 |1 5 4 \n5 7 6 |4 9 1 |2 8 3 \n------+------+------\n3 5 2 |9 6 7 |8 4 1 \n1 8 9 |3 4 5 |7 2 6 \n4 6 7 |1 8 2 |9 3 5 \n------+------+------\n2 4 1 |7 3 9 |5 6 8 \n7 6 8 |5 2 4 |3 1 9 \n3 9 5 |8 1 6 |4 7 2 \n"
],
[
"#new_grid = eliminate(Grid)",
"_____no_output_____"
],
[
"#display_game_board(new_grid)",
"_____no_output_____"
],
[
"#display_game_board(naked_dict)",
"_____no_output_____"
],
[
"#display_game_board(Grid)",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e7fb03f645ae1e97ff59d1fbaa1c9ac9809352f5 | 661 | ipynb | Jupyter Notebook | notebooks/notyet/basics.ipynb | liuxfiu/qmodels | f04d28923c623495d7d1cc3962fb8cac61dc2685 | [
"MIT"
] | null | null | null | notebooks/notyet/basics.ipynb | liuxfiu/qmodels | f04d28923c623495d7d1cc3962fb8cac61dc2685 | [
"MIT"
] | null | null | null | notebooks/notyet/basics.ipynb | liuxfiu/qmodels | f04d28923c623495d7d1cc3962fb8cac61dc2685 | [
"MIT"
] | null | null | null | 16.525 | 34 | 0.515885 | [
[
[
"## Basic Queuing Models",
"_____no_output_____"
]
]
] | [
"markdown"
] | [
[
"markdown"
]
] |
e7fb06af789f63db8696556322537565f0531b2c | 9,450 | ipynb | Jupyter Notebook | Resample_Audio.ipynb | materialvision/melgan-neurips | 928ebe4571617af6fc8929ae3af8c07d148413ab | [
"MIT"
] | 2 | 2020-12-14T12:31:50.000Z | 2021-06-24T02:46:46.000Z | Resample_Audio.ipynb | materialvision/melgan-neurips | 928ebe4571617af6fc8929ae3af8c07d148413ab | [
"MIT"
] | null | null | null | Resample_Audio.ipynb | materialvision/melgan-neurips | 928ebe4571617af6fc8929ae3af8c07d148413ab | [
"MIT"
] | 4 | 2020-09-20T01:49:03.000Z | 2021-11-18T17:58:16.000Z | 38.888889 | 1,295 | 0.533439 | [
[
[
"<a href=\"https://colab.research.google.com/github/buganart/melgan-neurips/blob/master/Resample_Audio.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>",
"_____no_output_____"
]
],
[
[
"#@markdown Before starting please save the notebook in your drive by clicking on `File -> Save a copy in drive`",
"_____no_output_____"
],
[
"#@markdown Check how many CPUs we have, you can choose a high memory instance to get 4.\nimport os\nprint(f\"We have {os.cpu_count()} CPU cores.\")",
"We have 4 CPU cores.\n"
],
[
"#@markdown Mount google drive\nfrom google.colab import drive, output\ndrive.mount('/content/drive')\n\nfrom pathlib import Path\nif not Path(\"/content/drive/My Drive/IRCMS_GAN_collaborative_database\").exists():\n raise RuntimeError(\n \"Shortcut to our shared drive folder doesn't exits.\\n\\n\"\n \"\\t1. Go to the google drive web UI\\n\"\n \"\\t2. Right click shared folder IRCMS_GAN_collaborative_database and click \\\"Add shortcut to Drive\\\"\"\n )\n\ndef clear_on_success(msg=\"Ok!\"):\n if _exit_code == 0:\n output.clear()\n print(msg)",
"Drive already mounted at /content/drive; to attempt to forcibly remount, call drive.mount(\"/content/drive\", force_remount=True).\n"
],
[
"#@markdown Configuration\n\n#@markdown Directories can be found via file explorer on the left by navigating into `drive` to the desired folders. \n#@markdown Then right-click and `Copy path`.\naudio_db_dir = \"/content/drive/My Drive/AUDIO DATABASE/RAW Sessions/Roberto Studio Material\" #@param {type:\"string\"}\nresample_dir = \"/content/drive/My Drive/AUDIO DATABASE/RAW Sessions/Roberto Studio Material 22050\" #@param {type:\"string\"}\nsample_rate = 22050 #@param {type:\"string\"}\nsample_rate = int(sample_rate)\n\naudio_db_dir = Path(audio_db_dir)\nresample_dir = Path(resample_dir)\n\nresample_dir.mkdir(parents=True, exist_ok=True)\n\nif not audio_db_dir.exists():\n raise RuntimeError(\"audio_db_dir {audio_db_dir} does not exists.\")",
"_____no_output_____"
],
[
"#@markdown Install recent ffmpeg.\n!add-apt-repository -y ppa:jonathonf/ffmpeg-4\n!apt install ffmpeg\nclear_on_success(\"ffmpeg installed.\")\n!ffmpeg -version",
"ffmpeg installed.\nffmpeg version 4.3.1-0york0~18.04 Copyright (c) 2000-2020 the FFmpeg developers\nbuilt with gcc 7 (Ubuntu 7.5.0-3ubuntu1~18.04)\nconfiguration: --prefix=/usr --extra-version='0york0~18.04' --toolchain=hardened --libdir=/usr/lib/x86_64-linux-gnu --incdir=/usr/include/x86_64-linux-gnu --arch=amd64 --enable-gpl --disable-stripping --enable-avresample --disable-filter=resample --enable-gnutls --enable-ladspa --enable-libaom --enable-libass --enable-libbluray --enable-libbs2b --enable-libcaca --enable-libcdio --enable-libcodec2 --enable-libflite --enable-libfontconfig --enable-libfreetype --enable-libfribidi --enable-libgme --enable-libgsm --enable-libjack --enable-libmp3lame --enable-libmysofa --enable-libopenjpeg --enable-libopenmpt --enable-libopus --enable-libpulse --enable-librabbitmq --enable-librsvg --enable-librubberband --enable-libshine --enable-libsnappy --enable-libsoxr --enable-libspeex --enable-libsrt --enable-libssh --enable-libtheora --enable-libtwolame --enable-libvidstab --enable-libvorbis --enable-libvpx --enable-libwavpack --enable-libwebp --enable-libx265 --enable-libxml2 --enable-libxvid --enable-libzmq --enable-libzvbi --enable-lv2 --enable-omx --enable-openal --enable-opencl --enable-opengl --enable-sdl2 --enable-libzimg --enable-pocketsphinx --enable-libdc1394 --enable-libdrm --enable-libiec61883 --enable-chromaprint --enable-frei0r --enable-libx264 --enable-shared\nlibavutil 56. 51.100 / 56. 51.100\nlibavcodec 58. 91.100 / 58. 91.100\nlibavformat 58. 45.100 / 58. 45.100\nlibavdevice 58. 10.100 / 58. 10.100\nlibavfilter 7. 85.100 / 7. 85.100\nlibavresample 4. 0. 0 / 4. 0. 0\nlibswscale 5. 7.100 / 5. 7.100\nlibswresample 3. 7.100 / 3. 7.100\nlibpostproc 55. 7.100 / 55. 7.100\n"
],
[
"#@markdown Resample audio files.\nimport subprocess\nfrom pathlib import Path\n\nfrom joblib import Parallel, delayed\n\n\ndef convert(input, output, sample_rate):\n command = [\"ffmpeg\", \"-i\", str(input), \"-y\", \"-ar\", str(sample_rate), str(output)]\n try:\n return subprocess.check_output(command, stderr=subprocess.STDOUT,)\n except subprocess.CalledProcessError as exc:\n print(f\"Return code: {exc.returncode}\\n\", exc.output)\n raise\n\n\ndef main(*, in_dir, out_dir, sample_rate):\n in_dir = Path(in_dir)\n out_dir = Path(out_dir)\n in_paths = list(Path(in_dir).rglob(\"*.*\"))\n out_paths = [out_dir / in_path.relative_to(in_dir) for in_path in in_paths]\n\n for sub_dir in set(out_path.parent for out_path in out_paths):\n sub_dir.mkdir(exist_ok=True, parents=True)\n\n Parallel(n_jobs=-1, backend='multiprocessing', verbose=2)(\n delayed(convert)(in_path, out_path, sample_rate)\n for in_path, out_path in zip(in_paths, out_paths)\n )\n\nmain(in_dir=audio_db_dir, out_dir=resample_dir, sample_rate=sample_rate)\n\nprint('Done!')",
"[Parallel(n_jobs=-1)]: Using backend MultiprocessingBackend with 4 concurrent workers.\n[Parallel(n_jobs=-1)]: Done 33 tasks | elapsed: 1.1min\n[Parallel(n_jobs=-1)]: Done 154 tasks | elapsed: 3.4min\n[Parallel(n_jobs=-1)]: Done 357 tasks | elapsed: 4.9min\n"
],
[
"",
"_____no_output_____"
]
]
] | [
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e7fb0ea4786edefd82552976617378cc7661d804 | 77,754 | ipynb | Jupyter Notebook | nn.ipynb | GavinPHR/Parser | 3e7dd453d756526105edf8cacd35c72481940c0e | [
"MIT"
] | 1 | 2021-08-04T12:24:46.000Z | 2021-08-04T12:24:46.000Z | nn.ipynb | GavinPHR/Parser | 3e7dd453d756526105edf8cacd35c72481940c0e | [
"MIT"
] | null | null | null | nn.ipynb | GavinPHR/Parser | 3e7dd453d756526105edf8cacd35c72481940c0e | [
"MIT"
] | null | null | null | 567.547445 | 51,810 | 0.936762 | [
[
[
"import config\nfrom preprocessing import mappings, transforms, treebank_reader\nfrom training import parameter, rules, rules_and_count\n\nif __name__ == '__main__':\n # mp.set_start_method('fork')\n tb = treebank_reader.TreebankReader(config.train_file)\n config.train = tb.read()\n tb = treebank_reader.TreebankReader(config.dev_file)\n config.dev = tb.read()\n\n\n config.nonterminal_map = mappings.NonterminalMap(config.train)\n config.terminal_map = mappings.TerminalMap(config.train, len(config.nonterminal_map))\n transforms.transform_trees()\n\n # Training\n config.rules_and_count = rules_and_count.RulesAndCount()\n rules_full = rules.Rules()\n parameter.process_parameter(rules_full)\n \n split = int(len(config.train) * 0.9)\n config.dev = config.train[split:]\n config.train = config.train[:split]",
"23:21:00 - TreebankReader starts reading file.\n100%|██████████| 842/842 [00:00<00:00, 1321.95it/s]\n23:21:00 - TreebankReader finishes reading file.\n23:21:00 - TreebankReader starts reading file.\n100%|██████████| 10/10 [00:00<00:00, 1293.94it/s]\n23:21:00 - TreebankReader finishes reading file.\n23:21:00 - Creating mappings between non-terminals and ints.\nnonterminal_map: 100%|██████████| 842/842 [00:00<00:00, 22456.39it/s]\n23:21:00 - Finished creating mappings.\n23:21:00 - Creating mappings between terminals and ints.\n100%|██████████| 9/9 [00:00<00:00, 303.75it/s]\n23:21:01 - Finished creating mappings.\n23:21:01 - Transforming str/int trees to int/str trees.\n100%|██████████| 842/842 [00:00<00:00, 5628.19it/s]\n23:21:01 - Finished transforming trees.\n23:21:01 - Starting to aggregate rules and their counts.\n100%|██████████| 842/842 [00:00<00:00, 4565.36it/s]\n23:21:01 - Done aggregating rules and their counts.\n23:21:01 - Starting to aggregate nonterminals.\n100%|██████████| 842/842 [00:00<00:00, 15922.18it/s]\n23:21:01 - Done aggregating nonterminals.\n23:21:01 - Processing final parameters.\n23:21:01 - Done processing final parameters.\n"
],
[
"config.train[0]",
"_____no_output_____"
],
[
"config.dev[0]",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code"
]
] |
e7fb224bbc97d4b21ebff985ef056a3209f33037 | 459,120 | ipynb | Jupyter Notebook | reproduce_plots/rj4a_plots.ipynb | nasa/1d-pinn-reconstruction | 9798c1696447fa9f1c7098cb12e49ed60d736f67 | [
"NASA-1.3",
"BSD-3-Clause"
] | 2 | 2021-11-18T10:36:59.000Z | 2022-01-19T16:35:41.000Z | reproduce_plots/rj4a_plots.ipynb | nasa/1d-pinn-reconstruction | 9798c1696447fa9f1c7098cb12e49ed60d736f67 | [
"NASA-1.3",
"BSD-3-Clause"
] | null | null | null | reproduce_plots/rj4a_plots.ipynb | nasa/1d-pinn-reconstruction | 9798c1696447fa9f1c7098cb12e49ed60d736f67 | [
"NASA-1.3",
"BSD-3-Clause"
] | null | null | null | 762.657807 | 76,380 | 0.951157 | [
[
[
"# basic libraries\nimport numpy as np\nimport tensorflow as tf\nimport tensorflow.keras.backend as K\nimport mhd_numerical_diff2 as mhdmod\nimport traj_utilities as tju\nimport matplotlib.pyplot as plt\nimport h5py as h5",
"_____no_output_____"
],
[
"#trained model weights\nmodel_rj4a = mhdmod.MHD_nd(gamma=5./3.,nh=48, nlayers=4, model_name='dummy',do_visc=True,visc=0.005, dx=0.001, dt=0.001)\nmodel_rj4a.load_weights(\"./rj4a/4_traj/best_weights/weights\")",
"_____no_output_____"
],
[
"# read in spacetime data\nfname = '../rj4a_space/rj4a_ST.h5'\nspace,time,sc_1,sc_2,sc_3,sc_4,st_data,st_input, data_input = tju.load_sp_mhd_data(fname, 'rj4a')",
"_____no_output_____"
],
[
"# define grid(s) for reproducing results\n\n# for comparison with full simulation\nxx,tt = np.meshgrid(space,time,indexing='ij')\nxxtt = np.stack((xx.flatten(), tt.flatten())).T #returns (Np, 2) for input into PINN\n\nnx = len(space)\nnt = len(time)\n\n#lower res grid\nnx_samp = 101\nnt_samp = 101\nsptim_lin = mhdmod.generate_spacetime_coloc_linear([[space[0],space[-1]]],[time[0],time[-1]],nx_samp,nt_samp).numpy()",
"_____no_output_____"
],
[
"#generate predictions\npred_rj4a = model_rj4a(xxtt)\npred_rho = pred_rj4a[:,0].numpy().reshape(nx,nt)\npred_vx = pred_rj4a[:,1].numpy().reshape(nx,nt)\npred_vy = pred_rj4a[:,2].numpy().reshape(nx,nt)\npred_p = pred_rj4a[:,4].numpy().reshape(nx,nt)\npred_by = pred_rj4a[:,6].numpy().reshape(nx,nt)\n\npred_rj4a_lin = model_rj4a(sptim_lin)\npred_rho_lin = pred_rj4a_lin[:,0].numpy().reshape(nx_samp,nt_samp)\npred_vx_lin = pred_rj4a_lin[:,1].numpy().reshape(nx_samp,nt_samp)\npred_vy_lin = pred_rj4a_lin[:,2].numpy().reshape(nx_samp,nt_samp)\npred_p_lin = pred_rj4a_lin[:,4].numpy().reshape(nx_samp,nt_samp)\npred_by_lin = pred_rj4a_lin[:,6].numpy().reshape(nx_samp,nt_samp)",
"_____no_output_____"
],
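[
"# Added summary metric (not in the original notebook): global relative L2\n# errors of the PINN reconstruction against the reference simulation.\nfor name, pred, ref in [('rho', pred_rho, st_data[0]), ('p', pred_p, st_data[4])]:\n rel_l2 = np.linalg.norm(pred - ref) / np.linalg.norm(ref)\n print('relative L2 error, %s: %.4f' % (name, rel_l2))",
"_____no_output_____"
],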
[
"# plots\nfig = plt.figure()\nax = fig.add_subplot(121)\nax2 = fig.add_subplot(122)\nfig.set_size_inches(7,3)\n\nax.plot(space,st_data[0,:,80],color='k') # 80 is t = 0.16\nax.set_xlim(0.,1.)\nax.set_ylim(0.,1.2)\nax.set_xlabel(\"X\")\nax.set_ylabel(\"Density\")\nax.set_title(\"Density at t = 0.16\")\n\nimg = ax2.pcolormesh(space,time,st_data[0].T, cmap=plt.cm.rainbow,vmin=0., vmax=1.2, shading='auto')\nax2.set_xlim(0.,1.)\nax2.set_ylim(0.,0.18)\nax2.set_xlabel(\"X\")\nax2.set_ylabel(\"Time\")\nax2.set_title(\"Density in Spacetime\")\nax2.plot(sc_1[:,0], sc_1[:,1],color='k')\nax2.plot(sc_2[:,0], sc_2[:,1],color='k')\nax2.plot(sc_3[:,0], sc_3[:,1],color='k')\nax2.plot(sc_4[:,0], sc_4[:,1],color='k')\nax2.axhline(0.16,0,1,color='white',ls='--',lw=1.5)\n\nfig.colorbar(img)\nplt.tight_layout()\n#fig.savefig(\"RJ4a_density_example.png\", dpi=400)",
"_____no_output_____"
],
[
"error_rho_rel = np.abs((pred_rho - st_data[0])/st_data[0])\nerror_vx_rel = np.abs((pred_vx - st_data[1])/pred_vx)\nerror_vy_rel = np.abs((pred_vy - st_data[2])/pred_vy)\nerror_p_rel = np.abs((pred_p - st_data[4])/st_data[4])\nerror_by_rel = np.abs((pred_by - st_data[6])/pred_by)",
"_____no_output_____"
],
[
"# error comparison\nfig = plt.figure()\nax = fig.add_subplot(111)\n\n#This figure is a plot of the relative error of density\nimg = ax.pcolormesh(space,time,error_rho_rel.T, cmap=plt.cm.YlOrRd,vmin=0., vmax=0.4, shading='auto')\nax.plot(sc_1[:,0], sc_1[:,1],color='k', ls=':',alpha=0.3)\nax.plot(sc_2[:,0], sc_2[:,1],color='k', ls=':',alpha=0.3)\nax.plot(sc_3[:,0], sc_3[:,1],color='k', ls=':',alpha=0.3)\nax.plot(sc_4[:,0], sc_4[:,1],color='k', ls=':',alpha=0.3)\nax.set_xlim(0.,1.)\nax.set_ylim(0.,0.18)\nax.set_xlabel(\"X\")\nax.set_ylabel(\"Time\")\n\nfig.colorbar(img)\nplt.tight_layout()\n#fig.savefig(\"Sod_rel_error_compare.png\", dpi=400)",
"_____no_output_____"
],
[
"fig = plt.figure()\nax = fig.add_subplot(111)\n\n#This figure is a plot of the relative error of pressure\n\nimg = ax.pcolormesh(space,time,error_p_rel.T, cmap=plt.cm.YlOrRd,vmin=0., vmax=0.4, shading='auto')\nax.plot(sc_1[:,0], sc_1[:,1],color='k', ls=':',alpha=0.3)\nax.plot(sc_2[:,0], sc_2[:,1],color='k', ls=':',alpha=0.3)\nax.plot(sc_3[:,0], sc_3[:,1],color='k', ls=':',alpha=0.3)\nax.plot(sc_4[:,0], sc_4[:,1],color='k', ls=':',alpha=0.3)\nax.set_xlim(0.,1.)\nax.set_ylim(0.,0.18)\nax.set_xlabel(\"X\")\nax.set_ylabel(\"Time\")\n\nfig.colorbar(img)\nplt.tight_layout()",
"_____no_output_____"
],
[
"fig = plt.figure()\nax = fig.add_subplot(111)\n\n#This figure is a plot of the relative error of By\n# jankiness due to By = 0 on rhs of figure\n\nimg = ax.pcolormesh(space,time,error_by_rel.T, cmap=plt.cm.YlOrRd,vmin=0., vmax=0.4, shading='auto')\nax.plot(sc_1[:,0], sc_1[:,1],color='k', ls=':',alpha=0.3)\nax.plot(sc_2[:,0], sc_2[:,1],color='k', ls=':',alpha=0.3)\nax.plot(sc_3[:,0], sc_3[:,1],color='k', ls=':',alpha=0.3)\nax.plot(sc_4[:,0], sc_4[:,1],color='k', ls=':',alpha=0.3)\nax.set_xlim(0.,1.)\nax.set_ylim(0.,0.18)\nax.set_xlabel(\"X\")\nax.set_ylabel(\"Time\")\n\nfig.colorbar(img)\nplt.tight_layout()",
"_____no_output_____"
],
[
"error_rho_abs = np.abs((pred_rho - st_data[0]))\nerror_vx_abs = np.abs((pred_vx - st_data[1]))\nerror_vy_abs = np.abs((pred_vy - st_data[2]))\nerror_p_abs = np.abs((pred_p - st_data[4]))\nerror_by_abs = np.abs((pred_by - st_data[6]))",
"_____no_output_____"
],
[
"fig = plt.figure()\nax = fig.add_subplot(111)\n\n#This figure is a plot of the absolute error of density\nimg = ax.pcolormesh(space,time,error_rho_abs.T, cmap=plt.cm.YlOrRd,vmin=0., vmax=0.4, shading='auto')\nax.plot(sc_1[:,0], sc_1[:,1],color='k', ls=':',alpha=0.3)\nax.plot(sc_2[:,0], sc_2[:,1],color='k', ls=':',alpha=0.3)\nax.plot(sc_3[:,0], sc_3[:,1],color='k', ls=':',alpha=0.3)\nax.plot(sc_4[:,0], sc_4[:,1],color='k', ls=':',alpha=0.3)\nax.set_xlim(0.,1.)\nax.set_ylim(0.,0.18)\nax.set_xlabel(\"X\")\nax.set_ylabel(\"Time\")\n\nfig.colorbar(img)\nplt.tight_layout()",
"_____no_output_____"
],
[
"fig = plt.figure()\nax = fig.add_subplot(111)\n\n#This figure is a plot of the absolute error of pressure\nimg = ax.pcolormesh(space,time,error_p_abs.T, cmap=plt.cm.YlOrRd,vmin=0., vmax=0.4, shading='auto')\nax.plot(sc_1[:,0], sc_1[:,1],color='k', ls=':',alpha=0.3)\nax.plot(sc_2[:,0], sc_2[:,1],color='k', ls=':',alpha=0.3)\nax.plot(sc_3[:,0], sc_3[:,1],color='k', ls=':',alpha=0.3)\nax.plot(sc_4[:,0], sc_4[:,1],color='k', ls=':',alpha=0.3)\nax.set_xlim(0.,1.)\nax.set_ylim(0.,0.18)\nax.set_xlabel(\"X\")\nax.set_ylabel(\"Time\")\n\nfig.colorbar(img)\nplt.tight_layout()",
"_____no_output_____"
],
[
"fig = plt.figure()\nax = fig.add_subplot(111)\n\n#This figure is a plot of the absolute error of by\nimg = ax.pcolormesh(space,time,error_by_abs.T, cmap=plt.cm.YlOrRd,vmin=0., vmax=0.4, shading='auto')\nax.plot(sc_1[:,0], sc_1[:,1],color='k', ls=':',alpha=0.3)\nax.plot(sc_2[:,0], sc_2[:,1],color='k', ls=':',alpha=0.3)\nax.plot(sc_3[:,0], sc_3[:,1],color='k', ls=':',alpha=0.3)\nax.plot(sc_4[:,0], sc_4[:,1],color='k', ls=':',alpha=0.3)\nax.set_xlim(0.,1.)\nax.set_ylim(0.,0.18)\nax.set_xlabel(\"X\")\nax.set_ylabel(\"Time\")\n\nfig.colorbar(img)\nplt.tight_layout()",
"_____no_output_____"
],
[
"fig = plt.figure()\nax = fig.add_subplot(221)\n\nax.plot(space,st_data[0,:,80],color='k',label=\"Exact\")\ni = np.where(sptim_lin[::nt_samp,1] >= 0.16)[0][0]\nax.plot(sptim_lin[:nx_samp,0], pred_rj4a_lin[101*i:(i+1)*101,0],color='r', label='Prediction')\nax.legend(loc='best')\nax.set_xlim(0,1)\nax.set_ylim(0,1.2)\nax.set_xlabel(\"X\",fontsize=14)\nax.set_title(\"Density, t = 0.16\",fontsize=14)\n\nax2 = fig.add_subplot(222)\nax2.plot(space,st_data[1,:,80],color='k',label=\"Exact\")\nax2.plot(sptim_lin[:nx_samp,0], pred_rj4a_lin[101*i:(i+1)*101,1],color='r', label='Prediction')\nax2.legend(loc='best')\nax2.set_xlim(0,1)\nax2.set_ylim(-0.5,1.)\nax2.set_xlabel(\"X\",fontsize=14)\nax2.set_title(\"Velocity X, t = 0.2\",fontsize=14)\n\nax3 = fig.add_subplot(223)\nj = np.where(sptim_lin[::nt_samp,1] >= 0.05)[0][0]\nax3.plot(space,st_data[0,:,25],color='k',label=\"Exact\")\nax3.plot(sptim_lin[:nx_samp,0], pred_rj4a_lin[101*j:(j+1)*101,0],color='r', label='Prediction')\nax3.legend(loc='best')\nax3.set_xlim(0,1)\nax3.set_ylim(0.,1.2)\nax3.set_xlabel(\"X\",fontsize=14)\nax3.set_title(\"Density, t = 0.05\",fontsize=14)\n\nax4 = fig.add_subplot(224)\nax4.plot(space,st_data[1,:,25],color='k',label=\"Exact\")\nax4.plot(sptim_lin[:nx_samp,0], pred_rj4a_lin[101*j:(j+1)*101,1],color='r', label='Prediction')\nax4.legend(loc='best')\nax4.set_xlim(0,1)\nax4.set_ylim(-0.5,1.)\nax4.set_xlabel(\"X\",fontsize=14)\nax4.set_title(\"Velocity X, t = 0.05\",fontsize=14)\n\n\nfig.set_size_inches(15,12)\nplt.tight_layout()",
"_____no_output_____"
],
[
"import matplotlib.pyplot as plt\nimport numpy as np\n\n\n# Helper function used for visualization in the following examples\ndef identify_axes(ax_dict, fontsize=48):\n \"\"\"\n Helper to identify the Axes in the examples below.\n\n Draws the label in a large font in the center of the Axes.\n\n Parameters\n ----------\n ax_dict : dict[str, Axes]\n Mapping between the title / label and the Axes.\n fontsize : int, optional\n How big the label should be.\n \"\"\"\n kw = dict(ha=\"center\", va=\"center\", fontsize=fontsize, color=\"darkgrey\")\n for k, ax in ax_dict.items():\n ax.text(0.5, 0.5, k, transform=ax.transAxes, **kw)",
"_____no_output_____"
],
[
"fig = plt.Figure(constrained_layout=True)\nmosaic = \"\"\"\nAAB\nAAC\n\"\"\"\naxd = fig.subplot_mosaic(mosaic)\nidentify_axes(axd)\nfig",
"_____no_output_____"
],
[
"fig = plt.Figure(constrained_layout=True)\nmosaic = \"\"\"\nAABB\nAACC\n\"\"\"\naxd = fig.subplot_mosaic(mosaic)\nimg = axd['A'].pcolormesh(space,time,error_rho_rel.T, cmap=plt.cm.YlOrRd,vmin=0., vmax=0.4, shading='auto')\naxd['A'].plot(sc_1[:,0], sc_1[:,1],color='k', ls=':',alpha=0.3)\naxd['A'].plot(sc_2[:,0], sc_2[:,1],color='k', ls=':',alpha=0.3)\naxd['A'].plot(sc_3[:,0], sc_3[:,1],color='k', ls=':',alpha=0.3)\naxd['A'].plot(sc_4[:,0], sc_4[:,1],color='k', ls=':',alpha=0.3)\naxd['A'].set_xlim(0.,1.)\naxd['A'].set_ylim(0.,0.18)\naxd['A'].set_xlabel(\"X\",fontsize=12)\naxd['A'].set_ylabel(\"Time\",fontsize=12)\naxd['A'].set_title(\"Density relative error\", fontsize=12)\n\naxd['B'].plot(space,st_data[0,:,80],color='k',label=\"Exact\")\ni = np.where(sptim_lin[::nt_samp,1] >= 0.16)[0][0]\naxd['B'].plot(sptim_lin[:nx_samp,0], pred_rj4a_lin[101*i:(i+1)*101,0],color='r', label='Prediction')\naxd['B'].legend(loc='best')\naxd['B'].set_xlim(0,1)\naxd['B'].set_ylim(0,1.2)\naxd['B'].set_xlabel(\"X\",fontsize=12)\naxd['B'].set_title(\"Density, t = 0.16\",fontsize=12)\naxd['B'].grid(True,color='gray',ls='--',alpha=0.5)\n\naxd['C'].plot(space,st_data[0,:,25],color='k',label=\"Exact\")\ni = np.where(sptim_lin[::nt_samp,1] >= 0.05)[0][0]\naxd['C'].plot(sptim_lin[:nx_samp,0], pred_rj4a_lin[101*i:(i+1)*101,0],color='r', label='Prediction')\naxd['C'].legend(loc='best')\naxd['C'].set_xlim(0,1)\naxd['C'].set_ylim(0,1.2)\naxd['C'].set_xlabel(\"X\",fontsize=12)\naxd['C'].set_title(\"Density, t = 0.05\",fontsize=12)\naxd['C'].grid(True,color='gray',ls='--',alpha=0.5)\n\nfig.set_size_inches(8,4)\nfig.colorbar(img, location='right',ax=axd['A'],fraction=0.01, aspect=50)\nfig.savefig(\"RJ4a_mosaic.jpg\", dpi=300)\nfig",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e7fb2418fd7db874e82f4a3262b6cc2898bb1999 | 625,448 | ipynb | Jupyter Notebook | talktorials/5_compound_clustering/T5_compound_clustering.ipynb | caramirezs/TeachOpenCADD | 8fcf0b388822bbffce5bad3b7c818fb100d942d7 | [
"CC-BY-4.0"
] | null | null | null | talktorials/5_compound_clustering/T5_compound_clustering.ipynb | caramirezs/TeachOpenCADD | 8fcf0b388822bbffce5bad3b7c818fb100d942d7 | [
"CC-BY-4.0"
] | null | null | null | talktorials/5_compound_clustering/T5_compound_clustering.ipynb | caramirezs/TeachOpenCADD | 8fcf0b388822bbffce5bad3b7c818fb100d942d7 | [
"CC-BY-4.0"
] | null | null | null | 529.591871 | 119,128 | 0.943017 | [
[
[
"# Talktorial 5\n\n# Compound clustering\n\n#### Developed in the CADD seminars 2017 and 2018, AG Volkamer, Charité/FU Berlin \n\nCalvinna Caswara and Gizem Spriewald",
"_____no_output_____"
],
[
"## Aim of this talktorial\n\nSimilar compounds might bind to the same targets and show similar effects. \nBased on this similar property principle, compound similarity can be used to build chemical groups via clustering. \nFrom such a clustering, a diverse set of compounds can also be selected from a larger set of screening compounds for further experimental testing.\n\n## Learning goals\n\nIn this talktorial, we will learn more about:\n\n* How to group compounds and how to pick a diverse set of compounds\n* Short introduction to two clustering algorithms\n* Application of the Butina clustering algorithm to a sample compound set\n\n### Theory\n\n* Introduction to clustering and Jarvis-Patrick algorithm\n* Detailed explanation of Butina clustering\n* Picking diverse compounds\n\n### Practical\n\n* Examples for Butina clustering and compound picking\n\n## References\n\n* Butina, D. Unsupervised Data Base Clustering Based on Daylight’s Fingerprint and Tanimoto Similarity: A Fast and Automated Way To Cluster Small and Large Data Set. J. Chem. Inf. Comput. Sci. 1999.\n* Leach, Andrew R., Gillet, Valerie J. An Introduction to Chemoinformatics. 2003\n* Jarvis-Patrick Clustering: http://www.improvedoutcomes.com/docs/WebSiteDocs/Clustering/Jarvis-Patrick_Clustering_Overview.htm\n* TDT Tutorial: https://github.com/sriniker/TDT-tutorial-2014/blob/master/TDT_challenge_tutorial.ipynb\n* rdkit clustering documentation: http://rdkit.org/docs/Cookbook.html#clustering-molecules",
"_____no_output_____"
],
[
"_____________________________________________________________________________________________________________________\n\n\n## Theory\n\n### Introduction to clustering and Jarvis-Patrick algorithm\n\n[Clustering](https://en.wikipedia.org/wiki/Cluster_analysis) can be defined as 'the task of grouping a set of objects in such a way that objects in the same group (called a cluster) are more similar (in some sense) to each other than to those in other groups (clusters)'.\n\nCompound clustering in pharmaceutical research is often based on chemical or structural similarity between compounds to find groups that share properties as well as to design a diverse and representative set for further analysis. \n\nGeneral procedure: \n\n* Method are based on clustering data by similarity between neighboring points. \n* In cheminformatics, compounds are often encoded as molecular fingerprints and similarity can be described by the Tanimoto similarity (see **talktorial 4**).\n * As a quick reminder: Fingerprints are binary vectors where each bit indicates the presence or absence of a particular substructural fragment within a molecule. \n * Similarity (or distance) matrix: The similarity between each pair of molecules represented by binary fingerprints is most frequently quantified using the Tanimoto coefficient, which measures the number of common features (bits). \n * The value of the Tanimoto coefficient ranges from zero (no similarity) to one (high similarity).\n\nThere are a number of clustering algorithms available, with the [Jarvis-Patrick clustering](http://www.improvedoutcomes.com/docs/WebSiteDocs/Clustering/Jarvis-Patrick_Clustering_Overview.htm) being one of the most widely used algorithms in the pharmaceutical context.\n\nJarvis-Patrick clustering algorithm is defined by two parameters K and K<sub>min</sub>:\n* Calculate the set of K nearest neighbors for each molecule. \n* Two molecules cluster together if \n * they are in each others list of nearest neighbors\n * they have at least K<sub>min</sub> of their K nearest neighbors in common.\n\nThe Jarvis-Patrick clustering algorithm is deterministic and able to deal with large sets of molecules in a matter of a few hours. However, a downside lies in the fact that this method tends to produce large heterogeneous clusters (see ref. Butina clustering). \n\nMore clustering algorithms can also be found in the [scikit-learn clustering module](http://scikit-learn.org/stable/modules/clustering.html).",
"_____no_output_____"
],
[
"### Detailed explanation of Butina clustering\n\nButina clustering ([*J. Chem. Inf. Model.*(1999), 39(4), 747](https://pubs.acs.org/doi/abs/10.1021/ci9803381)) was developed to identify smaller but homogeneous clusters, with the prerequisite that (at least) the cluster centroid will be more similar than a given threshold to every other molecule in the cluster.\n\nThese are the key steps in this clustering approach (see flowchart below):\n\n#### 1. Data preparation and compound encoding\n* To identify chemical similarities, the compounds in the input data (e.g. given as SMILES) will be encoded as molecular fingerprints, e.g., RDK5 fingerprint which is a subgraph-based fingerprint similar to the well known [Daylight Fingerprint](/http://www.daylight.com/dayhtml/doc/theory/theory.finger.html) (which was used in the original publication).\n\n\n#### 2. Tanimoto similarity (or distance) matrix\n* The similarity between two fingerprints is calculated using the Tanimoto coefficient.\n* Matrix with Tanimoto similarities between all possible molecule/fingerprint pairs (n*n similarity matrix with n=number of molecules, upper triangle matrix used only)\n* Equally, the distances matrix can be calculated (1 - similarity)\n\n#### 3. Clustering molecules: Centroids and exclusion spheres \nNote: Molecules will be clustered together, if they have a maximum distance below a specified cut-off from the cluster centroid (if distance matrix is used) or if they have a minimum similarity above the specified cut-off (if similarity matrix is used). \n\n* **Identification of potential cluster centroids**\n * The cluster centroid is the molecule within a given cluster which has the largest number of neighbors.\n * Annotate neighbors: For each molecule count all molecules with a Tanimoto distance below a given threshold.\n * Sort the molecules by their number of neighbors in descending order, so that potential cluster centroids (i.e. the compounds with the largest number of neighbors) are placed at the top of the file. \n\n* **Clustering based on the exclusion spheres**\n * Starting with the first molecule (centroid) in the sorted list\n * All molecules with a Tanimoto index above or equal to the cut-off value used for clustering then become members of that cluster (in case of similarity).\n * Each molecule that has been identified as a member of the given cluster is flagged and removed from further comparisons. Thus, flagged molecules cannot become either another cluster centroid or a member of another cluster. This process is like putting an exclusion sphere around the newly formed cluster.\n * Once the first compound in the list has found all its neighbors, the first available (i.e. not flagged) compound at the top of the list becomes the new cluster centroid.\n * The same process is repeated for all other unflagged molecules down the list.\n * Molecules that have not been flagged by the end of the clustering process become singletons.\n * Note that some molecules assigned as singletons can have neighbors at the given Tanimoto similarity index, but those neighbors have been excluded by a stringer cluster centroid.",
"_____no_output_____"
]
],
[
[
"from IPython.display import IFrame\nIFrame('images/butina_full.pdf', width=600, height=300)",
"_____no_output_____"
]
],
[
[
"*Figure 1:* Theoretical example of the Butina clustering algorithm, drawn by Calvinna Caswara.",
"_____no_output_____"
],
[
"### Picking diverse compounds\n\nFinding representative sets of compounds is a concept often used in pharmaceutical industry.\n\n* Let's say, we applied a virtual screening campaign but only have a limited amount of resources to experimentally test a few compounds in a confirmatory assay. \n* In order to obtain as much information as possible from this screen, we want to select a diverse set. Thus, we pick one representative of each chemical series in our list of potentially active compounds.\n\nAnother scenario would be to select one series to gain information about the structure-activity relationship, i.e., how do small structural changes in the molecule affect the in vitro activity.",
"_____no_output_____"
],
[
"## Practical\n\n### Example using the Butina Clustering Algorithm\nApplication is following the example of [TDT tutorial notebook by S. Riniker and G. Landrum](https://github.com/sriniker/TDT-tutorial-2014/blob/master/TDT_challenge_tutorial.ipynb).",
"_____no_output_____"
],
[
"#### 1. Load data and calculate fingerprints\nIn this part the data is prepared and fingerprints are calculated.",
"_____no_output_____"
]
],
[
[
"# Import packages\nimport pandas as pd\nimport numpy\nimport matplotlib.pyplot as plt\nimport time\nimport random\nfrom random import choices\nfrom rdkit import Chem\nfrom rdkit.Chem import AllChem\nfrom rdkit import DataStructs\nfrom rdkit.DataStructs import cDataStructs\nfrom rdkit.ML.Cluster import Butina\nfrom rdkit.Chem import Draw\nfrom rdkit.Chem import rdFingerprintGenerator\nfrom rdkit.Chem.Draw import IPythonConsole",
"_____no_output_____"
],
[
"# Load and have a look into data\n# Filtered data taken from talktorial 2\ncompound_df= pd.read_csv('../data/T2/EGFR_compounds_lipinski.csv',sep=\";\", index_col=0)\nprint('data frame shape:',compound_df.shape)\ncompound_df.head()",
"data frame shape: (4925, 10)\n"
],
[
"# Create molecules from SMILES and store in array\nmols = []\nfor i in compound_df.index:\n chemblId = compound_df['molecule_chembl_id'][i]\n smiles = compound_df['smiles'][i]\n mols.append((Chem.MolFromSmiles(smiles), chemblId))\nmols[0:5]",
"_____no_output_____"
],
[
"# Create fingerprints for all molecules\nrdkit_gen = rdFingerprintGenerator.GetRDKitFPGenerator(maxPath=5)\nfingerprints = [rdkit_gen.GetFingerprint(m) for m,idx in mols]\n\n# How many compounds/fingerprints do we have?\nprint('Number of compounds converted:',len(fingerprints))\nprint('Fingerprint length per compound:',len(fingerprints[0]))",
"Number of compounds converted: 4925\nFingerprint length per compound: 2048\n"
]
],
[
[
"#### 2. Tanimoto similarity and distance matrix\nNow that we generated fingerprints, we move on to the next step: The identification of potential cluster centroids. For this, we define functions to calculate the Tanimoto similarity and distance matrix.",
"_____no_output_____"
]
],
[
[
"# Calculate distance matrix for fingerprint list\ndef Tanimoto_distance_matrix(fp_list):\n dissimilarity_matrix = []\n for i in range(1,len(fp_list)):\n similarities = DataStructs.BulkTanimotoSimilarity(fp_list[i],fp_list[:i])\n # Since we need a distance matrix, calculate 1-x for every element in similarity matrix\n dissimilarity_matrix.extend([1-x for x in similarities])\n return dissimilarity_matrix",
"_____no_output_____"
]
],
[
[
"See also [rdkit Cookbook: Clustering molecules](http://rdkit.org/docs/Cookbook.html#clustering-molecules).",
"_____no_output_____"
]
],
[
[
"# Example: Calculate single similarity of two fingerprints\nsim = DataStructs.TanimotoSimilarity(fingerprints[0],fingerprints[1])\nprint ('Tanimoto similarity: %4.2f, distance: %4.2f' %(sim,1-sim))",
"Tanimoto similarity: 0.73, distance: 0.27\n"
],
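[
"# Added cross-check: the Tanimoto coefficient from raw bit counts,\n# common / (bits_a + bits_b - common), should match the RDKit value above\n# (RDKit's bit-vector '&' and GetNumOnBits are assumed here).\ncommon = (fingerprints[0] & fingerprints[1]).GetNumOnBits()\na, b = fingerprints[0].GetNumOnBits(), fingerprints[1].GetNumOnBits()\nprint('manual: %4.2f, rdkit: %4.2f' % (common / (a + b - common), sim))",
"_____no_output_____"
],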
[
"# Example: Calculate distance matrix (distance = 1-similarity)\nTanimoto_distance_matrix(fingerprints)[0:5]",
"_____no_output_____"
],
[
"# Side note: That looked like a list and not a matrix. \n# But it is a triangular similarity matrix in the form of a list\nn = len(fingerprints)\n\n# Calculate number of elements in triangular matrix via n*(n-1)/2\nelem_triangular_matr = (n*(n-1))/2\nprint(int(elem_triangular_matr), len(Tanimoto_distance_matrix(fingerprints)))",
"12125350 12125350\n"
]
],
[
[
"#### 3. Clustering molecules: Centroids and exclusion spheres\nIn this part, we cluster the molecules and look at the results.",
"_____no_output_____"
],
[
"Define a clustering function.",
"_____no_output_____"
]
],
[
[
"# Input: Fingerprints and a threshold for the clustering\ndef ClusterFps(fps,cutoff=0.2):\n # Calculate Tanimoto distance matrix\n distance_matr = Tanimoto_distance_matrix(fps)\n # Now cluster the data with the implemented Butina algorithm:\n clusters = Butina.ClusterData(distance_matr,len(fps),cutoff,isDistData=True)\n return clusters",
"_____no_output_____"
]
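,
[
"# Illustrative toy sketch (added, not part of the original talktorial): the\n# exclusion-sphere idea behind Butina clustering on a plain distance function.\n# The toy data and cut-off are made-up assumptions; the real clustering uses\n# RDKit's Butina.ClusterData via ClusterFps above.\ndef toy_butina(items, dist, cutoff):\n n = len(items)\n neighbors = {i: [j for j in range(n) if j != i and dist(items[i], items[j]) <= cutoff] for i in range(n)}\n # Potential centroids first: sort by descending neighbor count\n order = sorted(neighbors, key=lambda i: len(neighbors[i]), reverse=True)\n flagged, toy_clusters = set(), []\n for i in order:\n if i in flagged:\n continue\n members = [i] + [j for j in neighbors[i] if j not in flagged]\n flagged.update(members) # exclusion sphere: members leave the pool\n toy_clusters.append(members)\n return toy_clusters\n\nprint(toy_butina([0.0, 0.1, 0.15, 0.9, 1.0], lambda a, b: abs(a - b), 0.2))",
"_____no_output_____"
]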
],
[
[
"Cluster the molecules based on their fingerprint similarity.",
"_____no_output_____"
]
],
[
[
"# Run the clustering procedure for the dataset\nclusters = ClusterFps(fingerprints,cutoff=0.3)\n\n# Give a short report about the numbers of clusters and their sizes\nnum_clust_g1 = len([c for c in clusters if len(c) == 1])\nnum_clust_g5 = len([c for c in clusters if len(c) > 5])\nnum_clust_g25 = len([c for c in clusters if len(c) > 25])\nnum_clust_g100 = len([c for c in clusters if len(c) > 100])\n\nprint(\"total # clusters: \", len(clusters))\nprint(\"# clusters with only 1 compound: \", num_clust_g1)\nprint(\"# clusters with >5 compounds: \", num_clust_g5)\nprint(\"# clusters with >25 compounds: \", num_clust_g25)\nprint(\"# clusters with >100 compounds: \", num_clust_g100)",
"total # clusters: 795\n# clusters with only 1 compound: 373\n# clusters with >5 compounds: 176\n# clusters with >25 compounds: 31\n# clusters with >100 compounds: 4\n"
],
[
"# Plot the size of the clusters\nfig = plt.figure(1, figsize=(10, 4))\nplt1 = plt.subplot(111)\nplt.axis([0, len(clusters), 0, len(clusters[0])+1])\nplt.xlabel('Cluster index', fontsize=20)\nplt.ylabel('Number of molecules', fontsize=20)\nplt.tick_params(labelsize=16)\nplt1.bar(range(1, len(clusters)), [len(c) for c in clusters[:len(clusters)-1]], lw=0)\nplt.show()",
"_____no_output_____"
]
],
[
[
"#### How to pick a reasonable cutoff?\nSince the clustering result depends on the threshold chosen by the user, we will have a closer look on the choice of a cutoff.",
"_____no_output_____"
]
],
[
[
"for i in numpy.arange(0., 1.0, 0.1):\n clusters = ClusterFps(fingerprints,cutoff=i)\n fig = plt.figure(1, figsize=(10, 4))\n plt1 = plt.subplot(111)\n plt.axis([0, len(clusters), 0, len(clusters[0])+1])\n plt.xlabel('Cluster index', fontsize=20)\n plt.ylabel('Number of molecules', fontsize=20)\n plt.tick_params(labelsize=16)\n plt.title('Threshold: '+str('%3.1f' %i), fontsize=20)\n plt1.bar(range(1, len(clusters)), [len(c) for c in clusters[:len(clusters)-1]], lw=0)\n plt.show()",
"_____no_output_____"
]
],
[
[
"As you can see, the higher the threshold (distance cutoff), the more molecules are considered as similar and, therefore, clustered into less clusters.\nThe lower the threshold, the more small clusters and \"singletons\" appear.\n* The smaller the distance value cut-off, the more similar the compounds are required to be to belong to one cluster.\n\nLooking at the plots above, we decided to choose a distance threshold of 0.2. There are not many singletons and the cluster sizes don't have an extreme but smooth distribution.",
"_____no_output_____"
]
],
[
[
"dist_co = 0.2\nclusters = ClusterFps(fingerprints,cutoff=dist_co)\n\n# Plot the size of the clusters - save plot\nfig = plt.figure(1, figsize=(8, 2.5))\nplt1 = plt.subplot(111)\nplt.axis([0, len(clusters), 0, len(clusters[0])+1])\nplt.xlabel('Cluster index', fontsize=20)\nplt.ylabel('# molecules', fontsize=20)\nplt.tick_params(labelsize=16)\nplt1.bar(range(1, len(clusters)), [len(c) for c in clusters[:len(clusters)-1]], lw=0)\nplt.title('Threshold: '+str('%3.1f' %dist_co), fontsize=20)\nplt.savefig(\"../data/T5/cluster_dist_cutoff_%4.2f.png\" %dist_co, dpi=300, bbox_inches=\"tight\", transparent=True)\n\nprint('Number of clusters %d from %d molecules at distance cut-off %4.2f' %(len(clusters), len(mols), dist_co))\nprint('Number of molecules in largest cluster:', len(clusters[0]))\nprint('Similarity between two random points in same cluster %4.2f'%DataStructs.TanimotoSimilarity(fingerprints[clusters[0][0]],fingerprints[clusters[0][1]]))\nprint('Similarity between two random points in different cluster %4.2f'%DataStructs.TanimotoSimilarity(fingerprints[clusters[0][0]],fingerprints[clusters[1][0]]))",
"Number of clusters 1225 from 4925 molecules at distance cut-off 0.20\nNumber of molecules in largest cluster: 146\nSimilarity between two random points in same cluster 0.82\nSimilarity between two random points in different cluster 0.22\n"
]
],
[
[
"### Cluster visualization",
"_____no_output_____"
],
[
"#### 10 examples from largest cluster\n\nNow, let's have a closer look at the first 10 molecular structures of the first/largest clusters.",
"_____no_output_____"
]
],
[
[
"print ('Ten molecules from largest cluster:')\n# Draw molecules\nDraw.MolsToGridImage([mols[i][0] for i in clusters[0][:10]], \n legends=[mols[i][1] for i in clusters[0][:10]], \n molsPerRow=5)",
"Ten molecules from largest cluster:\n"
],
[
"# Save molecules from largest cluster for MCS analysis in Talktorial 9\nw = Chem.SDWriter('../data/T5/molSet_largestCluster.sdf')\n\n# Prepare data\ntmp_mols=[]\nfor i in clusters[0]:\n tmp = mols[i][0]\n tmp.SetProp(\"_Name\",mols[i][1])\n tmp_mols.append(tmp) \n\n# Write data\nfor m in tmp_mols: w.write(m)",
"_____no_output_____"
]
],
[
[
"#### 10 examples from second largest cluster",
"_____no_output_____"
]
],
[
[
"print ('Ten molecules from second largest cluster:')\n# Draw molecules\nDraw.MolsToGridImage([mols[i][0] for i in clusters[1][:10]], \n legends=[mols[i][1] for i in clusters[1][:10]], \n molsPerRow=5)",
"Ten molecules from second largest cluster:\n"
]
],
[
[
"The first ten molecules in the respective clusters look indeed similar to each other and many share a common scaffold (visually detected). \n\nSee **talktorial 6** for more information on how to calculate the maximum common substructure (MCS) of a set of molecules.",
"_____no_output_____"
],
[
"#### Examples from first 10 clusters\n\nFor comparison, we have a look at the cluster centers of the first 10 clusters.",
"_____no_output_____"
]
],
[
[
"print ('Ten molecules from first 10 clusters:')\n# Draw molecules\nDraw.MolsToGridImage([mols[clusters[i][0]][0] for i in range(10)], \n legends=[mols[clusters[i][0]][1] for i in range(10)], \n molsPerRow=5)",
"Ten molecules from first 10 clusters:\n"
]
],
[
[
"Save cluster centers from first 3 clusters as SVG file.",
"_____no_output_____"
]
],
[
[
"# Generate image\nimg = Draw.MolsToGridImage([mols[clusters[i][0]][0] for i in range(0,3)],\n legends=[\"Cluster \"+str(i) for i in range(1,4)],\n subImgSize=(200,200), useSVG=True)\n\n# Get SVG data\nmolsvg = img.data\n\n# Replace non-transparent to transparent background and set font size\nmolsvg = molsvg.replace(\"opacity:1.0\", \"opacity:0.0\");\nmolsvg = molsvg.replace(\"12px\", \"20px\");\n\n# Save altered SVG data to file\nf = open(\"../data/T5/cluster_representatives.svg\", \"w\")\nf.write(molsvg)\nf.close()",
"_____no_output_____"
]
],
[
[
"While still some similarity is visible, clearly, the centroids from the different clusters look more dissimilar then the compounds within one cluster.",
"_____no_output_____"
],
[
"#### Intra-cluster Tanimoto similarities\n\nWe can also have a look at the intra-cluster Tanimoto similarities.",
"_____no_output_____"
]
],
[
[
"# Function to compute Tanimoto similarity for all pairs of fingerprints in each cluster\ndef IntraTanimoto(fps_clusters):\n intra_similarity =[]\n # Calculate intra similarity per cluster\n for k in range(0,len(fps_clusters)):\n # Tanimoto distance matrix function converted to similarity matrix (1-distance)\n intra_similarity.append([1-x for x in Tanimoto_distance_matrix(fps_clusters[k])])\n return intra_similarity ",
"_____no_output_____"
],
[
"# Recompute fingerprints for 10 first clusters\nmol_fps_per_cluster=[]\nfor c in clusters[:10]:\n mol_fps_per_cluster.append([rdkit_gen.GetFingerprint(mols[i][0]) for i in c])\n# Compute intra-cluster similarity \nintra_sim = IntraTanimoto(mol_fps_per_cluster)",
"_____no_output_____"
],
[
"# Violin plot with intra-cluster similarity\npos = list(range(10))\nlabels = pos\nplt.figure(1, figsize=(10, 5))\nax = plt.subplot(111)\nr = plt.violinplot(intra_sim, pos, showmeans=True, showmedians=True, showextrema=False)\nax.set_xticks(pos)\nax.set_xticklabels(labels)\nax.set_yticks(numpy.arange(0.6, 1., 0.1))\nax.set_title('Intra-cluster Tanimoto similarity', fontsize=13)\nr['cmeans'].set_color('red')\n# mean=red, median=blue",
"_____no_output_____"
]
],
[
[
"### Compound picking\nIn the following, we are going to pick a final list of **max. 1000 compounds** as a **diverse** subset. \n\nFor this, we take the cluster centroid from each cluster (i.e. the first molecule of each cluster) and then we take - starting with the largest cluster - for each cluster the 10 molecules (or 50% if less than 10 molecules are left in the cluster) most similar to the centroid, until we have selected max. 1000 compounds. Thus, we have representatives of each cluster. \n\nAim of this compound picking is to ensure the diversity for a smaller set of compounds which are proposed for testing in a confirmatory assay. \n\nPicking procedure was adapted from [TDT tutorial notebook by S. Riniker and G. Landrum](https://github.com/sriniker/TDT-tutorial-2014/blob/master/TDT_challenge_tutorial.ipynb). \nAs described there: The idea behind this approach is to ensure diversity (representatives of each cluster) while getting some SAR from the results of the confirmatory assay (groups of quite similar molecules from larger clusters retained).",
"_____no_output_____"
],
[
"Get cluster centers.",
"_____no_output_____"
]
],
[
[
"# Get the cluster center of each cluster (first molecule in each cluster)\nclus_center = [mols[c[0]] for c in clusters]\n# How many cluster centers/clusters do we have?\nprint('Number of cluster centers: ', len(clus_center))",
"Number of cluster centers: 1225\n"
]
],
[
[
"Sort clusters by size and molecules in each cluster by similarity.",
"_____no_output_____"
]
],
[
[
"# Sort the molecules within a cluster based on their similarity \n# to the cluster center and sort the clusters based on their size\nclusters_sort = []\nfor c in clusters:\n if len(c) < 2: continue # Singletons\n else:\n # Compute fingerprints for each cluster element\n fps_clust = [rdkit_gen.GetFingerprint(mols[i][0]) for i in c]\n # Similarity of all cluster members to the cluster center\n simils = DataStructs.BulkTanimotoSimilarity(fps_clust[0],fps_clust[1:])\n # Add index of the molecule to its similarity (centroid excluded!)\n simils = [(s,index) for s,index in zip(simils, c[1:])]\n # Sort in descending order by similarity\n simils.sort(reverse=True)\n # Save cluster size and index of molecules in clusters_sort\n clusters_sort.append((len(simils), [i for s,i in simils]))\n # Sort in descending order by cluster size\n clusters_sort.sort(reverse=True)",
"_____no_output_____"
]
],
[
[
"Pick a maximum of 1000 compounds.",
"_____no_output_____"
]
],
[
[
"# Count selected molecules, pick cluster centers first\nsel_molecules = clus_center.copy()\n# Take 10 molecules (or a maximum of 50%) of each cluster starting with the largest one\nindex = 0\ndiff = 1000 - len(sel_molecules)\nwhile diff > 0 and index < len(clusters_sort):\n # Take indices of sorted clusters\n tmp_cluster = clusters_sort[index][1]\n # If the first cluster is > 10 big then take exactly 10 compounds\n if clusters_sort[index][0] > 10:\n num_compounds = 10\n # If smaller, take half of the molecules\n else:\n num_compounds = int(0.5*len(c))+1\n if num_compounds > diff: \n num_compounds = diff\n # Write picked molecules and their structures into list of lists called picked_fps\n sel_molecules += [mols[i] for i in tmp_cluster[:num_compounds]]\n index += 1\n diff = 1000 - len(sel_molecules)\nprint('# Selected molecules: '+str(len(sel_molecules)))",
"# Selected molecules: 1225\n"
]
],
[
[
"This set of diverse molecules could now be used for experimental testing.",
"_____no_output_____"
],
[
"### (Additional information: run times)\n\nAt the end of the talktorial, we can play with the size of the dataset and see how the Butina clustering run time changes.",
"_____no_output_____"
]
],
[
[
"# Reuse old dataset\nsampled_mols = mols.copy()",
"_____no_output_____"
]
],
[
[
"Note that you can try out larger datasets, but data sizes larger than 10000 data points already start to consume quite some memory and time (that's why we stopped there). ",
"_____no_output_____"
]
],
[
[
"# Helper function for time computation\ndef MeasureRuntime(sampled_mols):\n start_time = time.time()\n sampled_fingerprints = [rdkit_gen.GetFingerprint(m) for m,idx in sampled_mols]\n\n # Run the clustering with the dataset\n sampled_clusters = ClusterFps(sampled_fingerprints,cutoff=0.3)\n return(time.time() - start_time)",
"_____no_output_____"
],
[
"dsize=[100, 500, 1000, 2000, 4000, 6000, 8000, 10000] \nruntimes=[]\n# Take random samples with replacement\nfor s in dsize:\n tmp_set = [sampled_mols[i] for i in sorted(numpy.random.choice(range(len(sampled_mols)), size=s))]\n tmp_t= MeasureRuntime(tmp_set)\n print('Dataset size %d, time %4.2f seconds' %(s, tmp_t))\n runtimes.append(tmp_t)",
"Dataset size 100, time 0.07 seconds\nDataset size 500, time 0.34 seconds\nDataset size 1000, time 0.72 seconds\nDataset size 2000, time 2.01 seconds\nDataset size 4000, time 4.90 seconds\nDataset size 6000, time 10.00 seconds\nDataset size 8000, time 14.33 seconds\nDataset size 10000, time 23.43 seconds\n"
],
[
"plt.plot(dsize, runtimes, 'g^')\nplt.title('Runtime measurement of Butina Clustering with different dataset sizes')\nplt.xlabel('# Molecules in data set')\nplt.ylabel('Runtime in seconds')\nplt.show()",
"_____no_output_____"
]
],
[
[
"## Quiz\n* Why is clustering of molecules important?\n* Which algorithms can you use to cluster a set of molecules and what is the general idea behind the algorithm?\n* Do you know other clustering algorithms?",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
]
] |
e7fb432eef1dad67cdda009b4932b8452dcc0bc3 | 20,743 | ipynb | Jupyter Notebook | site/en/r2/tutorials/load_data/text.ipynb | crypdra/docs | 41ab06fd14b3a3dff933bb80b19ce46c7c5781cf | [
"Apache-2.0"
] | 2 | 2019-10-25T18:51:16.000Z | 2019-10-25T18:51:18.000Z | site/en/r2/tutorials/load_data/text.ipynb | crypdra/docs | 41ab06fd14b3a3dff933bb80b19ce46c7c5781cf | [
"Apache-2.0"
] | null | null | null | site/en/r2/tutorials/load_data/text.ipynb | crypdra/docs | 41ab06fd14b3a3dff933bb80b19ce46c7c5781cf | [
"Apache-2.0"
] | null | null | null | 31.052395 | 368 | 0.529191 | [
[
[
"##### Copyright 2018 The TensorFlow Authors.\n\n",
"_____no_output_____"
]
],
[
[
"#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.",
"_____no_output_____"
]
],
[
[
"# Load text with tf.data",
"_____no_output_____"
],
[
"<table class=\"tfo-notebook-buttons\" align=\"left\">\n <td>\n <a target=\"_blank\" href=\"https://www.tensorflow.org/beta/tutorials/load_data/text\"><img src=\"https://www.tensorflow.org/images/tf_logo_32px.png\" />View on TensorFlow.org</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/r2/tutorials/load_data/text.ipynb\"><img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" />Run in Google Colab</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://github.com/tensorflow/docs/blob/master/site/en/r2/tutorials/load_data/text.ipynb\"><img src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" />View source on GitHub</a>\n </td>\n <td>\n <a href=\"https://storage.googleapis.com/tensorflow_docs/docs/site/en/r2/tutorials/load_data/text.ipynb\"><img src=\"https://www.tensorflow.org/images/download_logo_32px.png\" />Download notebook</a>\n </td>\n</table>",
"_____no_output_____"
],
[
"This tutorial provides an example of how to use `tf.data.TextLineDataset` to load examples from text files. `TextLineDataset` is designed to create a dataset from a text file, in which each example is a line of text from the original file. This is potentially useful for any text data that is primarily line-based (for example, poetry or error logs).\n\nIn this tutorial, we'll use three different English translations of the same work, Homer's Illiad, and train a model to identify the translator given a single line of text.",
"_____no_output_____"
],
[
"## Setup",
"_____no_output_____"
]
],
[
[
"from __future__ import absolute_import, division, print_function, unicode_literals\n\ntry:\n # %tensorflow_version only exists in Colab.\n %tensorflow_version 2.x\nexcept Exception:\n pass\nimport tensorflow as tf\n\nimport tensorflow_datasets as tfds\nimport os",
"_____no_output_____"
]
],
[
[
"The texts of the three translations are by:\n\n - [William Cowper](https://en.wikipedia.org/wiki/William_Cowper) — [text](https://storage.googleapis.com/download.tensorflow.org/data/illiad/cowper.txt)\n\n - [Edward, Earl of Derby](https://en.wikipedia.org/wiki/Edward_Smith-Stanley,_14th_Earl_of_Derby) — [text](https://storage.googleapis.com/download.tensorflow.org/data/illiad/derby.txt)\n\n- [Samuel Butler](https://en.wikipedia.org/wiki/Samuel_Butler_%28novelist%29) — [text](https://storage.googleapis.com/download.tensorflow.org/data/illiad/butler.txt)\n\nThe text files used in this tutorial have undergone some typical preprocessing tasks, mostly removing stuff — document header and footer, line numbers, chapter titles. Download these lightly munged files locally.",
"_____no_output_____"
]
],
[
[
"DIRECTORY_URL = 'https://storage.googleapis.com/download.tensorflow.org/data/illiad/'\nFILE_NAMES = ['cowper.txt', 'derby.txt', 'butler.txt']\n\nfor name in FILE_NAMES:\n text_dir = tf.keras.utils.get_file(name, origin=DIRECTORY_URL+name)\n \nparent_dir = os.path.dirname(text_dir)\n\nparent_dir",
"_____no_output_____"
]
],
[
[
"## Load text into datasets\n\nIterate through the files, loading each one into its own dataset.\n\nEach example needs to be labeled individually labeled, so use `tf.data.Dataset.map` to apply a labeler function to each one. This will iterate over every example in the dataset, returning (`example, label`) pairs.",
"_____no_output_____"
]
],
[
[
"def labeler(example, index):\n return example, tf.cast(index, tf.int64) \n\nlabeled_data_sets = []\n\nfor i, file_name in enumerate(FILE_NAMES):\n lines_dataset = tf.data.TextLineDataset(os.path.join(parent_dir, file_name))\n labeled_dataset = lines_dataset.map(lambda ex: labeler(ex, i))\n labeled_data_sets.append(labeled_dataset)",
"_____no_output_____"
]
],
[
[
"Combine these labeled datasets into a single dataset, and shuffle it.\n",
"_____no_output_____"
]
],
[
[
"BUFFER_SIZE = 50000\nBATCH_SIZE = 64\nTAKE_SIZE = 5000",
"_____no_output_____"
],
[
"all_labeled_data = labeled_data_sets[0]\nfor labeled_dataset in labeled_data_sets[1:]:\n all_labeled_data = all_labeled_data.concatenate(labeled_dataset)\n \nall_labeled_data = all_labeled_data.shuffle(\n BUFFER_SIZE, reshuffle_each_iteration=False)",
"_____no_output_____"
]
],
[
[
"You can use `tf.data.Dataset.take` and `print` to see what the `(example, label)` pairs look like. The `numpy` property shows each Tensor's value.",
"_____no_output_____"
]
],
[
[
"for ex in all_labeled_data.take(5):\n print(ex)",
"_____no_output_____"
]
],
[
[
"## Encode text lines as numbers\n\nMachine learning models work on numbers, not words, so the string values need to be converted into lists of numbers. To do that, map each unique word to a unique integer.\n\n### Build vocabulary\n\nFirst, build a vocabulary by tokenizing the text into a collection of individual unique words. There are a few ways to do this in both TensorFlow and Python. For this tutorial:\n\n1. Iterate over each example's `numpy` value.\n2. Use `tfds.features.text.Tokenizer` to split it into tokens.\n3. Collect these tokens into a Python set, to remove duplicates.\n4. Get the size of the vocabulary for later use.",
"_____no_output_____"
]
],
[
[
"tokenizer = tfds.features.text.Tokenizer()\n\nvocabulary_set = set()\nfor text_tensor, _ in all_labeled_data:\n some_tokens = tokenizer.tokenize(text_tensor.numpy())\n vocabulary_set.update(some_tokens)\n\nvocab_size = len(vocabulary_set)\nvocab_size",
"_____no_output_____"
]
],
[
[
"### Encode examples\n\nCreate an encoder by passing the `vocabulary_set` to `tfds.features.text.TokenTextEncoder`. The encoder's `encode` method takes in a string of text and returns a list of integers.",
"_____no_output_____"
]
],
[
[
"encoder = tfds.features.text.TokenTextEncoder(vocabulary_set)",
"_____no_output_____"
]
],
[
[
"You can try this on a single line to see what the output looks like.",
"_____no_output_____"
]
],
[
[
"example_text = next(iter(all_labeled_data))[0].numpy()\nprint(example_text)",
"_____no_output_____"
],
[
"encoded_example = encoder.encode(example_text)\nprint(encoded_example)",
"_____no_output_____"
]
],
[
[
"Now run the encoder on the dataset by wrapping it in `tf.py_function` and passing that to the dataset's `map` method.",
"_____no_output_____"
]
],
[
[
"def encode(text_tensor, label):\n encoded_text = encoder.encode(text_tensor.numpy())\n return encoded_text, label\n\ndef encode_map_fn(text, label):\n return tf.py_function(encode, inp=[text, label], Tout=(tf.int64, tf.int64))\n\nall_encoded_data = all_labeled_data.map(encode_map_fn)",
"_____no_output_____"
]
],
[
[
"## Split the dataset into text and train batches\n\nUse `tf.data.Dataset.take` and `tf.data.Dataset.skip` to create a small test dataset and a larger training set.\n\nBefore being passed into the model, the datasets need to be batched. Typically, the examples inside of a batch need to be the same size and shape. But, the examples in these datasets are not all the same size — each line of text had a different number of words. So use `tf.data.Dataset.padded_batch` (instead of `batch`) to pad the examples to the same size.",
"_____no_output_____"
]
],
[
[
"train_data = all_encoded_data.skip(TAKE_SIZE).shuffle(BUFFER_SIZE)\ntrain_data = train_data.padded_batch(BATCH_SIZE, padded_shapes=([-1],[]))\n\ntest_data = all_encoded_data.take(TAKE_SIZE)\ntest_data = test_data.padded_batch(BATCH_SIZE, padded_shapes=([-1],[]))",
"_____no_output_____"
]
],
[
[
"Now, `test_data` and `train_data` are not collections of (`example, label`) pairs, but collections of batches. Each batch is a pair of (*many examples*, *many labels*) represented as arrays.\n\nTo illustrate:",
"_____no_output_____"
]
],
[
[
"sample_text, sample_labels = next(iter(test_data))\n\nsample_text[0], sample_labels[0]",
"_____no_output_____"
]
],
[
[
"Since we have introduced a new token encoding (the zero used for padding), the vocabulary size has increased by one.",
"_____no_output_____"
]
],
[
[
"vocab_size += 1",
"_____no_output_____"
]
],
[
[
"## Build the model\n\n",
"_____no_output_____"
]
],
[
[
"model = tf.keras.Sequential()",
"_____no_output_____"
]
],
[
[
"The first layer converts integer representations to dense vector embeddings. See the [Word Embeddings](../../tutorials/sequences/word_embeddings) tutorial for more details. ",
"_____no_output_____"
]
],
[
[
"model.add(tf.keras.layers.Embedding(vocab_size, 64))",
"_____no_output_____"
]
],
[
[
"The next layer is a [Long Short-Term Memory](http://colah.github.io/posts/2015-08-Understanding-LSTMs/) layer, which lets the model understand words in their context with other words. A bidirectional wrapper on the LSTM helps it to learn about the datapoints in relationship to the datapoints that came before it and after it.",
"_____no_output_____"
]
],
[
[
"model.add(tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(64)))",
"_____no_output_____"
]
],
[
[
"Finally we'll have a series of one or more densely connected layers, with the last one being the output layer. The output layer produces a probability for all the labels. The one with the highest probability is the models prediction of an example's label.",
"_____no_output_____"
]
],
[
[
"# One or more dense layers.\n# Edit the list in the `for` line to experiment with layer sizes.\nfor units in [64, 64]:\n model.add(tf.keras.layers.Dense(units, activation='relu'))\n\n# Output layer. The first argument is the number of labels.\nmodel.add(tf.keras.layers.Dense(3, activation='softmax'))",
"_____no_output_____"
]
],
[
[
"Finally, compile the model. For a softmax categorization model, use `sparse_categorical_crossentropy` as the loss function. You can try other optimizers, but `adam` is very common.",
"_____no_output_____"
]
],
[
[
"model.compile(optimizer='adam',\n loss='sparse_categorical_crossentropy',\n metrics=['accuracy'])",
"_____no_output_____"
]
],
[
[
"## Train the model\n\nThis model running on this data produces decent results (about 83%).",
"_____no_output_____"
]
],
[
[
"model.fit(train_data, epochs=3, validation_data=test_data)",
"_____no_output_____"
],
[
"eval_loss, eval_acc = model.evaluate(test_data)\n\nprint('\\nEval loss: {:.3f}, Eval accuracy: {:.3f}'.format(eval_loss, eval_acc))",
"_____no_output_____"
],
[
"",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
]
] |
e7fb445694ef571cf78cc94ff45cc66bb50630f1 | 3,631 | ipynb | Jupyter Notebook | fMRI_Data_Analysis.ipynb | npinak/fMRI | 30ce9f1111e37967e5958e1ad0bbb30fcca8bf12 | [
"MIT"
] | null | null | null | fMRI_Data_Analysis.ipynb | npinak/fMRI | 30ce9f1111e37967e5958e1ad0bbb30fcca8bf12 | [
"MIT"
] | null | null | null | fMRI_Data_Analysis.ipynb | npinak/fMRI | 30ce9f1111e37967e5958e1ad0bbb30fcca8bf12 | [
"MIT"
] | null | null | null | 36.31 | 385 | 0.576976 | [
[
[
"**fMRI Preprocessing**",
"_____no_output_____"
],
[
"- Converted DICOM images to NIfTI files.\n- Used SPM and R to preprocess fMRI data by correcting for slice timing, realigning to compensate for head motion, co-registering the fMRI data to a MP-RAGE image using a rigid body transformation, spatially normalizing the co-registered images to a T1 template, applying gaussian spatial smoothing, segmentation on the anatomical image, and finally bias field correction.\n- Used script to preprocess data from multiple subjects automatically.\n",
"_____no_output_____"
],
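[
"The cells below are not part of the original summary; they are only a hedged illustration of how a NIfTI file produced by the conversion step above could be inspected in Python with `nibabel` (the filename is hypothetical).",
"_____no_output_____"
],
[
"# Illustrative sketch only, not part of the original analysis.\n# 'sub-01_bold.nii.gz' is a hypothetical filename for a converted NIfTI file.\nimport nibabel as nib\n\nimg = nib.load('sub-01_bold.nii.gz')\nprint(img.shape)               # e.g. (64, 64, 36, 200): x, y, z, time\nprint(img.header.get_zooms())  # voxel sizes (plus TR for 4D images)",
"_____no_output_____"
],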
[
"**fMRI Analysis**",
"_____no_output_____"
],
[
"**Replicated a study done by Kelly et al.:**\n\nKelly, A.M., Uddin, L.Q., Biswal, B.B., Castellanos, F.X., Milham, M.P. (2008). Competition between functional brain networks mediates behavioral variability. Neuroimage, 39(1):527-37\n\n- Used SPM to create a GLM to model BOLD responses obtained from a subject performing the “flanker” task. \n \n - Defined contrasts to find differences in brain activation between “incongruent” and “congruent” trials. \n \n - Results showed a significant clusters in the medial prefrontal cortex and anterior cingulate cortex. \n\n- Used SPM to run a group-level analysis on the flanker task. \n\n - Defined contrasts to show differences in brain activation between incongruent and congruent trials at a group level.\n\n - Results showed a significant cluster in the dorsal medial prefrontal cortex. \n\n- Used SPM to run a region of interest analysis. \n\n - Used WFU PickAtlas (Atlas ROI) and Marsbar (Spherical ROI) to define region of interest (dorsal anterior cingulate cortex).\n\n - Extracted mean contrast estimates for incongruent and congruent and entered the means into a paired-samples t-test. \n\n - Spherical ROI showed significant activation in the dACC. Atlas ROI did not show significant activation in the dACC. \n",
"_____no_output_____"
],
[
"**fMRI Multi-Voxel Pattern Analysis**",
"_____no_output_____"
],
[
"- Used AFNI to train an algorithm to determine whether a person saw a House, Face, Car, or a Shoe based on a beta map that it was given.\n - Generated betas, created training set, and created testing set for each category\n - Created brain mask to test specific areas and tested algorithm. \n",
"_____no_output_____"
]
]
] | [
"markdown"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
]
] |
e7fb47e67456c92b1171b67b077c325dc249fa9e | 32,701 | ipynb | Jupyter Notebook | Files/.ipynb_checkpoints/fmovies_tidy-checkpoint.ipynb | nibukdk/web-scrapping-fmovie.to | af274c3fee252cf75f1422020f546da25b5275a2 | [
"MIT"
] | 1 | 2021-05-14T20:01:21.000Z | 2021-05-14T20:01:21.000Z | Files/.ipynb_checkpoints/fmovies_tidy-checkpoint.ipynb | islamux/web-scrapping-fmovie.to | af274c3fee252cf75f1422020f546da25b5275a2 | [
"MIT"
] | null | null | null | Files/.ipynb_checkpoints/fmovies_tidy-checkpoint.ipynb | islamux/web-scrapping-fmovie.to | af274c3fee252cf75f1422020f546da25b5275a2 | [
"MIT"
] | 2 | 2021-05-14T19:57:56.000Z | 2021-05-24T01:33:29.000Z | 31.810311 | 250 | 0.455277 | [
[
[
"# Introduction.",
"_____no_output_____"
],
[
"Project is the continuation of web crawling of website fmovies's [most-watched](https://fmovies.to/most-watched) section analysis for the website. \nThis is the second part. In part one we crawled websites and extracted informations. In part two we will tidy and clean the data for analysis in third part.",
"_____no_output_____"
]
],
[
[
"import pandas as pd\nimport numpy as np",
"_____no_output_____"
],
[
"movie_df = pd.read_csv('../Data/final_movies_df.csv')\ntv_df = pd.read_csv('../Data/final_tvs_df.csv')",
"_____no_output_____"
],
[
"print(movie_df.columns)\nprint(tv_df.columns)",
"Index(['movie_name', 'watch_link', 'date_added', 'site_rank', 'Genre', 'Stars',\n 'IMDb', 'Director', 'Release', 'Country', 'Rating'],\n dtype='object')\nIndex(['tv_name', 'watch_link', 'season', 'episodes', 'date_added',\n 'site_rank', 'Genre', 'Stars', 'IMDb', 'Director', 'Release', 'Country',\n 'Rating'],\n dtype='object')\n"
],
[
"movie_df.head()",
"_____no_output_____"
]
],
[
[
"# Columns\n\n- 'movie_name/ tv_name' : Name of movie / tv \n- 'watch_link': Url link for page to watch movie/tv, \n- 'date_added': Date added to df not in fmovies\n- 'site_rank': Ranking in the fmovies by order of most watched starting from 1.\n- 'Genre': Genres\n- 'Stars': Cast,\n- 'IMDb': IMDb ratings,\n- 'Director': Director, \n- 'Release': Released Date for Movie/TV,\n- 'Country': Origin country can be more than one\n- 'Rating'- Average reviews by viewers on the fmovies.to websie\n- 'season' - Which season, only for tv shows\n- 'episodes' - Number of episoded available for tv shows \n",
"_____no_output_____"
],
[
"## Rename Columns All Uppercase",
"_____no_output_____"
]
],
[
[
"movie_df.columns = movie_df.columns.str.upper().tolist()\ntv_df.columns = tv_df.columns.str.upper().tolist()",
"_____no_output_____"
],
[
"tv_df.head(2)",
"_____no_output_____"
],
[
"movie_df.head(2)",
"_____no_output_____"
]
],
[
[
"# Tidying",
"_____no_output_____"
],
[
"1. Genre section has list of values in one row, lets make one value per row.\n2. Released Data can be converted to date time and then to index of df\n3. Ratings have to values, 1st is the site ratings and second is number of reviews by viewers. Lets separate them different columns.",
"_____no_output_____"
],
[
"## Genre Split and Date Column",
"_____no_output_____"
],
[
"Lets make a function that splits and stacks the genre into multiple rows, like [this](https://stackoverflow.com/questions/17116814/pandas-how-do-i-split-text-in-a-column-into-multiple-rows/21032532). More, lets just reset index to release date.",
"_____no_output_____"
]
],
[
[
"def split_genre(df):\n \n cp= df.copy()\n \n # Spilt the genre by \",\" and stack to make muliple rows each with own unique genre\n # this will return a new df with genres only\n genre= cp.GENRE.str.split(',').apply(pd.Series, 1).stack()\n \n # Pop one of index\n genre.index = genre.index.droplevel(-1)\n \n # Provide name to series\n genre.name= \"GENRE\"\n \n \n #delete the original genre from original df\n cp.drop(\"GENRE\", axis=True, inplace=True)\n \n # Create a new df \n new_df = cp.copy().join(genre)\n # change release date from string to datetime and drop release column\n new_df['Date'] = pd.to_datetime(new_df['RELEASE'], format=\"%Y-%m-%d\")\n new_df.drop('RELEASE', axis=1, inplace=True)\n # Reset index\n new_df.set_index('Date',drop=True, inplace=True)\n \n return new_df",
"_____no_output_____"
],
[
"movie_df_tidy_1 = split_genre(movie_df)",
"_____no_output_____"
],
[
"tv_df_tidy_1 = split_genre(tv_df)",
"_____no_output_____"
]
],
[
[
"## Ratings Columns Split ",
"_____no_output_____"
]
],
[
[
"site_user_rating_4movie = movie_df_tidy_1.RATING.str.split(\"/\").str[0]\nsite_number_user_rated_4movie = movie_df_tidy_1.RATING.str.split(\"/\").str[1].str.split(\" \").str[0]\n",
"_____no_output_____"
],
[
"site_user_rating_4tv = tv_df_tidy_1.RATING.str.split(\"/\").str[0]\nsite_number_user_rated_4tv = tv_df_tidy_1.RATING.str.split(\"/\").str[1].str.split(\" \").str[0]\n",
"_____no_output_____"
]
],
[
[
"### Assign New cols and Drop the olds",
"_____no_output_____"
]
],
[
[
"tv_df_tidy_2 = tv_df_tidy_1.copy()\nmovie_df_tidy_2= movie_df_tidy_1.copy()",
"_____no_output_____"
],
[
"movie_df_tidy_2['User_Reviews_local'] = site_user_rating_4movie\nmovie_df_tidy_2['Number_Reviews_local'] = site_number_user_rated_4movie",
"_____no_output_____"
],
[
"tv_df_tidy_2['User_Reviews_local'] = site_user_rating_4tv\ntv_df_tidy_2['Number_Reviews_local'] = site_number_user_rated_4tv",
"_____no_output_____"
],
[
"tv_df_tidy_2.drop('RATING', inplace=True,axis=1)\nmovie_df_tidy_2.drop('RATING', inplace=True,axis=1)",
"_____no_output_____"
]
],
[
[
"# Missing Vlaues",
"_____no_output_____"
]
],
[
[
"print(movie_df_tidy_2.info())\n\nprint(\"**\"*20)\nprint(tv_df_tidy_2.info())",
"<class 'pandas.core.frame.DataFrame'>\nDatetimeIndex: 3790 entries, 2019-04-22 to 2007-02-09\nData columns (total 11 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 MOVIE_NAME 3790 non-null object \n 1 WATCH_LINK 3790 non-null object \n 2 DATE_ADDED 3790 non-null object \n 3 SITE_RANK 3790 non-null int64 \n 4 STARS 3788 non-null object \n 5 IMDB 3788 non-null float64\n 6 DIRECTOR 3788 non-null object \n 7 COUNTRY 3788 non-null object \n 8 GENRE 3788 non-null object \n 9 User_Reviews_local 3788 non-null object \n 10 Number_Reviews_local 3788 non-null object \ndtypes: float64(1), int64(1), object(9)\nmemory usage: 355.3+ KB\nNone\n****************************************\n<class 'pandas.core.frame.DataFrame'>\nDatetimeIndex: 764 entries, 2011-04-17 to 2019-03-28\nData columns (total 13 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 TV_NAME 764 non-null object \n 1 WATCH_LINK 764 non-null object \n 2 SEASON 764 non-null int64 \n 3 EPISODES 764 non-null int64 \n 4 DATE_ADDED 764 non-null object \n 5 SITE_RANK 764 non-null int64 \n 6 STARS 764 non-null object \n 7 IMDB 764 non-null float64\n 8 DIRECTOR 764 non-null object \n 9 COUNTRY 764 non-null object \n 10 GENRE 764 non-null object \n 11 User_Reviews_local 764 non-null object \n 12 Number_Reviews_local 764 non-null object \ndtypes: float64(1), int64(3), object(9)\nmemory usage: 83.6+ KB\nNone\n"
]
],
[
[
"It seems only movies has null vaules, lets dive deeper.",
"_____no_output_____"
]
],
[
[
"movie_df_tidy_2[movie_df_tidy_2.GENRE.isnull()]",
"_____no_output_____"
]
],
[
[
" Earlier to prevent prolongation of crawling, we returned nan for bad requests. We can individually go throguh each link to values but lets drop them for now.",
"_____no_output_____"
]
],
[
[
"movie_df_tidy_2.dropna(inplace=True,axis=0)",
"_____no_output_____"
]
],
[
[
"# Write file for analysis part",
"_____no_output_____"
],
[
"Index false argument on write will remove date index so lets not do that.",
"_____no_output_____"
]
],
[
[
"movie_df_tidy_2.to_csv('../Data/Movie.csv')\ntv_df_tidy_2.to_csv('../Data/TV.csv')",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
]
] |
e7fb4aabecef329d09941949656da49a83828b7e | 19,675 | ipynb | Jupyter Notebook | configuration.ipynb | mesameki/MachineLearningNotebooks | 4fe8c1702d5d2934beee599e977fd7581c441780 | [
"MIT"
] | 2 | 2020-07-12T02:37:49.000Z | 2021-09-09T09:55:32.000Z | configuration.ipynb | mesameki/MachineLearningNotebooks | 4fe8c1702d5d2934beee599e977fd7581c441780 | [
"MIT"
] | null | null | null | configuration.ipynb | mesameki/MachineLearningNotebooks | 4fe8c1702d5d2934beee599e977fd7581c441780 | [
"MIT"
] | 3 | 2020-07-14T21:33:01.000Z | 2021-05-20T17:27:48.000Z | 51.370757 | 638 | 0.614841 | [
[
[
"Copyright (c) Microsoft Corporation. All rights reserved.\n\nLicensed under the MIT License.",
"_____no_output_____"
],
[
"![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/configuration.png)",
"_____no_output_____"
],
[
"# Configuration\n\n_**Setting up your Azure Machine Learning services workspace and configuring your notebook library**_\n\n---\n---\n\n## Table of Contents\n\n1. [Introduction](#Introduction)\n 1. What is an Azure Machine Learning workspace\n1. [Setup](#Setup)\n 1. Azure subscription\n 1. Azure ML SDK and other library installation\n 1. Azure Container Instance registration\n1. [Configure your Azure ML Workspace](#Configure%20your%20Azure%20ML%20workspace)\n 1. Workspace parameters\n 1. Access your workspace\n 1. Create a new workspace\n 1. Create compute resources\n1. [Next steps](#Next%20steps)\n\n---\n\n## Introduction\n\nThis notebook configures your library of notebooks to connect to an Azure Machine Learning (ML) workspace. In this case, a library contains all of the notebooks in the current folder and any nested folders. You can configure this notebook library to use an existing workspace or create a new workspace.\n\nTypically you will need to run this notebook only once per notebook library as all other notebooks will use connection information that is written here. If you want to redirect your notebook library to work with a different workspace, then you should re-run this notebook.\n\nIn this notebook you will\n* Learn about getting an Azure subscription\n* Specify your workspace parameters\n* Access or create your workspace\n* Add a default compute cluster for your workspace\n\n### What is an Azure Machine Learning workspace\n\nAn Azure ML Workspace is an Azure resource that organizes and coordinates the actions of many other Azure resources to assist in executing and sharing machine learning workflows. In particular, an Azure ML Workspace coordinates storage, databases, and compute resources providing added functionality for machine learning experimentation, deployment, inference, and the monitoring of deployed models.",
"_____no_output_____"
],
[
"## Setup\n\nThis section describes activities required before you can access any Azure ML services functionality.",
"_____no_output_____"
],
[
"### 1. Azure Subscription\n\nIn order to create an Azure ML Workspace, first you need access to an Azure subscription. An Azure subscription allows you to manage storage, compute, and other assets in the Azure cloud. You can [create a new subscription](https://azure.microsoft.com/en-us/free/) or access existing subscription information from the [Azure portal](https://portal.azure.com). Later in this notebook you will need information such as your subscription ID in order to create and access AML workspaces.\n\n### 2. Azure ML SDK and other library installation\n\nIf you are running in your own environment, follow [SDK installation instructions](https://docs.microsoft.com/azure/machine-learning/service/how-to-configure-environment). If you are running in Azure Notebooks or another Microsoft managed environment, the SDK is already installed.\n\nAlso install following libraries to your environment. Many of the example notebooks depend on them\n\n```\n(myenv) $ conda install -y matplotlib tqdm scikit-learn\n```\n\nOnce installation is complete, the following cell checks the Azure ML SDK version:",
"_____no_output_____"
]
],
[
[
"import azureml.core\n\nprint(\"This notebook was created using version 1.0.48\r\n of the Azure ML SDK\")\nprint(\"You are currently using version\", azureml.core.VERSION, \"of the Azure ML SDK\")",
"_____no_output_____"
]
],
[
[
"If you are using an older version of the SDK then this notebook was created using, you should upgrade your SDK.\n\n### 3. Azure Container Instance registration\nAzure Machine Learning uses of [Azure Container Instance (ACI)](https://azure.microsoft.com/services/container-instances) to deploy dev/test web services. An Azure subscription needs to be registered to use ACI. If you or the subscription owner have not yet registered ACI on your subscription, you will need to use the [Azure CLI](https://docs.microsoft.com/en-us/cli/azure/install-azure-cli?view=azure-cli-latest) and execute the following commands. Note that if you ran through the AML [quickstart](https://docs.microsoft.com/en-us/azure/machine-learning/service/quickstart-get-started) you have already registered ACI. \n\n```shell\n# check to see if ACI is already registered\n(myenv) $ az provider show -n Microsoft.ContainerInstance -o table\n\n# if ACI is not registered, run this command.\n# note you need to be the subscription owner in order to execute this command successfully.\n(myenv) $ az provider register -n Microsoft.ContainerInstance\n```\n\n---",
"_____no_output_____"
],
[
"## Configure your Azure ML workspace\n\n### Workspace parameters\n\nTo use an AML Workspace, you will need to import the Azure ML SDK and supply the following information:\n* Your subscription id\n* A resource group name\n* (optional) The region that will host your workspace\n* A name for your workspace\n\nYou can get your subscription ID from the [Azure portal](https://portal.azure.com).\n\nYou will also need access to a [_resource group_](https://docs.microsoft.com/en-us/azure/azure-resource-manager/resource-group-overview#resource-groups), which organizes Azure resources and provides a default region for the resources in a group. You can see what resource groups to which you have access, or create a new one in the [Azure portal](https://portal.azure.com). If you don't have a resource group, the create workspace command will create one for you using the name you provide.\n\nThe region to host your workspace will be used if you are creating a new workspace. You do not need to specify this if you are using an existing workspace. You can find the list of supported regions [here](https://azure.microsoft.com/en-us/global-infrastructure/services/?products=machine-learning-service). You should pick a region that is close to your location or that contains your data.\n\nThe name for your workspace is unique within the subscription and should be descriptive enough to discern among other AML Workspaces. The subscription may be used only by you, or it may be used by your department or your entire enterprise, so choose a name that makes sense for your situation.\n\nThe following cell allows you to specify your workspace parameters. This cell uses the python method `os.getenv` to read values from environment variables which is useful for automation. If no environment variable exists, the parameters will be set to the specified default values. \n\nIf you ran the Azure Machine Learning [quickstart](https://docs.microsoft.com/en-us/azure/machine-learning/service/quickstart-get-started) in Azure Notebooks, you already have a configured workspace! You can go to your Azure Machine Learning Getting Started library, view *config.json* file, and copy-paste the values for subscription ID, resource group and workspace name below.\n\nReplace the default values in the cell below with your workspace parameters",
"_____no_output_____"
]
],
[
[
"import os\n\nsubscription_id = os.getenv(\"SUBSCRIPTION_ID\", default=\"<my-subscription-id>\")\nresource_group = os.getenv(\"RESOURCE_GROUP\", default=\"<my-resource-group>\")\nworkspace_name = os.getenv(\"WORKSPACE_NAME\", default=\"<my-workspace-name>\")\nworkspace_region = os.getenv(\"WORKSPACE_REGION\", default=\"eastus2\")",
"_____no_output_____"
]
],
[
[
"### Access your workspace\n\nThe following cell uses the Azure ML SDK to attempt to load the workspace specified by your parameters. If this cell succeeds, your notebook library will be configured to access the workspace from all notebooks using the `Workspace.from_config()` method. The cell can fail if the specified workspace doesn't exist or you don't have permissions to access it. ",
"_____no_output_____"
]
],
[
[
"from azureml.core import Workspace\n\ntry:\n ws = Workspace(subscription_id = subscription_id, resource_group = resource_group, workspace_name = workspace_name)\n # write the details of the workspace to a configuration file to the notebook library\n ws.write_config()\n print(\"Workspace configuration succeeded. Skip the workspace creation steps below\")\nexcept:\n print(\"Workspace not accessible. Change your parameters or create a new workspace below\")",
"_____no_output_____"
]
],
[
[
"### Create a new workspace\n\nIf you don't have an existing workspace and are the owner of the subscription or resource group, you can create a new workspace. If you don't have a resource group, the create workspace command will create one for you using the name you provide.\n\n**Note**: As with other Azure services, there are limits on certain resources (for example AmlCompute quota) associated with the Azure ML service. Please read [this article](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-manage-quotas) on the default limits and how to request more quota.\n\nThis cell will create an Azure ML workspace for you in a subscription provided you have the correct permissions.\n\nThis will fail if:\n* You do not have permission to create a workspace in the resource group\n* You do not have permission to create a resource group if it's non-existing.\n* You are not a subscription owner or contributor and no Azure ML workspaces have ever been created in this subscription\n\nIf workspace creation fails, please work with your IT admin to provide you with the appropriate permissions or to provision the required resources.",
"_____no_output_____"
]
],
[
[
"from azureml.core import Workspace\n\n# Create the workspace using the specified parameters\nws = Workspace.create(name = workspace_name,\n subscription_id = subscription_id,\n resource_group = resource_group, \n location = workspace_region,\n create_resource_group = True,\n exist_ok = True)\nws.get_details()\n\n# write the details of the workspace to a configuration file to the notebook library\nws.write_config()",
"_____no_output_____"
]
],
[
[
"### Create compute resources for your training experiments\n\nMany of the sample notebooks use Azure ML managed compute (AmlCompute) to train models using a dynamically scalable pool of compute. In this section you will create default compute clusters for use by the other notebooks and any other operations you choose.\n\nTo create a cluster, you need to specify a compute configuration that specifies the type of machine to be used and the scalability behaviors. Then you choose a name for the cluster that is unique within the workspace that can be used to address the cluster later.\n\nThe cluster parameters are:\n* vm_size - this describes the virtual machine type and size used in the cluster. All machines in the cluster are the same type. You can get the list of vm sizes available in your region by using the CLI command\n\n```shell\naz vm list-skus -o tsv\n```\n* min_nodes - this sets the minimum size of the cluster. If you set the minimum to 0 the cluster will shut down all nodes while not in use. Setting this number to a value higher than 0 will allow for faster start-up times, but you will also be billed when the cluster is not in use.\n* max_nodes - this sets the maximum size of the cluster. Setting this to a larger number allows for more concurrency and a greater distributed processing of scale-out jobs.\n\n\nTo create a **CPU** cluster now, run the cell below. The autoscale settings mean that the cluster will scale down to 0 nodes when inactive and up to 4 nodes when busy.",
"_____no_output_____"
]
],
[
[
"from azureml.core.compute import ComputeTarget, AmlCompute\nfrom azureml.core.compute_target import ComputeTargetException\n\n# Choose a name for your CPU cluster\ncpu_cluster_name = \"cpu-cluster\"\n\n# Verify that cluster does not exist already\ntry:\n cpu_cluster = ComputeTarget(workspace=ws, name=cpu_cluster_name)\n print(\"Found existing cpu-cluster\")\nexcept ComputeTargetException:\n print(\"Creating new cpu-cluster\")\n \n # Specify the configuration for the new cluster\n compute_config = AmlCompute.provisioning_configuration(vm_size=\"STANDARD_D2_V2\",\n min_nodes=0,\n max_nodes=4)\n\n # Create the cluster with the specified name and configuration\n cpu_cluster = ComputeTarget.create(ws, cpu_cluster_name, compute_config)\n \n # Wait for the cluster to complete, show the output log\n cpu_cluster.wait_for_completion(show_output=True)",
"_____no_output_____"
]
],
[
[
"To create a **GPU** cluster, run the cell below. Note that your subscription must have sufficient quota for GPU VMs or the command will fail. To increase quota, see [these instructions](https://docs.microsoft.com/en-us/azure/azure-supportability/resource-manager-core-quotas-request). ",
"_____no_output_____"
]
],
[
[
"from azureml.core.compute import ComputeTarget, AmlCompute\nfrom azureml.core.compute_target import ComputeTargetException\n\n# Choose a name for your GPU cluster\ngpu_cluster_name = \"gpu-cluster\"\n\n# Verify that cluster does not exist already\ntry:\n gpu_cluster = ComputeTarget(workspace=ws, name=gpu_cluster_name)\n print(\"Found existing gpu cluster\")\nexcept ComputeTargetException:\n print(\"Creating new gpu-cluster\")\n \n # Specify the configuration for the new cluster\n compute_config = AmlCompute.provisioning_configuration(vm_size=\"STANDARD_NC6\",\n min_nodes=0,\n max_nodes=4)\n # Create the cluster with the specified name and configuration\n gpu_cluster = ComputeTarget.create(ws, gpu_cluster_name, compute_config)\n\n # Wait for the cluster to complete, show the output log\n gpu_cluster.wait_for_completion(show_output=True)",
"_____no_output_____"
]
],
[
[
"---\n\n## Next steps\n\nIn this notebook you configured this notebook library to connect easily to an Azure ML workspace. You can copy this notebook to your own libraries to connect them to you workspace, or use it to bootstrap new workspaces completely.\n\nIf you came here from another notebook, you can return there and complete that exercise, or you can try out the [Tutorials](./tutorials) or jump into \"how-to\" notebooks and start creating and deploying models. A good place to start is the [train within notebook](./how-to-use-azureml/training/train-within-notebook) example that walks through a simplified but complete end to end machine learning process.",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
e7fb578499c4129ee1c2a7c95a78c8836f3d22b2 | 72,369 | ipynb | Jupyter Notebook | lessons/01.base-types.ipynb | aodarc/LIST-010 | 4579a047ca1ae0266f368349ea4536c6eb367f97 | [
"MIT"
] | null | null | null | lessons/01.base-types.ipynb | aodarc/LIST-010 | 4579a047ca1ae0266f368349ea4536c6eb367f97 | [
"MIT"
] | 4 | 2018-12-19T13:41:12.000Z | 2019-01-14T15:11:11.000Z | lessons/01.base-types.ipynb | aodarc/LIST-010 | 4579a047ca1ae0266f368349ea4536c6eb367f97 | [
"MIT"
] | null | null | null | 19.221514 | 424 | 0.436858 | [
[
[
"a = 'Hello'",
"_____no_output_____"
],
[
"d = {'vasilii': 'red'}",
"_____no_output_____"
],
[
"d['vasilii']",
"_____no_output_____"
],
[
"7.42",
"_____no_output_____"
],
[
"import math",
"_____no_output_____"
],
[
"math.pi",
"_____no_output_____"
],
[
"3.30857190837527357273572983 + 0.19375691365917235662937569",
"_____no_output_____"
],
[
"3.30857190837527357273572983 * 0.19375691365917235662937569",
"_____no_output_____"
],
[
"import math",
"_____no_output_____"
],
[
"dir(math) ",
"_____no_output_____"
],
[
"help(math)",
"Help on module math:\n\nNAME\n math\n\nMODULE REFERENCE\n https://docs.python.org/3.7/library/math\n \n The following documentation is automatically generated from the Python\n source files. It may be incomplete, incorrect or include features that\n are considered implementation detail and may vary between Python\n implementations. When in doubt, consult the module reference at the\n location listed above.\n\nDESCRIPTION\n This module is always available. It provides access to the\n mathematical functions defined by the C standard.\n\nFUNCTIONS\n acos(x, /)\n Return the arc cosine (measured in radians) of x.\n \n acosh(x, /)\n Return the inverse hyperbolic cosine of x.\n \n asin(x, /)\n Return the arc sine (measured in radians) of x.\n \n asinh(x, /)\n Return the inverse hyperbolic sine of x.\n \n atan(x, /)\n Return the arc tangent (measured in radians) of x.\n \n atan2(y, x, /)\n Return the arc tangent (measured in radians) of y/x.\n \n Unlike atan(y/x), the signs of both x and y are considered.\n \n atanh(x, /)\n Return the inverse hyperbolic tangent of x.\n \n ceil(x, /)\n Return the ceiling of x as an Integral.\n \n This is the smallest integer >= x.\n \n copysign(x, y, /)\n Return a float with the magnitude (absolute value) of x but the sign of y.\n \n On platforms that support signed zeros, copysign(1.0, -0.0)\n returns -1.0.\n \n cos(x, /)\n Return the cosine of x (measured in radians).\n \n cosh(x, /)\n Return the hyperbolic cosine of x.\n \n degrees(x, /)\n Convert angle x from radians to degrees.\n \n erf(x, /)\n Error function at x.\n \n erfc(x, /)\n Complementary error function at x.\n \n exp(x, /)\n Return e raised to the power of x.\n \n expm1(x, /)\n Return exp(x)-1.\n \n This function avoids the loss of precision involved in the direct evaluation of exp(x)-1 for small x.\n \n fabs(x, /)\n Return the absolute value of the float x.\n \n factorial(x, /)\n Find x!.\n \n Raise a ValueError if x is negative or non-integral.\n \n floor(x, /)\n Return the floor of x as an Integral.\n \n This is the largest integer <= x.\n \n fmod(x, y, /)\n Return fmod(x, y), according to platform C.\n \n x % y may differ.\n \n frexp(x, /)\n Return the mantissa and exponent of x, as pair (m, e).\n \n m is a float and e is an int, such that x = m * 2.**e.\n If x is 0, m and e are both 0. Else 0.5 <= abs(m) < 1.0.\n \n fsum(seq, /)\n Return an accurate floating point sum of values in the iterable seq.\n \n Assumes IEEE-754 floating point arithmetic.\n \n gamma(x, /)\n Gamma function at x.\n \n gcd(x, y, /)\n greatest common divisor of x and y\n \n hypot(x, y, /)\n Return the Euclidean distance, sqrt(x*x + y*y).\n \n isclose(a, b, *, rel_tol=1e-09, abs_tol=0.0)\n Determine whether two floating point numbers are close in value.\n \n rel_tol\n maximum difference for being considered \"close\", relative to the\n magnitude of the input values\n abs_tol\n maximum difference for being considered \"close\", regardless of the\n magnitude of the input values\n \n Return True if a is close in value to b, and False otherwise.\n \n For the values to be considered close, the difference between them\n must be smaller than at least one of the tolerances.\n \n -inf, inf and NaN behave similarly to the IEEE 754 Standard. That\n is, NaN is not close to anything, even itself. 
inf and -inf are\n only close to themselves.\n \n isfinite(x, /)\n Return True if x is neither an infinity nor a NaN, and False otherwise.\n \n isinf(x, /)\n Return True if x is a positive or negative infinity, and False otherwise.\n \n isnan(x, /)\n Return True if x is a NaN (not a number), and False otherwise.\n \n ldexp(x, i, /)\n Return x * (2**i).\n \n This is essentially the inverse of frexp().\n \n lgamma(x, /)\n Natural logarithm of absolute value of Gamma function at x.\n \n log(...)\n log(x, [base=math.e])\n Return the logarithm of x to the given base.\n \n If the base not specified, returns the natural logarithm (base e) of x.\n \n log10(x, /)\n Return the base 10 logarithm of x.\n \n log1p(x, /)\n Return the natural logarithm of 1+x (base e).\n \n The result is computed in a way which is accurate for x near zero.\n \n log2(x, /)\n Return the base 2 logarithm of x.\n \n modf(x, /)\n Return the fractional and integer parts of x.\n \n Both results carry the sign of x and are floats.\n \n pow(x, y, /)\n Return x**y (x to the power of y).\n \n radians(x, /)\n Convert angle x from degrees to radians.\n \n remainder(x, y, /)\n Difference between x and the closest integer multiple of y.\n \n Return x - n*y where n*y is the closest integer multiple of y.\n In the case where x is exactly halfway between two multiples of\n y, the nearest even value of n is used. The result is always exact.\n \n sin(x, /)\n Return the sine of x (measured in radians).\n \n sinh(x, /)\n Return the hyperbolic sine of x.\n \n sqrt(x, /)\n Return the square root of x.\n \n tan(x, /)\n Return the tangent of x (measured in radians).\n \n tanh(x, /)\n Return the hyperbolic tangent of x.\n \n trunc(x, /)\n Truncates the Real x to the nearest Integral toward 0.\n \n Uses the __trunc__ magic method.\n\nDATA\n e = 2.718281828459045\n inf = inf\n nan = nan\n pi = 3.141592653589793\n tau = 6.283185307179586\n\nFILE\n /usr/local/Cellar/python/3.7.0/Frameworks/Python.framework/Versions/3.7/lib/python3.7/lib-dynload/math.cpython-37m-darwin.so\n\n\n"
],
[
"l = [1,2,3,4,5]",
"_____no_output_____"
],
[
"l",
"_____no_output_____"
],
[
"l = [1,2,3,4,5, 'asfasf', [(1,2,3), {1,2,3,3}]]",
"_____no_output_____"
],
[
"l",
"_____no_output_____"
],
[
"a = 42",
"_____no_output_____"
],
[
"b = 22",
"_____no_output_____"
],
[
"a + b",
"_____no_output_____"
],
[
"a",
"_____no_output_____"
],
[
"b",
"_____no_output_____"
],
[
"l",
"_____no_output_____"
],
[
"l.pop()",
"_____no_output_____"
],
[
"l",
"_____no_output_____"
],
[
"l.pop(0)",
"_____no_output_____"
],
[
"l",
"_____no_output_____"
],
[
"l.pop(87) ",
"_____no_output_____"
],
[
"a = 'asgasga'",
"_____no_output_____"
],
[
"a1 = \"asjnas\"",
"_____no_output_____"
],
[
"a2 = 'Man\\'s'",
"_____no_output_____"
],
[
"a3 = \"\"\" asjghasjkgh lasghlas\"\"\"\"",
"_____no_output_____"
],
[
"a3 = \"\"\" asjghasjkgh lasghlas\"\"\"",
"_____no_output_____"
],
[
"a4 = ''' aljsfglasglj\nakjshgkjadg\nalsgnljad'''",
"_____no_output_____"
],
[
"a4",
"_____no_output_____"
],
[
"a",
"_____no_output_____"
],
[
"a[1] ",
"_____no_output_____"
],
[
"l",
"_____no_output_____"
],
[
"l[1]",
"_____no_output_____"
],
[
"l[1] = [42,42,42]",
"_____no_output_____"
],
[
"l",
"_____no_output_____"
],
[
"s[1] = 'g'",
"_____no_output_____"
],
[
"a[1] = 'g'",
"_____no_output_____"
],
[
"new = a[0:2] + a[2:]",
"_____no_output_____"
],
[
"new",
"_____no_output_____"
],
[
"new = a[0:2] + a[3:]",
"_____no_output_____"
],
[
"new",
"_____no_output_____"
],
[
"a[-2]",
"_____no_output_____"
],
[
"new = a[:-2]",
"_____no_output_____"
],
[
"new",
"_____no_output_____"
],
[
"a[:-2:2]",
"_____no_output_____"
],
[
"a='1234567'",
"_____no_output_____"
],
[
"a[:-2:2]",
"_____no_output_____"
],
[
"len(a)",
"_____no_output_____"
],
[
"len(l)",
"_____no_output_____"
],
[
"l",
"_____no_output_____"
],
[
"a = '123'",
"_____no_output_____"
],
[
"id(a)",
"_____no_output_____"
],
[
"b='5678' ",
"_____no_output_____"
],
[
"id(b)",
"_____no_output_____"
],
[
"c = a+b",
"_____no_output_____"
],
[
"id(c)",
"_____no_output_____"
],
[
"a = 42",
"_____no_output_____"
],
[
"b=a",
"_____no_output_____"
],
[
"'42'.isdigit()",
"_____no_output_____"
],
[
"'42f'.isdigit()",
"_____no_output_____"
],
[
"b'asfasf'",
"_____no_output_____"
],
[
"numb = 42",
"_____no_output_____"
],
[
"numb = '42'",
"_____no_output_____"
],
[
"'42' = 55",
"_____no_output_____"
],
[
"def foo():\n print('hello world')",
"_____no_output_____"
],
[
"foo",
"_____no_output_____"
],
[
"foo2 = 42",
"_____no_output_____"
],
[
"foo2 = foo",
"_____no_output_____"
],
[
"foo()",
"hello world\n"
],
[
"foo2()",
"hello world\n"
],
[
"foo is foo2",
"_____no_output_____"
],
[
"foo2",
"_____no_output_____"
],
[
"foo",
"_____no_output_____"
],
[
"id(foo) == if(foo2)",
"_____no_output_____"
],
[
"id(foo) == id(foo2)",
"_____no_output_____"
],
[
"name = 'Ostap'",
"_____no_output_____"
],
[
"age = 42",
"_____no_output_____"
],
[
"'Name: '",
"_____no_output_____"
],
[
"'Name: {1}, Age: {0}'.format(age, name)",
"_____no_output_____"
],
[
"f'Name: {name}, Age: {age}'",
"_____no_output_____"
],
[
"f'Name: {name.capitalize()}, Age: {age}'",
"_____no_output_____"
],
[
"f'Name: {name.upper())}, Age: {age}'",
"_____no_output_____"
],
[
"f'Name: {name.upper()}, Age: {age}'",
"_____no_output_____"
],
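[
"# Added example (not in the original lesson): f-strings also accept\n# format specs after a colon\nf'Age padded: {age:04d}, as a float: {age:.2f}'",
"_____no_output_____"
],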
[
"l = [name, age]",
"_____no_output_____"
],
[
"l",
"_____no_output_____"
],
[
"name",
"_____no_output_____"
],
[
"age",
"_____no_output_____"
],
[
"name = \"Alina\"",
"_____no_output_____"
],
[
"l",
"_____no_output_____"
],
[
"list_zero = [1,2,3,4]",
"_____no_output_____"
],
[
"l = [list_zero, 42]",
"_____no_output_____"
],
[
"l",
"_____no_output_____"
],
[
"list_zero.append(42)",
"_____no_output_____"
],
[
"l",
"_____no_output_____"
],
[
"import sys",
"_____no_output_____"
],
[
"sys.getsizeof(l)",
"_____no_output_____"
],
[
"s = 'abcdefg'",
"_____no_output_____"
],
[
"s[-3:]",
"_____no_output_____"
],
[
"s[-3::-1]",
"_____no_output_____"
],
[
"s[::-1] ",
"_____no_output_____"
],
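[
"# Added example (not in the original lesson): reversed() works on any\n# sequence; join the characters back into a string\n''.join(reversed(s))",
"_____no_output_____"
],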
[
"st = {1,2,3,4}",
"_____no_output_____"
],
[
"st[::-1]",
"_____no_output_____"
],
[
"[[]] * 3",
"_____no_output_____"
],
[
"[[0,0,0]] * 3",
"_____no_output_____"
],
[
"l = [[0,0,0]] * 3",
"_____no_output_____"
],
[
"l[0].append(42)",
"_____no_output_____"
],
[
"l",
"_____no_output_____"
],
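[
"# Added example (not in the original lesson): a list comprehension builds\n# independent inner lists, so mutating one row does not change the others\nrows = [[0, 0, 0] for _ in range(3)]\nrows[0].append(42)\nrows",
"_____no_output_____"
],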
[
"l = [0,0,0] + [0,0,0] + [0,0,0] ",
"_____no_output_____"
],
[
"l",
"_____no_output_____"
],
[
"l = [[0,0,0]] + [[0,0,0]] + [[0,0,0]] ",
"_____no_output_____"
],
[
"l",
"_____no_output_____"
],
[
"l = [1,2,3,4, \"42\"]",
"_____no_output_____"
],
[
"l2 = l",
"_____no_output_____"
],
[
"l.pop()",
"_____no_output_____"
],
[
"l",
"_____no_output_____"
],
[
"l2",
"_____no_output_____"
],
[
"l2 = l[:]",
"_____no_output_____"
],
[
"l2",
"_____no_output_____"
],
[
"l.pop()",
"_____no_output_____"
],
[
"l2",
"_____no_output_____"
],
[
"l",
"_____no_output_____"
],
[
"l.copy()",
"_____no_output_____"
],
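[
"# Added example (not in the original lesson): copy() and [:] are shallow,\n# so nested lists are still shared; copy.deepcopy copies them too\nimport copy\n\nnested = [[1, 2], [3, 4]]\nshallow = nested.copy()\ndeep = copy.deepcopy(nested)\nnested[0].append(99)\nprint(shallow[0])  # [1, 2, 99] - the inner list is shared\nprint(deep[0])     # [1, 2] - fully independent copy",
"_____no_output_____"
],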
[
"s",
"_____no_output_____"
],
[
"l = list(s)",
"_____no_output_____"
],
[
"l",
"_____no_output_____"
],
[
"list([1,2,,3,4])",
"_____no_output_____"
],
[
"list([1,2,3,4])",
"_____no_output_____"
],
[
"l0 = [1,2,3,4]",
"_____no_output_____"
],
[
"l1= list(l0)",
"_____no_output_____"
],
[
"id(l0), id(l1) ",
"_____no_output_____"
],
[
"list2 = list",
"_____no_output_____"
],
[
"list = 42",
"_____no_output_____"
],
[
"list(s)",
"_____no_output_____"
],
[
"list = list2",
"_____no_output_____"
],
[
"list()",
"_____no_output_____"
],
[
"l",
"_____no_output_____"
],
[
"res = l.append(42)",
"_____no_output_____"
],
[
"l",
"_____no_output_____"
],
[
"res",
"_____no_output_____"
],
[
"print(res)",
"None\n"
],
[
"l = l.append(42)",
"_____no_output_____"
],
[
"l",
"_____no_output_____"
],
[
"l = list(s)",
"_____no_output_____"
],
[
"l",
"_____no_output_____"
],
[
"l.index('ggg')",
"_____no_output_____"
],
[
"l",
"_____no_output_____"
],
[
"l = list('asgwrhvsw')",
"_____no_output_____"
],
[
"l.sort()",
"_____no_output_____"
],
[
"l",
"_____no_output_____"
],
[
"ord('a')",
"_____no_output_____"
],
[
"ord('ю')",
"_____no_output_____"
],
[
"ord('A')",
"_____no_output_____"
],
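[
"# Added example (not in the original lesson): chr() is the inverse of ord()\nchr(97)",
"_____no_output_____"
],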
[
"l",
"_____no_output_____"
],
[
"id(l)",
"_____no_output_____"
],
[
"hash(id(l))",
"_____no_output_____"
],
[
"hash(l)",
"_____no_output_____"
],
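[
"# Added example (not in the original lesson): lists are unhashable, but a\n# tuple of hashable elements can be hashed and used as a dict key\npoint = (2, 3)\ndistances = {point: 3.6}\ndistances[(2, 3)]",
"_____no_output_____"
],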
[
"d = dict()",
"_____no_output_____"
],
[
"d",
"_____no_output_____"
],
[
"st = {1,2,3}",
"_____no_output_____"
],
[
"st = {}",
"_____no_output_____"
],
[
"type(st)",
"_____no_output_____"
],
[
"st = set()",
"_____no_output_____"
],
[
"type(st)",
"_____no_output_____"
],
[
"d = {'lst': [1,2,3]}",
"_____no_output_____"
],
[
"d['lst'].append(42)",
"_____no_output_____"
],
[
"d",
"_____no_output_____"
],
[
"d['afsf']",
"_____no_output_____"
],
[
"d.get('asf')",
"_____no_output_____"
],
[
"data = d.get('asfasfg')",
"_____no_output_____"
],
[
"if data is None:\n print('asfasfasasgasgasg')",
"asfasfasasgasgasg\n"
],
[
"if data == None:\n print('asfasfasasgasgasg')",
"asfasfasasgasgasg\n"
],
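[
"# Added example (not in the original lesson): get() accepts a default,\n# which avoids the explicit None check above\nd.get('asfasfg', 'missing key')",
"_____no_output_____"
],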
[
"d = {1: 'one', 2: 'two', 'stirng': 'fasfasgas'}",
"_____no_output_____"
],
[
"d.items()",
"_____no_output_____"
],
[
"list(d.items())",
"_____no_output_____"
],
[
"list(d.keys())",
"_____no_output_____"
],
[
"list(d.values())",
"_____no_output_____"
],
[
"for key, value in d.items():\n print('key:{}, value:{}'.format(key, value))",
"key:1, value:one\nkey:2, value:two\nkey:stirng, value:fasfasgas\n"
],
[
"a, b = 1,2",
"_____no_output_____"
],
[
"a",
"_____no_output_____"
],
[
"b",
"_____no_output_____"
],
[
"a, b = (1,2)",
"_____no_output_____"
],
[
"a",
"_____no_output_____"
],
[
"b",
"_____no_output_____"
],
[
"a, b = (1,2, 4)",
"_____no_output_____"
],
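[
"# Added example (not in the original lesson): a starred target absorbs the\n# extra values, so unpacking three values into two names no longer fails\na, *rest = (1, 2, 4)\nprint(a, rest)",
"_____no_output_____"
],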
[
"list(1,2,3,4)",
"_____no_output_____"
],
[
"dir(d)",
"_____no_output_____"
],
[
"bool(0)",
"_____no_output_____"
],
[
"bool(1)",
"_____no_output_____"
],
[
"bool(5)",
"_____no_output_____"
],
[
"bool(-5) ",
"_____no_output_____"
],
[
"bool([])",
"_____no_output_____"
],
[
"bool([1])",
"_____no_output_____"
],
[
"bool({1})",
"_____no_output_____"
],
[
"bool({})",
"_____no_output_____"
],
[
"int(True)",
"_____no_output_____"
],
[
"int(False)",
"_____no_output_____"
],
[
"bool(' ') ",
"_____no_output_____"
],
[
"bool('') ",
"_____no_output_____"
],
[
"t = tuple('asdfghj')",
"_____no_output_____"
],
[
"t",
"_____no_output_____"
],
[
"t[0] = 42",
"_____no_output_____"
],
[
"l",
"_____no_output_____"
],
[
"l[0] = 42",
"_____no_output_____"
],
[
"l",
"_____no_output_____"
],
[
"(1+2) + (1+2) ",
"_____no_output_____"
],
[
"set('aaaaaasdsfghgjhghfgdfsdfdgsfdgasgasgagdbelrihvwoivspqevj')",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e7fb74a8a154b29dfa0c3785fd22919b1fad3103 | 49,318 | ipynb | Jupyter Notebook | Pygame-master/Chrome_Dinosaur_Game/MACHINE_LEARNING.ipynb | professorjar/curso-de-jogos- | e20bd2ec1af76d72efd8a3485fe6ffd6eb674ea2 | [
"MIT"
] | null | null | null | Pygame-master/Chrome_Dinosaur_Game/MACHINE_LEARNING.ipynb | professorjar/curso-de-jogos- | e20bd2ec1af76d72efd8a3485fe6ffd6eb674ea2 | [
"MIT"
] | null | null | null | Pygame-master/Chrome_Dinosaur_Game/MACHINE_LEARNING.ipynb | professorjar/curso-de-jogos- | e20bd2ec1af76d72efd8a3485fe6ffd6eb674ea2 | [
"MIT"
] | null | null | null | 89.669091 | 12,784 | 0.853319 | [
[
[
"## Importing the images into this script",
"_____no_output_____"
]
],
[
[
"import os\nimport numpy as np\n\ndirectory = 'C:/Users/joaovitor/Desktop/Meu_Canal/DINO/'\njump_img = os.listdir(os.path.join(directory, 'jump'))\nnojump_img = os.listdir(os.path.join(directory, 'no_jump'))\n\n#checking if the number of images in both directories are equals\nprint(len(jump_img) == len(nojump_img))\nprint(len(jump_img))",
"False\n81\n"
]
],
[
[
"## Storing the images array into lists",
"_____no_output_____"
]
],
[
[
"import cv2\n\nimgs_list_jump = []\nimgs_list_nojump = []\n\nfor img in jump_img:\n images = cv2.imread(os.path.join(directory, 'jump', img), 0) #0 to convert the image to grayscale\n imgs_list_jump.append(images)\n \nfor img in nojump_img:\n images = cv2.imread(os.path.join(directory, 'no_jump', img), 0) #0 to convert the image to grayscale\n imgs_list_nojump.append(images) \n\n#Taking a look at the first img of array_imgs_jump list\nprint(imgs_list_jump[0])\nprint(50*'=')\nprint('Images Dimensions:', imgs_list_jump[0].shape)",
"[[0 0 0 ... 0 0 0]\n [0 0 0 ... 0 0 0]\n [0 0 0 ... 0 0 0]\n ...\n [0 0 0 ... 0 0 0]\n [0 0 0 ... 0 0 0]\n [0 0 0 ... 0 0 0]]\n==================================================\nImages Dimensions: (480, 640)\n"
]
],
[
[
"## Let's display the first image",
"_____no_output_____"
]
],
[
[
"import matplotlib.pyplot as plt\n\nimg = cv2.cvtColor(imgs_list_jump[0], cv2.COLOR_BGR2RGB)\nplt.imshow(img)\nplt.show()",
"_____no_output_____"
]
],
[
[
"## The images have 480 pixels height and 640 pixels width",
"_____no_output_____"
]
],
[
[
"print(imgs_list_jump[0].shape)",
"(480, 640)\n"
]
],
[
[
"## The images sizes still very big, so we are going to resize all images in order to make them smaller",
"_____no_output_____"
]
],
[
[
"print('Original size:', imgs_list_jump[0].size) #original size",
"Original size: 307200\n"
]
],
[
[
"## We will apply the code bellow to all images",
"_____no_output_____"
]
],
[
[
"scale_percent = 20 #20 percent of original size\n\nwidth = int(imgs_list_jump[0].shape[1] * scale_percent / 100)\nheight = int(imgs_list_jump[0].shape[0] * scale_percent / 100)\n\ndim = (width, height)\n\n#resize image\nresized = cv2.resize(imgs_list_jump[0], dim, interpolation = cv2.INTER_AREA)\n\nprint('Original Dimensions:', imgs_list_jump[0].shape)\nprint('Resized Dimensions:', resized.shape)\n\nimg = cv2.cvtColor(resized, cv2.COLOR_BGR2RGB)\nplt.imshow(img)\nplt.show()",
"Original Dimensions: (480, 640)\nResized Dimensions: (96, 128)\n"
]
],
[
[
"## Applying to all images",
"_____no_output_____"
]
],
[
[
"scale_percent = 20 # 20 percent of original size\nresized_jump_list = []\nresized_nojump_list = []\n\nfor img in imgs_list_jump:\n width = int(img.shape[1] * scale_percent / 100)\n height = int(img.shape[0] * scale_percent / 100)\n\n dim = (width, height)\n\n #resize image\n resized = cv2.resize(img, dim, interpolation = cv2.INTER_AREA)\n resized_jump_list.append(resized)\n\nfor img in imgs_list_nojump:\n width = int(img.shape[1] * scale_percent / 100)\n height = int(img.shape[0] * scale_percent / 100)\n \n dim = (width, height)\n \n #resize image\n resized = cv2.resize(img, dim, interpolation = cv2.INTER_AREA)\n resized_nojump_list.append(resized)\n\n#Checking if it worked:\nprint(resized_jump_list[0].shape)\nprint(resized_nojump_list[0].shape)\n\nimg = cv2.cvtColor(resized_nojump_list[10], cv2.COLOR_BGR2RGB)\nplt.imshow(img)\nplt.show()\n\ncv2.imwrite('imagem_resized.png', resized_nojump_list[10])",
"(96, 128)\n(96, 128)\n"
]
],
[
[
"## Creating my X dataset",
"_____no_output_____"
]
],
[
[
"nojump_list_reshaped = []\njump_list_reshaped = []\n\nfor img in resized_nojump_list:\n nojump_list_reshaped.append(img.reshape(-1, img.size))\n\nfor img in resized_jump_list:\n jump_list_reshaped.append(img.reshape(-1, img.size))\n\nX_nojump = np.array(nojump_list_reshaped).reshape(len(nojump_list_reshaped), nojump_list_reshaped[0].size)\nX_jump = np.array(jump_list_reshaped).reshape(len(jump_list_reshaped), jump_list_reshaped[0].size)\n\nprint(X_nojump.shape)\nprint(X_jump.shape)",
"(386, 12288)\n(81, 12288)\n"
]
],
[
[
"## Joining both X's",
"_____no_output_____"
]
],
[
[
"X = np.vstack([X_nojump, X_jump])\nprint(X.shape)",
"(467, 12288)\n"
]
],
[
[
"## Creating my Y dataset",
"_____no_output_____"
]
],
[
[
"y_nojump = np.array([0 for i in range(len(nojump_list_reshaped))]).reshape(len(nojump_list_reshaped),-1)\ny_jump = np.array([1 for i in range(len(jump_list_reshaped))]).reshape(len(jump_list_reshaped),-1)",
"_____no_output_____"
]
],
[
[
"## Joining both Y's",
"_____no_output_____"
]
],
[
[
"y = np.vstack([y_nojump, y_jump])\nprint(y.shape)",
"(467, 1)\n"
]
],
[
[
"## Shuffling both datasets",
"_____no_output_____"
]
],
[
[
"shuffle_index = np.random.permutation(y.shape[0])\n#print(shuffle_index)\nX, y = X[shuffle_index], y[shuffle_index]",
"_____no_output_____"
]
],
[
[
"## Creating a X_train and y_train dataset",
"_____no_output_____"
]
],
[
[
"X_train = X\ny_train = y",
"_____no_output_____"
]
],
[
[
"## Choosing SVM (Support Vector Machine) as our Machine Learning model",
"_____no_output_____"
]
],
[
[
"from sklearn.svm import SVC\nsvm_clf = SVC(kernel='linear')\nsvm_clf.fit(X_train, y_train.ravel())",
"_____no_output_____"
]
],
[
[
"## Creating a confusion matrix to evaluate the model performance",
"_____no_output_____"
]
],
[
[
"from sklearn.model_selection import cross_val_predict\nfrom sklearn.metrics import confusion_matrix\n\ny_train_pred = cross_val_predict(svm_clf, X_train, y_train.ravel(), cv=3) #sgd_clf no primeiro parametro\nconfusion_matrix(y_train.ravel(), y_train_pred)",
"_____no_output_____"
]
],
[
[
"## Saving the model",
"_____no_output_____"
]
],
[
[
"import joblib\n\njoblib.dump(svm_clf, 'jump_model.pkl') #sgd_clf no primeiro parametro",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
e7fbb50167c81ad7a08e34d87acef609e3a4dec1 | 50,723 | ipynb | Jupyter Notebook | climate_starter.ipynb | tanmayrp/sqlalchemy-challenge | bbe4c7e60581851cb4195775f1a032869642caf2 | [
"ADSL"
] | null | null | null | climate_starter.ipynb | tanmayrp/sqlalchemy-challenge | bbe4c7e60581851cb4195775f1a032869642caf2 | [
"ADSL"
] | null | null | null | climate_starter.ipynb | tanmayrp/sqlalchemy-challenge | bbe4c7e60581851cb4195775f1a032869642caf2 | [
"ADSL"
] | null | null | null | 107.010549 | 23,828 | 0.861996 | [
[
[
"%matplotlib inline\nfrom matplotlib import style\nstyle.use('fivethirtyeight')\nimport matplotlib.pyplot as plt",
"_____no_output_____"
],
[
"import numpy as np\nimport pandas as pd\nimport datetime as dt",
"_____no_output_____"
]
],
[
[
"# Reflect Tables into SQLAlchemy ORM",
"_____no_output_____"
]
],
[
[
"# Python SQL toolkit and Object Relational Mapper\nimport sqlalchemy\nfrom sqlalchemy.ext.automap import automap_base\nfrom sqlalchemy.orm import Session\nfrom sqlalchemy import create_engine, func",
"_____no_output_____"
],
[
"# create engine to hawaii.sqlite\nengine = create_engine(\"sqlite:///hawaii.sqlite\")",
"_____no_output_____"
],
[
"# reflect an existing database into a new model\nBase = automap_base()\n\n# reflect the tables\nBase.prepare(engine, reflect=True)",
"_____no_output_____"
],
[
"# View all of the classes that automap found\nBase.classes.keys()",
"_____no_output_____"
],
[
"# Save references to each table\nMeasurement = Base.classes.measurement\nStation = Base.classes.station",
"_____no_output_____"
],
[
"# Create our session (link) from Python to the DB\nsession = Session(engine)",
"_____no_output_____"
]
],
[
[
"# Exploratory Precipitation Analysis",
"_____no_output_____"
]
],
[
[
"# Find the most recent date in the data set.\nmost_recent_date_str = session.query(Measurement.date).order_by(Measurement.date.desc()).first()\nprint(f\"The most recent date in the data set: {most_recent_date_str[0]}\")",
"The most recent date in the data set: 2017-08-23\n"
],
[
"# Design a query to retrieve the last 12 months of precipitation data and plot the results. \n# Starting from the most recent data point in the database. \nmost_recent_date = dt.datetime.strptime(most_recent_date_str[0], '%Y-%m-%d')\n\n# Calculate the date one year from the last date in data set.\nrecent_date_one_year_past = dt.date(most_recent_date.year -1, most_recent_date.month, most_recent_date.day)\n\n# Perform a query to retrieve the data and precipitation scores\nsel = [Measurement.date, Measurement.prcp]\nresult = session.query(*sel).\\\n filter(Measurement.date >= recent_date_one_year_past).all()\n\n# Save the query results as a Pandas DataFrame and set the index to the date column\nprecipitation_df = pd.DataFrame(result, columns=[\"Date\", \"Precipitation\"])\nprecipitation_df = precipitation_df.set_index(\"Date\")\n\n# Sort the dataframe by date\nprecipitation_df = precipitation_df.sort_values([\"Date\"], ascending=True)\nprecipitation_df.head()\n\n# Use Pandas Plotting with Matplotlib to plot the data\n\nx_axis = precipitation_df.index.tolist()\ny_axis = precipitation_df['Precipitation'].tolist()\n\nplt.figure(figsize=(10,7))\nplt.bar(x_axis, y_axis, width = 5, align=\"center\",label='precipitation')\n\nmajor_ticks = np.arange(0,400,45)\nplt.xticks(major_ticks, rotation=90)\n\nplt.xlabel(\"Date\")\nplt.ylabel(\"Inches\")\n\nplt.legend()\nplt.show()",
"_____no_output_____"
],
[
"# Use Pandas to calcualte the summary statistics for the precipitation data\nprecipitation_df.describe()",
"_____no_output_____"
]
],
[
[
"# Exploratory Station Analysis",
"_____no_output_____"
]
],
[
[
"# Design a query to calculate the total number stations in the dataset\nprint(f\"The number of stations in the dataset: {session.query(Station.id).count()} \");",
"The number of stations in the dataset: 9 \n"
],
[
"# Design a query to find the most active stations (i.e. what stations have the most rows?)\n# List the stations and the counts in descending order.\nmost_active_stations = session.query(Measurement.station, func.count(Measurement.id)).\\\n group_by(Measurement.station).\\\n order_by(func.count(Measurement.id).desc()).all()\n\nmost_active_station = most_active_stations[0][0]\nprint(f\"The most active station is: {most_active_station}\")",
"The most active station is: USC00519281\n"
],
[
"# Using the most active station id from the previous query, calculate the lowest, highest, and average temperature.\nsel = [func.min(Measurement.tobs), func.max(Measurement.tobs), func.avg(Measurement.tobs)]\nmost_active_station_summary_stats = session.query(*sel).\\\n filter(Measurement.station == most_active_station).all()\n\nmost_active_station_summary_stats",
"_____no_output_____"
],
[
"# Using the most active station id\n# Query the last 12 months of temperature observation data for this station and plot the results as a histogram\n\nsel = [Measurement.date, Measurement.prcp]\nresult = session.query(Measurement.tobs).\\\n filter(Measurement.date >= recent_date_one_year_past).\\\n filter(Measurement.station == most_active_station).all()\n\nfig, ax = plt.subplots()\n\nplt.hist(list(np.ravel(result)), bins=12, label=\"tobs\")\n\nplt.xlabel(\"Temperature\")\nplt.ylabel(\"Frequency\")\nplt.legend()\nplt.show()",
"_____no_output_____"
]
],
[
[
"# Close session",
"_____no_output_____"
]
],
[
[
"# Close Session\nsession.close()",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
]
] |
e7fbb947b8aab4a1aa8b127ab35413260a98aa3e | 2,144 | ipynb | Jupyter Notebook | joiner.ipynb | datastory/CFS-order-generator | 65dd70d6bef5650030a22a51f5813b4cb5cc89b0 | [
"MIT"
] | null | null | null | joiner.ipynb | datastory/CFS-order-generator | 65dd70d6bef5650030a22a51f5813b4cb5cc89b0 | [
"MIT"
] | null | null | null | joiner.ipynb | datastory/CFS-order-generator | 65dd70d6bef5650030a22a51f5813b4cb5cc89b0 | [
"MIT"
] | null | null | null | 24.089888 | 112 | 0.489272 | [
[
[
"from random import randint\nimport os\nfrom pydub import AudioSegment",
"_____no_output_____"
],
[
"def randomizer():\n tracks = []\n tracks.append('sil_' + str(randint(3, 6)))\n\n count = 1\n while (count < randint(4, 5)):\n count = count + 1\n tracks.append('povel_' + str(randint(1, 6)))\n\n tracks.append('sil_' + str(randint(3, 6)))\n return tracks",
"_____no_output_____"
],
[
"sest = randomizer()\nname = 'pov_' + '_'.join(sest[1:-1]).replace('povel_', '') + '.mp3'\n\n#3 sec. time between calls\nsest.insert(2, 'sil_3')\nsest.insert(4, 'sil_3')\nif len(sest) == 8:\n sest.insert(6, 'sil_3')\n \nif name not in os.listdir('./povely/'):\n combined = 'x'\n for file in sest:\n if combined == 'x':\n combined = AudioSegment.from_wav('./audio/' + file + '.wav')\n else:\n combined = combined + AudioSegment.from_wav('./audio/' + file + '.wav')\n\n combined.export('./povely/' + name, format='mp3', tags={'artist': 'CFS', 'album': 'Střelecké povely'})",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code"
]
] |
e7fbbb7b2a23ed965663499c0dc4ef2ac0bfa2ea | 25,265 | ipynb | Jupyter Notebook | courses/ml/logistic_regression.ipynb | obs145628/ml-notebooks | 08a64962e106ec569039ab204a7ae4c900783b6b | [
"MIT"
] | 1 | 2020-10-29T11:26:00.000Z | 2020-10-29T11:26:00.000Z | courses/ml/logistic_regression.ipynb | obs145628/ml-notebooks | 08a64962e106ec569039ab204a7ae4c900783b6b | [
"MIT"
] | 5 | 2021-03-18T21:33:45.000Z | 2022-03-11T23:34:50.000Z | courses/ml/logistic_regression.ipynb | obs145628/ml-notebooks | 08a64962e106ec569039ab204a7ae4c900783b6b | [
"MIT"
] | 1 | 2019-12-23T21:50:02.000Z | 2019-12-23T21:50:02.000Z | 32.143766 | 233 | 0.536513 | [
[
[
"import sys\nsys.path.append('../../pyutils')\n\nimport numpy as np\nimport scipy.linalg\nimport torch\n\nimport metrics\nimport utils\nfrom sklearn.linear_model import LogisticRegression\n\nnp.random.seed(12)",
"_____no_output_____"
]
],
[
[
"# Binary Logistic Regression\n\nLet $X$ training input of size $n * p$. \nIt contains $n$ examples, each with $p$ features. \nLet $y$ training target of size $n$. \nEach input $X_i$, vector of size $p$, is associated with it's target, $y_i$, which is $0$ or $1$. \nLogistic regression tries to fit a linear model to predict the target $y$ of a new input vector $x$.",
"_____no_output_____"
],
[
"The predictions of the model are denoted $\\hat{y}$.\n$$o_i = X_i\\beta = \\sum_{j=1}^{p} X_{ij}\\beta_j$$\n$$P(y_i = 1 | X_i) = \\hat{y_i} = \\sigma(o_i)$$\n$$\\sigma(x) = \\frac{1}{1 + e^{-x}}$$",
"_____no_output_____"
],
[
"## Cross Entropy\n\nThe cost function is the cross-entropy. \n$$J(\\beta) = - \\sum_{i=1}^n (y_i log(\\hat{y_i}) + (1 - y_i) log(1 - \\hat{y_i}))$$",
"_____no_output_____"
],
[
"$$\\frac{\\partial J(\\beta)}{\\partial \\hat{y_i}} = \\frac{\\hat{y_i} - y_i}{\\hat{y_i}(1 - \\hat{y_i})}$$\n$$\\frac{\\partial J(\\beta)}{\\partial \\hat{y}} = \\frac{\\hat{y} - y}{\\hat{y}(1 - \\hat{y})}$$",
"_____no_output_____"
]
],
[
[
"def sigmoid(x):\n return 1 / (1 + np.exp(-x))\n\ny_out = np.random.randn(13).astype(np.float32)\ny_true = np.random.randint(0, 2, (13)).astype(np.float32)\ny_pred = sigmoid(y_out)\nj = - np.sum(y_true * np.log(y_pred) + (1-y_true) * np.log(1-y_pred))\n\nty_true = torch.tensor(y_true, requires_grad=False)\nty_pred = torch.tensor(y_pred, requires_grad=True)\ncriterion = torch.nn.BCELoss(reduction='sum')\ntj = criterion(ty_pred, ty_true)\ntj.backward()\n\nprint(j)\nprint(tj.data.numpy())\nprint(metrics.tdist(j, tj.data.numpy()))",
"10.678722\n10.678722\n0.0\n"
],
[
"dy_pred = (y_pred - y_true) / (y_pred * (1 - y_pred))\ntdy_pred_sol = ty_pred.grad.data.numpy()\nprint(dy_pred)\nprint(tdy_pred_sol)\nprint(metrics.tdist(dy_pred, tdy_pred_sol))",
"[-1.6231388 -2.9766939 2.274354 -6.4779763 -1.4708843 1.2155157\n -1.9948862 1.8867183 1.4462028 18.669147 1.5500078 -1.6234685\n -1.3342199]\n[-1.6231389 -2.976694 2.274354 -6.477976 -1.4708843 1.2155157\n -1.9948862 1.8867184 1.4462028 18.669147 1.5500077 -1.6234685\n -1.3342199]\n5.717077e-07\n"
]
],
[
[
"$$\\frac{\\partial J(\\beta)}{\\partial o_i} = \\hat{y_i} - y_i$$\n$$\\frac{\\partial J(\\beta)}{\\partial o} = \\hat{y} - y$$",
"_____no_output_____"
]
],
[
[
"y_out = np.random.randn(13).astype(np.float32)\ny_true = np.random.randint(0, 2, (13)).astype(np.float32)\ny_pred = sigmoid(y_out)\nj = - np.sum(y_true * np.log(y_pred) + (1-y_true) * np.log(1-y_pred))\n\nty_true = torch.tensor(y_true, requires_grad=False)\nty_out = torch.tensor(y_out, requires_grad=True)\ncriterion = torch.nn.BCEWithLogitsLoss(reduction='sum')\ntj = criterion(ty_out, ty_true)\ntj.backward()\n\nprint(j)\nprint(tj.data.numpy())\nprint(metrics.tdist(j, tj.data.numpy()))",
"10.849605\n10.849605\n0.0\n"
],
[
"dy_out = y_pred - y_true\ndy_out_sol = ty_out.grad.data.numpy()\nprint(dy_out)\nprint(dy_out_sol)\nprint(metrics.tdist(dy_out, dy_out_sol))",
"[-0.7712122 0.5310385 -0.7378207 -0.13447696 0.20648097 0.28622478\n -0.7465389 0.5608791 0.53383535 -0.75912154 -0.4418677 0.6848638\n 0.35961235]\n[-0.7712122 0.5310385 -0.7378207 -0.13447696 0.20648097 0.28622478\n -0.7465389 0.5608791 0.53383535 -0.75912154 -0.4418677 0.6848638\n 0.35961235]\n0.0\n"
]
],
[
[
"Can be trained with gradient descent",
"_____no_output_____"
]
],
[
[
"def log_reg_sk(X, y):\n \n m = LogisticRegression(fit_intercept=False)\n m.fit(X, y)\n return m.coef_\n\ndef get_error(X, y, w):\n y_pred = sigmoid(X @ w)\n err = - np.sum(y * np.log(y_pred) + (1-y) * np.log(1-y_pred))\n return err\n\ndef log_reg(X, y):\n\n w = np.random.randn(X.shape[1])\n \n for epoch in range(10000):\n \n y_pred = sigmoid(X @ w)\n dy_out = y_pred - y\n dw = X.T @ dy_out\n \n w -= 0.001 * dw\n if epoch % 100 == 0:\n err = get_error(X, y, w)\n print('SGD Error = {}'.format(err))\n \n return w\n \n\n\nX = np.random.randn(73, 4).astype(np.float32)\ny = np.random.randint(0, 2, (73)).astype(np.float32)\n\n \nw1 = log_reg_sk(X, y)[0]\nw2 = log_reg(X, y)\nprint('SK Error = {}'.format(get_error(X, y, w1)))\nprint('SGD Error = {}'.format(get_error(X, y, w2)))\nprint(w1)\nprint(w2)",
"SGD Error = 71.14744133609668\nSGD Error = 49.65028785288255\nSGD Error = 48.91772028291884\nSGD Error = 48.888462052036814\nSGD Error = 48.88680421514018\nSGD Error = 48.88669058552164\nSGD Error = 48.88668168135676\nSGD Error = 48.886680916022215\nSGD Error = 48.88668084643879\nSGD Error = 48.88668083991474\nSGD Error = 48.886680839293305\nSGD Error = 48.88668083923365\nSGD Error = 48.8866808392279\nSGD Error = 48.886680839227346\nSGD Error = 48.88668083922729\nSGD Error = 48.88668083922728\nSGD Error = 48.88668083922728\nSGD Error = 48.88668083922729\nSGD Error = 48.88668083922729\nSGD Error = 48.88668083922729\nSGD Error = 48.88668083922729\nSGD Error = 48.88668083922728\nSGD Error = 48.88668083922728\nSGD Error = 48.88668083922728\nSGD Error = 48.88668083922728\nSGD Error = 48.88668083922729\nSGD Error = 48.88668083922728\nSGD Error = 48.88668083922729\nSGD Error = 48.88668083922728\nSGD Error = 48.88668083922728\nSGD Error = 48.88668083922728\nSGD Error = 48.88668083922728\nSGD Error = 48.88668083922728\nSGD Error = 48.88668083922728\nSGD Error = 48.88668083922728\nSGD Error = 48.88668083922728\nSGD Error = 48.88668083922728\nSGD Error = 48.88668083922728\nSGD Error = 48.88668083922728\nSGD Error = 48.88668083922728\nSGD Error = 48.88668083922728\nSGD Error = 48.88668083922728\nSGD Error = 48.88668083922728\nSGD Error = 48.88668083922728\nSGD Error = 48.88668083922728\nSGD Error = 48.88668083922728\nSGD Error = 48.88668083922728\nSGD Error = 48.88668083922728\nSGD Error = 48.88668083922728\nSGD Error = 48.88668083922728\nSGD Error = 48.88668083922728\nSGD Error = 48.88668083922728\nSGD Error = 48.88668083922728\nSGD Error = 48.88668083922728\nSGD Error = 48.88668083922728\nSGD Error = 48.88668083922728\nSGD Error = 48.88668083922728\nSGD Error = 48.88668083922728\nSGD Error = 48.88668083922728\n"
]
],
[
[
"## Multiclass Logistic Regression",
"_____no_output_____"
]
],
[
[
"def softmax(x):\n x_e = np.exp(x)\n return x_e / np.sum(x_e, axis=1, keepdims=True)",
"_____no_output_____"
],
[
"y_out = np.random.randn(93, 4).astype(np.float32)\ny_true = np.zeros((93, 4)).astype(np.float32)\nfor i in range(y_true.shape[0]):\n y_true[i][np.random.randint(0, y_true.shape[1])] = 1\ny_pred = softmax(y_out)\n\nj = - np.sum(y_true * np.log(y_pred))\n\nty_true = torch.tensor(y_true, requires_grad=False)\nty_true = torch.argmax(ty_true, dim=1)\nty_out = torch.tensor(y_out, requires_grad=True)\n\ncriterion = torch.nn.CrossEntropyLoss(reduction='sum')\ntj = criterion(ty_out, ty_true)\ntj.backward()\n\nprint(j)\nprint(tj.data.numpy())\nprint(metrics.tdist(j, tj.data.numpy()))",
"148.84998\n148.85\n3.0517578e-05\n"
],
[
"y_out = np.random.randn(7, 4).astype(np.float32)\ny_true = np.zeros((7, 4)).astype(np.float32)\nfor i in range(y_true.shape[0]):\n y_true[i][np.random.randint(0, y_true.shape[1])] = 1\ny_pred = softmax(y_out)\n\nj = - np.sum(y_true * np.log(y_pred))\n\nty_true = torch.tensor(y_true, requires_grad=False)\nty_pred = torch.tensor(y_pred, requires_grad=True)\ntj = - torch.sum(ty_true * torch.log(ty_pred))\ntj.backward()\n\nprint(j)\nprint(tj.data.numpy())\nprint(metrics.tdist(j, tj.data.numpy()))",
"14.296462\n14.296461\n9.536743e-07\n"
],
[
"dy_pred = - y_true / y_pred\ndy_pred_sol = ty_pred.grad.data.numpy()\n\nprint(dy_pred)\nprint(dy_pred_sol)\nprint(metrics.tdist(dy_pred, dy_pred_sol))",
"[[ -0. -10.283339 -0. -0. ]\n [-10.58094 -0. -0. -0. ]\n [ -0. -0. -2.7528124 -0. ]\n [-46.90987 -0. -0. -0. ]\n [ -0. -0. -1.3170731 -0. ]\n [ -7.9531765 -0. -0. -0. ]\n [ -0. -10.990683 -0. -0. ]]\n[[ -0. -10.283339 -0. -0. ]\n [-10.58094 -0. -0. -0. ]\n [ -0. -0. -2.7528124 -0. ]\n [-46.90987 -0. -0. -0. ]\n [ -0. -0. -1.3170731 -0. ]\n [ -7.9531765 -0. -0. -0. ]\n [ -0. -10.990683 -0. -0. ]]\n0.0\n"
]
],
[
[
"$$\\frac{\\partial J(\\beta)}{\\partial o_{ij}} = \\hat{y_{ij}} - y_{ij}$$\n$$\\frac{\\partial J(\\beta)}{\\partial o} = \\hat{y} - y$$",
"_____no_output_____"
]
],
[
[
"y_out = np.random.randn(7, 4).astype(np.float32)\ny_true = np.zeros((7, 4)).astype(np.float32)\nfor i in range(y_true.shape[0]):\n y_true[i][np.random.randint(0, y_true.shape[1])] = 1\ny_pred = softmax(y_out)\n\nj = - np.sum(y_true * np.log(y_pred))\n\nty_true = torch.tensor(y_true, requires_grad=False)\nty_true = torch.argmax(ty_true, dim=1)\nty_out = torch.tensor(y_out, requires_grad=True)\n\ncriterion = torch.nn.CrossEntropyLoss(reduction='sum')\ntj = criterion(ty_out, ty_true)\ntj.backward()\n\nprint(j)\nprint(tj.data.numpy())\nprint(metrics.tdist(j, tj.data.numpy()))",
"12.387552\n12.387553\n9.536743e-07\n"
],
[
"dy_out = y_pred - y_true\ndy_out_sol = ty_out.grad.data.numpy()\n\nprint(dy_out)\nprint(dy_out_sol)\nprint(metrics.tdist(dy_out, dy_out_sol))",
"[[-0.71088123 0.25399554 0.31700996 0.13987577]\n [ 0.02140404 0.3097546 0.29681578 -0.6279745 ]\n [ 0.60384715 0.03253903 0.0066169 -0.6430031 ]\n [ 0.22169167 -0.88766754 0.03120301 0.63477284]\n [ 0.05100057 -0.38170385 0.10363309 0.22707026]\n [ 0.02778155 0.6928965 -0.8194856 0.09880757]\n [ 0.03780703 0.9247614 0.02876937 -0.99133784]]\n[[-0.71088123 0.2539955 0.31700993 0.13987575]\n [ 0.02140405 0.30975467 0.29681584 -0.6279744 ]\n [ 0.60384715 0.03253903 0.0066169 -0.6430031 ]\n [ 0.22169165 -0.88766754 0.03120301 0.6347728 ]\n [ 0.05100057 -0.38170385 0.10363309 0.22707026]\n [ 0.02778155 0.6928965 -0.8194856 0.09880759]\n [ 0.03780702 0.9247613 0.02876936 -0.99133784]]\n2.0499465e-07\n"
]
],
[
[
"Can be trained with gradient descent",
"_____no_output_____"
]
],
[
[
"def get_error_multi(X, y, w):\n y_pred = softmax(X @ w)\n err = - np.sum(y * np.log(y_pred))\n return err\n\n\ndef multilog_reg(X, y):\n\n w = np.random.randn(X.shape[1], y.shape[1])\n \n for epoch in range(10000):\n \n y_pred = softmax(X @ w)\n dy_out = y_pred - y\n dw = X.T @ dy_out\n \n w -= 0.001 * dw\n if epoch % 100 == 0:\n err = get_error_multi(X, y, w)\n print('SGD Error = {}'.format(err))\n \n return w\n\n \nX = np.random.randn(93, 4).astype(np.float32)\ny_true = np.zeros((93, 4)).astype(np.float32)\nfor i in range(y_true.shape[0]):\n y_true[i][np.random.randint(0, y_true.shape[1])] = 1\ny_true_sk = np.argmax(y_true, axis=1)\n \nw1 = log_reg_sk(X, y_true_sk)\nw2 = multilog_reg(X, y_true)\nprint('SK Error = {}'.format(get_error_multi(X, y_true, w1)))\nprint('SGD Error = {}'.format(get_error_multi(X, y_true, w2)))\nprint(w1)\nprint(w2)",
"SGD Error = 264.5967568728954\nSGD Error = 124.52928999771657\nSGD Error = 120.69338069535253\nSGD Error = 120.60511291188504\nSGD Error = 120.60208822782775\nSGD Error = 120.60195961583351\nSGD Error = 120.60195360857097\nSGD Error = 120.60195331813674\nSGD Error = 120.60195330392729\nSGD Error = 120.60195330322918\nSGD Error = 120.60195330319483\nSGD Error = 120.60195330319314\nSGD Error = 120.60195330319306\nSGD Error = 120.60195330319306\nSGD Error = 120.60195330319306\nSGD Error = 120.60195330319306\nSGD Error = 120.60195330319306\nSGD Error = 120.60195330319306\nSGD Error = 120.60195330319306\nSGD Error = 120.60195330319306\nSGD Error = 120.60195330319306\nSGD Error = 120.60195330319306\nSGD Error = 120.60195330319306\nSGD Error = 120.60195330319306\nSGD Error = 120.60195330319306\nSGD Error = 120.60195330319306\nSGD Error = 120.60195330319306\nSGD Error = 120.60195330319306\nSGD Error = 120.60195330319306\nSGD Error = 120.60195330319306\nSGD Error = 120.60195330319306\nSGD Error = 120.60195330319306\nSGD Error = 120.60195330319306\nSGD Error = 120.60195330319306\nSGD Error = 120.60195330319306\nSGD Error = 120.60195330319306\nSGD Error = 120.60195330319306\nSGD Error = 120.60195330319306\nSGD Error = 120.60195330319306\nSGD Error = 120.60195330319306\nSGD Error = 120.60195330319306\nSGD Error = 120.60195330319306\n"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
]
] |
e7fbbb94f04a170a144822dfe9bf76d50a7b0190 | 17,040 | ipynb | Jupyter Notebook | pria_lifechem/analysis/scaffold/scaffold_Keck_Pria_FP_data.ipynb | chao1224/pria_lifechem | 1fd892505a45695c6197f8d711a8a37589cd7097 | [
"MIT"
] | 5 | 2018-05-14T10:15:13.000Z | 2021-03-15T17:18:10.000Z | pria_lifechem/analysis/scaffold/scaffold_Keck_Pria_FP_data.ipynb | chao1224/pria_lifechem | 1fd892505a45695c6197f8d711a8a37589cd7097 | [
"MIT"
] | 5 | 2018-05-05T21:04:11.000Z | 2019-06-24T22:05:35.000Z | pria_lifechem/analysis/scaffold/scaffold_Keck_Pria_FP_data.ipynb | chao1224/pria_lifechem | 1fd892505a45695c6197f8d711a8a37589cd7097 | [
"MIT"
] | 2 | 2019-10-18T23:42:27.000Z | 2020-07-08T19:46:14.000Z | 31.497227 | 386 | 0.578286 | [
[
[
"import os\nfrom virtual_screening.function import read_merged_data\nfrom rdkit.Chem.Scaffolds import MurckoScaffold\nfrom rdkit import Chem",
"/home/sliu426/.local/lib/python2.7/site-packages/sklearn/cross_validation.py:41: DeprecationWarning: This module was deprecated in version 0.18 in favor of the model_selection module into which all the refactored classes and functions are moved. Also note that the interface of the new CV iterators are different from that of this module. This module will be removed in 0.20.\n \"This module will be removed in 0.20.\", DeprecationWarning)\n"
],
[
"data_path = '../../../dataset/fixed_dataset/fold_5/file_{}.csv'\nk = 5\n\noutput_file_list = [data_path.format(i) for i in range(k)]",
"_____no_output_____"
],
[
"data_pd_list = []\nfor i in range(k):\n temp_pd = read_merged_data(output_file_list[i:i+1])\n print temp_pd.shape\n data_pd_list.append(temp_pd)",
"(14486, 8)\n(14482, 8)\n(14484, 8)\n(14485, 8)\n(14486, 8)\n"
]
],
[
[
"# Scaffolds of Keck_Pria_FP_data",
"_____no_output_____"
]
],
[
[
"Target_name = 'Keck_Pria_FP_data'",
"_____no_output_____"
],
[
"smiles_list = []\nfor i in range(k):\n smiles_list.extend(data_pd_list[i][data_pd_list[i][Target_name]==1]['SMILES'].tolist())\n\nscaffold_set = set()\nfor smiles in smiles_list:\n mol = Chem.MolFromSmiles(smiles)\n core = MurckoScaffold.GetScaffoldForMol(mol)\n scaffold = Chem.MolToSmiles(core)\n scaffold_set.add(scaffold)\n print 'Original SMILES is \\t{}'.format(smiles)\n print 'The Scaffold is \\t{}'.format(scaffold)\n print\n\nprint '{} total smiles'.format(len(smiles_list))\nprint '{} different scaffolds'.format(len(scaffold_set))",
"Original SMILES is \tc1cc(cc2c1CCCN2CCOC)NS(=O)(=O)c3c(c(c(c(c3C)C)C)C)C\nThe Scaffold is \tO=S(=O)(Nc1ccc2c(c1)NCCC2)c1ccccc1\n\nOriginal SMILES is \tc1cc(ccc1CC)NC(=O)CSc2ncc(c(=O)[nH]2)S(=O)(=O)c3ccc(cc3C)C\nThe Scaffold is \tO=C(CSc1ncc(S(=O)(=O)c2ccccc2)c(=O)[nH]1)Nc1ccccc1\n\nOriginal SMILES is \tc1ccc2c(c1)c(c[nH]2)CCNC(=O)Cc3csc(n3)Nc4cccc(c4)Cl\nThe Scaffold is \tO=C(Cc1csc(Nc2ccccc2)n1)NCCc1c[nH]c2ccccc12\n\nOriginal SMILES is \tc1cc(ccc1c2nnc3n2CC(=C)S3)Br\nThe Scaffold is \tC=C1Cn2c(nnc2-c2ccccc2)S1\n\nOriginal SMILES is \tc1cc(cc2c1CCCN2CCOC)N\nThe Scaffold is \tc1ccc2c(c1)CCCN2\n\nOriginal SMILES is \tc1cc(cc(c1)Cl)Nc2nc(cs2)CC(=O)Nc3ccc4c(c3)OCCO4\nThe Scaffold is \tO=C(Cc1csc(Nc2ccccc2)n1)Nc1ccc2c(c1)OCCO2\n\nOriginal SMILES is \tc1cc(cc(c1NC(=O)c2c(nns2)C)[N+](=O)[O-])OCC\nThe Scaffold is \tO=C(Nc1ccccc1)c1cnns1\n\nOriginal SMILES is \tc1ccc2c(c1)ccn2CCNC(=S)NCCc3cc4ccc(cc4[nH]c3=O)C\nThe Scaffold is \tO=c1[nH]c2ccccc2cc1CCNC(=S)NCCn1ccc2ccccc21\n\nOriginal SMILES is \tc1ccc(cc1)OCC(=O)Nc2nc-3c(s2)-c4cccc5c4c3ccc5\nThe Scaffold is \tO=C(COc1ccccc1)Nc1nc2c(s1)-c1cccc3cccc-2c13\n\nOriginal SMILES is \tc1ccc(c(c1)C(=O)Nc2nnc(o2)Cc3cccs3)SCC\nThe Scaffold is \tO=C(Nc1nnc(Cc2cccs2)o1)c1ccccc1\n\nOriginal SMILES is \tc1cc(ccc1n2ccnc2SCC(=O)Nc3ccc(cc3)Br)F\nThe Scaffold is \tO=C(CSc1nccn1-c1ccccc1)Nc1ccccc1\n\nOriginal SMILES is \tc1cc2c(cc1C(=O)NCc3ccc4c(c3)cc(n4C)C)OCO2\nThe Scaffold is \tO=C(NCc1ccc2[nH]ccc2c1)c1ccc2c(c1)OCO2\n\nOriginal SMILES is \tc1ccc2c(c1)ccc(c2C=Nc3c(cccn3)O)O\nThe Scaffold is \tC(=Nc1ccccn1)c1cccc2ccccc12\n\nOriginal SMILES is \tc1cc(oc1)C(=O)Nc2ccc(cc2)Nc3ccc(nn3)n4cccn4\nThe Scaffold is \tO=C(Nc1ccc(Nc2ccc(-n3cccn3)nn2)cc1)c1ccco1\n\nOriginal SMILES is \tc1ccc(c(c1)C(=O)Nc2nc(cs2)c3ccccn3)Br\nThe Scaffold is \tO=C(Nc1nc(-c2ccccn2)cs1)c1ccccc1\n\nOriginal SMILES is \tc1ccc(cc1)C2=NN(C(C2)c3ccc4c(c3)nccn4)C(=O)c5cccs5\nThe Scaffold is \tO=C(c1cccs1)N1N=C(c2ccccc2)CC1c1ccc2nccnc2c1\n\nOriginal SMILES is \tc1cc(cc(c1)CS(=O)(=O)Nc2ccc3c(c2)N(CCC3)CCOC)C\nThe Scaffold is \tO=S(=O)(Cc1ccccc1)Nc1ccc2c(c1)NCCC2\n\nOriginal SMILES is \tc1c(onc1NC(=O)Cn2cccc(c2=O)c3nc(no3)C4CC4)C\nThe Scaffold is \tO=C(Cn1cccc(-c2nc(C3CC3)no2)c1=O)Nc1ccon1\n\nOriginal SMILES is \tc1cc(sc1)Cc2nnc(o2)NC(=O)c3ccc(cc3)S(=O)(=O)N(C)CCCC\nThe Scaffold is \tO=C(Nc1nnc(Cc2cccs2)o1)c1ccccc1\n\nOriginal SMILES is \tc1cc2cccnc2c(c1)SCC(=O)NCCc3ccc(cc3)Cl\nThe Scaffold is \tO=C(CSc1cccc2cccnc12)NCCc1ccccc1\n\nOriginal SMILES is \tc1cc(cc(c1)F)NC(=O)Nc2ccc(cc2)Nc3ccc(nn3)n4cccn4\nThe Scaffold is \tO=C(Nc1ccccc1)Nc1ccc(Nc2ccc(-n3cccn3)nn2)cc1\n\nOriginal SMILES is \tc1cc2cccc3c2c(c1)C(=O)N(C3=O)CCN4CCN(CC4)CC(=O)c5ccc(cc5)OC\nThe Scaffold is \tO=C(CN1CCN(CCN2C(=O)c3cccc4cccc(c34)C2=O)CC1)c1ccccc1\n\nOriginal SMILES is \tc1ccnc(c1)CN2Cc3c(ccc4c3OC(=Cc5cccc(c5)F)C4=O)OC2\nThe Scaffold is \tO=C1C(=Cc2ccccc2)Oc2c1ccc1c2CN(Cc2ccccn2)CO1\n\nOriginal SMILES is \tc1ccc(c(c1)c2c(c(on2)C)C(=O)NCCn3c4c(cn3)c(nc(n4)SCC)NCCC)Cl\nThe Scaffold is \tO=C(NCCn1ncc2cncnc21)c1conc1-c1ccccc1\n\n24 total smiles\n23 different scaffolds\n"
]
],
[
[
"# Below is scaffold for each fold",
"_____no_output_____"
],
[
"# Scaffold for fold 0",
"_____no_output_____"
]
],
[
[
"i = 0\n\nsmiles_list = data_pd_list[i][data_pd_list[i][Target_name]==1]['SMILES'].tolist()\nscaffold_set = set()\nfor smiles in smiles_list:\n mol = Chem.MolFromSmiles(smiles)\n core = MurckoScaffold.GetScaffoldForMol(mol)\n scaffold = Chem.MolToSmiles(core)\n scaffold_set.add(scaffold)\n print 'Original SMILES is \\t{}'.format(smiles)\n print 'The Scaffold is \\t{}'.format(scaffold)\n print\n\nprint '{} total smiles'.format(len(smiles_list))\nprint '{} different scaffolds'.format(len(scaffold_set))",
"Original SMILES is \tc1cc(cc2c1CCCN2CCOC)NS(=O)(=O)c3c(c(c(c(c3C)C)C)C)C\nThe Scaffold is \tO=S(=O)(Nc1ccc2c(c1)NCCC2)c1ccccc1\n\nOriginal SMILES is \tc1cc(ccc1CC)NC(=O)CSc2ncc(c(=O)[nH]2)S(=O)(=O)c3ccc(cc3C)C\nThe Scaffold is \tO=C(CSc1ncc(S(=O)(=O)c2ccccc2)c(=O)[nH]1)Nc1ccccc1\n\nOriginal SMILES is \tc1ccc2c(c1)c(c[nH]2)CCNC(=O)Cc3csc(n3)Nc4cccc(c4)Cl\nThe Scaffold is \tO=C(Cc1csc(Nc2ccccc2)n1)NCCc1c[nH]c2ccccc12\n\nOriginal SMILES is \tc1cc(ccc1c2nnc3n2CC(=C)S3)Br\nThe Scaffold is \tC=C1Cn2c(nnc2-c2ccccc2)S1\n\nOriginal SMILES is \tc1cc(cc2c1CCCN2CCOC)N\nThe Scaffold is \tc1ccc2c(c1)CCCN2\n\n5 total smiles\n5 different scaffolds\n"
]
],
[
[
"# Scaffold for fold 1",
"_____no_output_____"
]
],
[
[
"i = 1\n\nsmiles_list = data_pd_list[i][data_pd_list[i][Target_name]==1]['SMILES'].tolist()\nscaffold_set = set()\nfor smiles in smiles_list:\n mol = Chem.MolFromSmiles(smiles)\n core = MurckoScaffold.GetScaffoldForMol(mol)\n scaffold = Chem.MolToSmiles(core)\n scaffold_set.add(scaffold)\n print 'Original SMILES is \\t{}'.format(smiles)\n print 'The Scaffold is \\t{}'.format(scaffold)\n print\n\nprint '{} total smiles'.format(len(smiles_list))\nprint '{} different scaffolds'.format(len(scaffold_set))",
"Original SMILES is \tc1cc(cc(c1)Cl)Nc2nc(cs2)CC(=O)Nc3ccc4c(c3)OCCO4\nThe Scaffold is \tO=C(Cc1csc(Nc2ccccc2)n1)Nc1ccc2c(c1)OCCO2\n\nOriginal SMILES is \tc1cc(cc(c1NC(=O)c2c(nns2)C)[N+](=O)[O-])OCC\nThe Scaffold is \tO=C(Nc1ccccc1)c1cnns1\n\nOriginal SMILES is \tc1ccc2c(c1)ccn2CCNC(=S)NCCc3cc4ccc(cc4[nH]c3=O)C\nThe Scaffold is \tO=c1[nH]c2ccccc2cc1CCNC(=S)NCCn1ccc2ccccc21\n\nOriginal SMILES is \tc1ccc(cc1)OCC(=O)Nc2nc-3c(s2)-c4cccc5c4c3ccc5\nThe Scaffold is \tO=C(COc1ccccc1)Nc1nc2c(s1)-c1cccc3cccc-2c13\n\n4 total smiles\n4 different scaffolds\n"
]
],
[
[
"# Scaffold for fold 2",
"_____no_output_____"
]
],
[
[
"i = 2\n\nsmiles_list = data_pd_list[i][data_pd_list[i][Target_name]==1]['SMILES'].tolist()\nscaffold_set = set()\nfor smiles in smiles_list:\n mol = Chem.MolFromSmiles(smiles)\n core = MurckoScaffold.GetScaffoldForMol(mol)\n scaffold = Chem.MolToSmiles(core)\n scaffold_set.add(scaffold)\n print 'Original SMILES is \\t{}'.format(smiles)\n print 'The Scaffold is \\t{}'.format(scaffold)\n print\n\nprint '{} total smiles'.format(len(smiles_list))\nprint '{} different scaffolds'.format(len(scaffold_set))",
"Original SMILES is \tc1ccc(c(c1)C(=O)Nc2nnc(o2)Cc3cccs3)SCC\nThe Scaffold is \tO=C(Nc1nnc(Cc2cccs2)o1)c1ccccc1\n\nOriginal SMILES is \tc1cc(ccc1n2ccnc2SCC(=O)Nc3ccc(cc3)Br)F\nThe Scaffold is \tO=C(CSc1nccn1-c1ccccc1)Nc1ccccc1\n\nOriginal SMILES is \tc1cc2c(cc1C(=O)NCc3ccc4c(c3)cc(n4C)C)OCO2\nThe Scaffold is \tO=C(NCc1ccc2[nH]ccc2c1)c1ccc2c(c1)OCO2\n\nOriginal SMILES is \tc1ccc2c(c1)ccc(c2C=Nc3c(cccn3)O)O\nThe Scaffold is \tC(=Nc1ccccn1)c1cccc2ccccc12\n\n4 total smiles\n4 different scaffolds\n"
]
],
[
[
"# Scaffold for fold 3",
"_____no_output_____"
]
],
[
[
"i = 3\n\nsmiles_list = data_pd_list[i][data_pd_list[i][Target_name]==1]['SMILES'].tolist()\nscaffold_set = set()\nfor smiles in smiles_list:\n mol = Chem.MolFromSmiles(smiles)\n core = MurckoScaffold.GetScaffoldForMol(mol)\n scaffold = Chem.MolToSmiles(core)\n scaffold_set.add(scaffold)\n print 'Original SMILES is \\t{}'.format(smiles)\n print 'The Scaffold is \\t{}'.format(scaffold)\n print\n\nprint '{} total smiles'.format(len(smiles_list))\nprint '{} different scaffolds'.format(len(scaffold_set))",
"Original SMILES is \tc1cc(oc1)C(=O)Nc2ccc(cc2)Nc3ccc(nn3)n4cccn4\nThe Scaffold is \tO=C(Nc1ccc(Nc2ccc(-n3cccn3)nn2)cc1)c1ccco1\n\nOriginal SMILES is \tc1ccc(c(c1)C(=O)Nc2nc(cs2)c3ccccn3)Br\nThe Scaffold is \tO=C(Nc1nc(-c2ccccn2)cs1)c1ccccc1\n\nOriginal SMILES is \tc1ccc(cc1)C2=NN(C(C2)c3ccc4c(c3)nccn4)C(=O)c5cccs5\nThe Scaffold is \tO=C(c1cccs1)N1N=C(c2ccccc2)CC1c1ccc2nccnc2c1\n\nOriginal SMILES is \tc1cc(cc(c1)CS(=O)(=O)Nc2ccc3c(c2)N(CCC3)CCOC)C\nThe Scaffold is \tO=S(=O)(Cc1ccccc1)Nc1ccc2c(c1)NCCC2\n\nOriginal SMILES is \tc1c(onc1NC(=O)Cn2cccc(c2=O)c3nc(no3)C4CC4)C\nThe Scaffold is \tO=C(Cn1cccc(-c2nc(C3CC3)no2)c1=O)Nc1ccon1\n\n5 total smiles\n5 different scaffolds\n"
]
],
[
[
"# Scaffold for fold 4",
"_____no_output_____"
]
],
[
[
"i = 4\n\nsmiles_list = data_pd_list[i][data_pd_list[i][Target_name]==1]['SMILES'].tolist()\nscaffold_set = set()\nfor smiles in smiles_list:\n mol = Chem.MolFromSmiles(smiles)\n core = MurckoScaffold.GetScaffoldForMol(mol)\n scaffold = Chem.MolToSmiles(core)\n scaffold_set.add(scaffold)\n print 'Original SMILES is \\t{}'.format(smiles)\n print 'The Scaffold is \\t{}'.format(scaffold)\n print\n\nprint '{} total smiles'.format(len(smiles_list))\nprint '{} different scaffolds'.format(len(scaffold_set))",
"Original SMILES is \tc1cc(sc1)Cc2nnc(o2)NC(=O)c3ccc(cc3)S(=O)(=O)N(C)CCCC\nThe Scaffold is \tO=C(Nc1nnc(Cc2cccs2)o1)c1ccccc1\n\nOriginal SMILES is \tc1cc2cccnc2c(c1)SCC(=O)NCCc3ccc(cc3)Cl\nThe Scaffold is \tO=C(CSc1cccc2cccnc12)NCCc1ccccc1\n\nOriginal SMILES is \tc1cc(cc(c1)F)NC(=O)Nc2ccc(cc2)Nc3ccc(nn3)n4cccn4\nThe Scaffold is \tO=C(Nc1ccccc1)Nc1ccc(Nc2ccc(-n3cccn3)nn2)cc1\n\nOriginal SMILES is \tc1cc2cccc3c2c(c1)C(=O)N(C3=O)CCN4CCN(CC4)CC(=O)c5ccc(cc5)OC\nThe Scaffold is \tO=C(CN1CCN(CCN2C(=O)c3cccc4cccc(c34)C2=O)CC1)c1ccccc1\n\nOriginal SMILES is \tc1ccnc(c1)CN2Cc3c(ccc4c3OC(=Cc5cccc(c5)F)C4=O)OC2\nThe Scaffold is \tO=C1C(=Cc2ccccc2)Oc2c1ccc1c2CN(Cc2ccccn2)CO1\n\nOriginal SMILES is \tc1ccc(c(c1)c2c(c(on2)C)C(=O)NCCn3c4c(cn3)c(nc(n4)SCC)NCCC)Cl\nThe Scaffold is \tO=C(NCCn1ncc2cncnc21)c1conc1-c1ccccc1\n\n6 total smiles\n6 different scaffolds\n"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
e7fbbca8f7301a286d2c205e85acd1c80d3debc6 | 19,864 | ipynb | Jupyter Notebook | assignments/assignment3/PyTorch_CNN.ipynb | pavel2805/my_dlcoarse_ai | f535b956f3f9c8ee1d85f014ebd9da517734a473 | [
"MIT"
] | null | null | null | assignments/assignment3/PyTorch_CNN.ipynb | pavel2805/my_dlcoarse_ai | f535b956f3f9c8ee1d85f014ebd9da517734a473 | [
"MIT"
] | null | null | null | assignments/assignment3/PyTorch_CNN.ipynb | pavel2805/my_dlcoarse_ai | f535b956f3f9c8ee1d85f014ebd9da517734a473 | [
"MIT"
] | null | null | null | 31.086072 | 236 | 0.557038 | [
[
[
"# Задание 3.2 - сверточные нейронные сети (CNNs) в PyTorch\n\nЭто упражнение мы буде выполнять в Google Colab - https://colab.research.google.com/ \nGoogle Colab позволяет запускать код в notebook в облаке Google, где можно воспользоваться бесплатным GPU! \n\nАвторы курса благодарят компанию Google и надеятся, что праздник не закончится.\n\nТуториал по настройке Google Colab: \nhttps://medium.com/deep-learning-turkey/google-colab-free-gpu-tutorial-e113627b9f5d \n(Keras инсталлировать не нужно, наш notebook сам установит PyTorch)\n",
"_____no_output_____"
]
],
[
[
"# Intstall PyTorch and download data\n!pip3 install torch torchvision\n\n!wget -c http://ufldl.stanford.edu/housenumbers/train_32x32.mat http://ufldl.stanford.edu/housenumbers/test_32x32.mat",
"_____no_output_____"
],
[
"from collections import namedtuple\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport PIL\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torchvision.datasets as dset\nfrom torch.utils.data.sampler import SubsetRandomSampler\n\nfrom torchvision import transforms",
"_____no_output_____"
],
[
"device = torch.device(\"cuda:0\") # Let's make sure GPU is available!",
"_____no_output_____"
]
],
[
[
"# Загружаем данные",
"_____no_output_____"
]
],
[
[
"# First, lets load the dataset\ndata_train = dset.SVHN('./', \n transform=transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.43,0.44,0.47],\n std=[0.20,0.20,0.20]) \n ])\n )\ndata_test = dset.SVHN('./', split='test', transform=transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.43,0.44,0.47],\n std=[0.20,0.20,0.20]) \n ]))",
"_____no_output_____"
]
],
[
[
"Разделяем данные на training и validation.\n\nНа всякий случай для подробностей - https://pytorch.org/tutorials/beginner/data_loading_tutorial.html",
"_____no_output_____"
]
],
[
[
"batch_size = 64\n\ndata_size = data_train.data.shape[0]\nvalidation_split = .2\nsplit = int(np.floor(validation_split * data_size))\nindices = list(range(data_size))\nnp.random.shuffle(indices)\n\ntrain_indices, val_indices = indices[split:], indices[:split]\n\ntrain_sampler = SubsetRandomSampler(train_indices)\nval_sampler = SubsetRandomSampler(val_indices)\n\ntrain_loader = torch.utils.data.DataLoader(data_train, batch_size=batch_size, \n sampler=train_sampler)\nval_loader = torch.utils.data.DataLoader(data_train, batch_size=batch_size,\n sampler=val_sampler)",
"_____no_output_____"
],
[
"# We'll use a special helper module to shape it into a flat tensor\nclass Flattener(nn.Module):\n def forward(self, x):\n batch_size, *_ = x.shape\n return x.view(batch_size, -1)",
"_____no_output_____"
]
],
[
[
"Создадим простейшую сеть с новыми слоями: \nConvolutional - `nn.Conv2d` \nMaxPool - `nn.MaxPool2d`",
"_____no_output_____"
]
],
[
[
"nn_model = nn.Sequential(\n nn.Conv2d(3, 64, 3, padding=1),\n nn.ReLU(inplace=True),\n nn.MaxPool2d(4),\n nn.Conv2d(64, 64, 3, padding=1),\n nn.ReLU(inplace=True),\n nn.MaxPool2d(4), \n Flattener(),\n nn.Linear(64*2*2, 10),\n )\n\nnn_model.type(torch.cuda.FloatTensor)\nnn_model.to(device)\n\nloss = nn.CrossEntropyLoss().type(torch.cuda.FloatTensor)\noptimizer = optim.SGD(nn_model.parameters(), lr=1e-1, weight_decay=1e-4)",
"_____no_output_____"
]
],
[
[
"Восстановите функцию `compute_accuracy` из прошлого задания. \nЕдинственное отличие в новом - она должна передать данные на GPU прежде чем прогонять через модель. Сделайте это так же, как это делает функция `train_model`",
"_____no_output_____"
]
],
[
[
"def train_model(model, train_loader, val_loader, loss, optimizer, num_epochs): \n loss_history = []\n train_history = []\n val_history = []\n for epoch in range(num_epochs):\n model.train() # Enter train mode\n \n loss_accum = 0\n correct_samples = 0\n total_samples = 0\n for i_step, (x, y) in enumerate(train_loader):\n \n x_gpu = x.to(device)\n y_gpu = y.to(device)\n prediction = model(x_gpu) \n loss_value = loss(prediction, y_gpu)\n optimizer.zero_grad()\n loss_value.backward()\n optimizer.step()\n \n _, indices = torch.max(prediction, 1)\n correct_samples += torch.sum(indices == y_gpu)\n total_samples += y.shape[0]\n \n loss_accum += loss_value\n\n ave_loss = loss_accum / i_step\n train_accuracy = float(correct_samples) / total_samples\n val_accuracy = compute_accuracy(model, val_loader)\n \n loss_history.append(float(ave_loss))\n train_history.append(train_accuracy)\n val_history.append(val_accuracy)\n \n print(\"Average loss: %f, Train accuracy: %f, Val accuracy: %f\" % (ave_loss, train_accuracy, val_accuracy))\n \n return loss_history, train_history, val_history\n \ndef compute_accuracy(model, loader):\n \"\"\"\n Computes accuracy on the dataset wrapped in a loader\n \n Returns: accuracy as a float value between 0 and 1\n \"\"\"\n model.eval() # Evaluation mode\n # TODO: Copy implementation from previous assignment\n # Don't forget to move the data to device before running it through the model!\n \n raise Exception(\"Not implemented\")\n\nloss_history, train_history, val_history = train_model(nn_model, train_loader, val_loader, loss, optimizer, 5)",
"_____no_output_____"
]
],
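[
[
"# A minimal sketch of one possible compute_accuracy implementation for the TODO above.\n# This is an illustration, not the assignment's official solution; it assumes `device`\n# and the loaders defined earlier in this notebook.\ndef compute_accuracy_sketch(model, loader):\n    model.eval()  # Evaluation mode\n    correct_samples = 0\n    total_samples = 0\n    with torch.no_grad():  # no gradients needed for evaluation\n        for x, y in loader:\n            x_gpu = x.to(device)\n            y_gpu = y.to(device)\n            prediction = model(x_gpu)\n            _, indices = torch.max(prediction, 1)\n            correct_samples += torch.sum(indices == y_gpu)\n            total_samples += y.shape[0]\n    return float(correct_samples) / total_samples",
"_____no_output_____"
]
],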
[
[
"# Аугментация данных (Data augmentation)\n\nВ работе с изображениями одним из особенно важных методов является аугментация данных - то есть, генерация дополнительных данных для тренировки на основе изначальных. \nТаким образом, мы получаем возможность \"увеличить\" набор данных для тренировки, что ведет к лучшей работе сети.\nВажно, чтобы аугментированные данные были похожи на те, которые могут встретиться в реальной жизни, иначе польза от аугментаций уменьшается и может ухудшить работу сети.\n\nС PyTorch идут несколько таких алгоритмов, называемых `transforms`. Более подробно про них можно прочитать тут -\nhttps://pytorch.org/tutorials/beginner/data_loading_tutorial.html#transforms\n\nНиже мы используем следующие алгоритмы генерации:\n- ColorJitter - случайное изменение цвета\n- RandomHorizontalFlip - горизонтальное отражение с вероятностью 50%\n- RandomVerticalFlip - вертикальное отражение с вероятностью 50%\n- RandomRotation - случайный поворот",
"_____no_output_____"
]
],
[
[
"tfs = transforms.Compose([\n transforms.ColorJitter(hue=.50, saturation=.50),\n transforms.RandomHorizontalFlip(),\n transforms.RandomVerticalFlip(),\n transforms.RandomRotation(50, resample=PIL.Image.BILINEAR),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.43,0.44,0.47],\n std=[0.20,0.20,0.20]) \n])\n\n# Create augmented train dataset\ndata_aug_train = dset.SVHN('./', \n transform=tfs\n )\n\ntrain_aug_loader = torch.utils.data.DataLoader(data_aug_train, batch_size=batch_size, \n sampler=train_sampler)",
"_____no_output_____"
]
],
[
[
"Визуализируем результаты агментации (вообще, смотреть на сгенерированные данные всегда очень полезно).",
"_____no_output_____"
]
],
[
[
"# TODO: Visualize some augmented images!\n# hint: you can create new datasets and loaders to accomplish this\n\n# Based on the visualizations, should we keep all the augmentations?\n\ntfs = transforms.Compose([\n transforms.ColorJitter(hue=.20, saturation=.20),\n transforms.RandomHorizontalFlip(),\n transforms.RandomVerticalFlip(),\n transforms.RandomRotation(10, resample=PIL.Image.BILINEAR),\n])\n\ndata_aug_vis = dset.SVHN('./', \n transform=tfs\n )\n\nplt.figure(figsize=(30, 3))\n\nfor i, (x, y) in enumerate(data_aug_vis):\n if i == 10:\n break\n plt.subplot(1, 10, i+1)\n plt.grid(False)\n plt.imshow(x)\n plt.axis('off')",
"_____no_output_____"
]
],
[
[
"Все ли агментации одинаково полезны на этом наборе данных? Могут ли быть среди них те, которые собьют модель с толку?\n\nВыберите из них только корректные",
"_____no_output_____"
]
],
[
[
"# TODO: \ntfs = transforms.Compose([\n # TODO: Add good augmentations\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.43,0.44,0.47],\n std=[0.20,0.20,0.20]) \n])\n\n# TODO create new instances of loaders with the augmentations you chose\ntrain_aug_loader = None",
"_____no_output_____"
],
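[
"# A sketch of one reasonable choice for the TODO above (an assumption, not the official\n# solution): house-number digits change identity under flips and large rotations, so we\n# keep only mild color jitter and small rotations.\ntfs = transforms.Compose([\n    transforms.ColorJitter(hue=.20, saturation=.20),\n    transforms.RandomRotation(10, resample=PIL.Image.BILINEAR),\n    transforms.ToTensor(),\n    transforms.Normalize(mean=[0.43,0.44,0.47],\n                         std=[0.20,0.20,0.20])\n])\n\ndata_aug_train = dset.SVHN('./', transform=tfs)\ntrain_aug_loader = torch.utils.data.DataLoader(data_aug_train, batch_size=batch_size,\n                                               sampler=train_sampler)",
"_____no_output_____"
],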
[
"# Finally, let's train with augmentations!\n\n# Note we shouldn't use augmentations on validation\n\nloss_history, train_history, val_history = train_model(nn_model, train_aug_loader, val_loader, loss, optimizer, 5)",
"_____no_output_____"
]
],
[
[
"# LeNet\nПопробуем имплементировать классическую архитектуру сверточной нейронной сети, предложенную Яном ЛеКуном в 1998 году. В свое время она достигла впечатляющих результатов на MNIST, посмотрим как она справится с SVHN?\nОна описана в статье [\"Gradient Based Learning Applied to Document Recognition\"](http://yann.lecun.com/exdb/publis/pdf/lecun-01a.pdf), попробуйте прочитать ключевые части и имплементировать предложенную архитетуру на PyTorch.\n\nРеализовывать слои и функцию ошибки LeNet, которых нет в PyTorch, **не нужно** - просто возьмите их размеры и переведите в уже известные нам Convolutional, Pooling и Fully Connected layers.\n\nЕсли в статье не очень понятно, можно просто погуглить LeNet и разобраться в деталях :)",
"_____no_output_____"
]
],
[
[
"# TODO: Implement LeNet-like architecture for SVHN task\nlenet_model = nn.Sequential(\n )\n\nlenet_model.type(torch.cuda.FloatTensor)\nlenet_model.to(device)\n\nloss = nn.CrossEntropyLoss().type(torch.cuda.FloatTensor)\noptimizer = optim.SGD(lenet_model.parameters(), lr=1e-1, weight_decay=1e-4)",
"_____no_output_____"
],
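[
"# A minimal LeNet-5-style sketch for 3x32x32 SVHN inputs (one possible answer to the TODO\n# above; the exact layer sizes are an assumption adapted from the original paper).\nlenet_model = nn.Sequential(\n    nn.Conv2d(3, 6, 5),    # 3x32x32 -> 6x28x28\n    nn.Tanh(),\n    nn.MaxPool2d(2),       # -> 6x14x14\n    nn.Conv2d(6, 16, 5),   # -> 16x10x10\n    nn.Tanh(),\n    nn.MaxPool2d(2),       # -> 16x5x5\n    Flattener(),\n    nn.Linear(16*5*5, 120),\n    nn.Tanh(),\n    nn.Linear(120, 84),\n    nn.Tanh(),\n    nn.Linear(84, 10),\n)\n\nlenet_model.type(torch.cuda.FloatTensor)\nlenet_model.to(device)\n\nloss = nn.CrossEntropyLoss().type(torch.cuda.FloatTensor)\noptimizer = optim.SGD(lenet_model.parameters(), lr=1e-1, weight_decay=1e-4)",
"_____no_output_____"
],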
[
"# Let's train it!\nloss_history, train_history, val_history = train_model(lenet_model, train_aug_loader, val_loader, loss, optimizer, 10)",
"_____no_output_____"
]
],
[
[
"# Подбор гиперпараметров",
"_____no_output_____"
]
],
[
[
"# The key hyperparameters we're going to tune are learning speed, annealing rate and regularization\n# We also encourage you to try different optimizers as well\n\nHyperparams = namedtuple(\"Hyperparams\", ['learning_rate', 'anneal_epochs', 'reg'])\nRunResult = namedtuple(\"RunResult\", ['model', 'train_history', 'val_history', 'final_val_accuracy'])\n\nlearning_rates = [1e0, 1e-1, 1e-2, 1e-3, 1e-4]\nanneal_coeff = 0.2\nanneal_epochs = [1, 5, 10, 15, 20, 50]\nreg = [1e-3, 1e-4, 1e-5, 1e-7]\n\nbatch_size = 64\nepoch_num = 10\n\n# Record all the runs here\n# Key should be Hyperparams and values should be RunResult\nrun_record = {} \n\n# Use grid search or random search and record all runs in run_record dictionnary \n# Important: perform search in logarithmic space!\n\n# TODO: Your code here!",
"_____no_output_____"
],
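[
"# A sketch of a simple random search that fills run_record (illustrative only; it samples\n# a few combinations instead of the full grid, and proper annealing would require stepping\n# an lr scheduler inside train_model, which this sketch omits).\nimport random\nrandom.seed(42)\n\nfor _ in range(5):\n    params = Hyperparams(learning_rate=random.choice(learning_rates),\n                         anneal_epochs=random.choice(anneal_epochs),\n                         reg=random.choice(reg))\n    model = nn.Sequential(\n        nn.Conv2d(3, 64, 3, padding=1), nn.ReLU(inplace=True), nn.MaxPool2d(4),\n        nn.Conv2d(64, 64, 3, padding=1), nn.ReLU(inplace=True), nn.MaxPool2d(4),\n        Flattener(), nn.Linear(64*2*2, 10))\n    model.type(torch.cuda.FloatTensor)\n    model.to(device)\n    loss = nn.CrossEntropyLoss().type(torch.cuda.FloatTensor)\n    optimizer = optim.SGD(model.parameters(), lr=params.learning_rate,\n                          weight_decay=params.reg)\n    _, train_history, val_history = train_model(model, train_aug_loader, val_loader,\n                                                loss, optimizer, epoch_num)\n    run_record[params] = RunResult(model, train_history, val_history, val_history[-1])",
"_____no_output_____"
],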
[
"best_val_accuracy = None\nbest_hyperparams = None\nbest_run = None\n\nfor hyperparams, run_result in run_record.items():\n if best_val_accuracy is None or best_val_accuracy < run_result.final_val_accuracy:\n best_val_accuracy = run_result.final_val_accuracy\n best_hyperparams = hyperparams\n best_run = run_result\n \nprint(\"Best validation accuracy: %4.2f, best hyperparams: %s\" % (best_val_accuracy, best_hyperparams))\n ",
"_____no_output_____"
]
],
[
[
"# Свободное упражнение - догоним и перегоним LeNet!\n\nПопробуйте найти архитектуру и настройки тренировки, чтобы выступить лучше наших бейзлайнов.\n\nЧто можно и нужно попробовать:\n- BatchNormalization (для convolution layers он в PyTorch называется [batchnorm2d](https://pytorch.org/docs/stable/nn.html#batchnorm2d))\n- Изменить количество слоев и их толщину\n- Изменять количество эпох тренировки\n- Попробовать и другие агментации",
"_____no_output_____"
]
],
[
[
"best_model = None",
"_____no_output_____"
]
],
[
[
"# Финальный аккорд - проверим лучшую модель на test set\n\nВ качестве разнообразия - напишите код для прогона модели на test set вы.\n\nВ результате вы должны натренировать модель, которая покажет более **90%** точности на test set. \nКак водится, лучший результат в группе получит дополнительные баллы!",
"_____no_output_____"
]
],
[
[
"# TODO Write the code to compute accuracy on test set\nfinal_test_accuracy = 0.0\nprint(\"Final test accuracy - \", final_test_accuracy)",
"_____no_output_____"
]
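,
[
"# A sketch of the requested test-set evaluation (assumes data_test from the top of the\n# notebook and a finished compute_accuracy / best_model from the cells above).\ntest_loader = torch.utils.data.DataLoader(data_test, batch_size=batch_size)\nfinal_test_accuracy = compute_accuracy(best_model, test_loader)\nprint(\"Final test accuracy - \", final_test_accuracy)",
"_____no_output_____"
]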
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
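[
"code"
],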
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
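"code",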
"code"
]
] |
e7fbc78d59ffeb476d58aac34e3b1faf64dbb39e | 36,196 | ipynb | Jupyter Notebook | misc/baxter/derivation.ipynb | YoshimitsuMatsutaIe/rmp_test | a7c94ff68b518ef51821484795c308c2c8519c4c | [
"MIT"
] | null | null | null | misc/baxter/derivation.ipynb | YoshimitsuMatsutaIe/rmp_test | a7c94ff68b518ef51821484795c308c2c8519c4c | [
"MIT"
] | null | null | null | misc/baxter/derivation.ipynb | YoshimitsuMatsutaIe/rmp_test | a7c94ff68b518ef51821484795c308c2c8519c4c | [
"MIT"
] | null | null | null | 39.088553 | 292 | 0.448834 | [
[
[
"baxterのmap求める",
"_____no_output_____"
]
],
[
[
"import sympy as sy\nfrom sympy import sin, cos, pi, sqrt\nimport math\n#from math import pi\nq = sy.Matrix(sy.MatrixSymbol('q', 7, 1))\nL, h, H, L0, L1, L2, L3, L4, L5, L6, R = sy.symbols('L, h, H, L0, L1, L2, L3, L4, L5, L6, R')\n# L = 278e-3\n# h = 64e-3\n# H = 1104e-3\n# L0 = 270.35e-3\n# L1 = 69e-3\n# L2 = 364.35e-3\n# L3 = 69e-3\n# L4 = 374.29e-3\n# L5 = 10e-3\n# L6 = 368.3e-3\ndef HTM(alpha, a, d, theta):\n return sy.Matrix([\n [cos(theta), -sin(theta), 0, a],\n [sin(theta)*cos(alpha), cos(theta)*cos(alpha), -sin(alpha), -d*sin(alpha)],\n [sin(theta)*sin(alpha), cos(theta)*sin(alpha), cos(alpha), d*cos(alpha)],\n [0, 0, 0, 1],\n ])\n\n\n\nDHparams = (\n (0, 0, 0, q[0, 0]),\n (-pi/2, L1, 0, q[1, 0]+pi/2),\n (pi/2, 0, L2, q[2, 0]),\n (-pi/2, L3, 0, q[3, 0]),\n (pi/2, 0, L4, q[4, 0]),\n (-pi/2, L5, 0, q[5, 0]),\n (pi/2, 0, 0, q[6, 0]),\n)\n\n\nT_RL_W0 = sy.Matrix([\n [-sqrt(2)/2, sqrt(2)/2, 0, -L,],\n [-sqrt(2)/2, -sqrt(2)/2, 0, -h,],\n [0, 0, 1, H,],\n [0, 0, 0, 1,],\n])\n\nT_0_RL = sy.Matrix([\n [1, 0, 0, 0,],\n [0, 1, 0, 0,],\n [0, 0, 1, L0,],\n [0, 0, 0, 1,],\n])\n\nTs = [HTM(*dhparam) for dhparam in DHparams]\n\nT_GR_7 = sy.Matrix([\n [1, 0, 0, 0,],\n [0, 1, 0, 0,],\n [0, 0, 1, L6,],\n [0, 0, 0, 1,],\n])\n\n\n### 変換前一覧 ###\nT_all = [T_RL_W0, T_0_RL]\nT_all += Ts\nT_all.append(T_GR_7)\n\n\n### 変換後 ###\nfor i, T in enumerate(T_all):\n if i == 0:\n T_abs = [T]\n else:\n T_abs.append(T_abs[i-1] @ T)\n\n\nos = [T[0:3, 3:4] for T in T_abs]\nRxs = [T[0:3, 0:1] for T in T_abs]\nRys = [T[0:3, 1:2] for T in T_abs]\nRzs = [T[0:3, 2:3] for T in T_abs]\n\nJos = [o.jacobian(q) for o in os]\nJRxs = [r.jacobian(q) for r in Rxs]\nJRys = [r.jacobian(q) for r in Rys]\nJRzs = [r.jacobian(q) for r in Rzs]\n\n\n\nt = sy.Symbol(\"t\")\nq1 = sy.Function(\"q1\")\nq2 = sy.Function(\"q2\")\nq3 = sy.Function(\"q3\")\nq4 = sy.Function(\"q4\")\nq5 = sy.Function(\"q5\")\nq6 = sy.Function(\"q6\")\nq7 = sy.Function(\"q7\")\n\ndq = sy.Matrix(sy.MatrixSymbol('dq', 7, 1))\n\nT_abs_ = []\nfor T in T_abs:\n T_ = T.subs([\n (q[0,0], q1(t)),\n (q[1,0], q2(t)),\n (q[2,0], q3(t)),\n (q[3,0], q4(t)),\n (q[4,0], q5(t)),\n (q[5,0], q6(t)),\n (q[6,0], q7(t)),\n ])\n T_abs_.append(T_)\n\n\nos_ = [T[0:3, 3:4] for T in T_abs_]\nRxs_ = [T[0:3, 0:1] for T in T_abs_]\nRys_ = [T[0:3, 1:2] for T in T_abs_]\nRzs_ = [T[0:3, 2:3] for T in T_abs_]\n\nq_ = sy.Matrix([\n [q1(t)],\n [q2(t)],\n [q3(t)],\n [q4(t)],\n [q5(t)],\n [q6(t)],\n [q7(t)],\n])\nJos_ = [o.jacobian(q_) for o in os_]\nJRxs_ = [r.jacobian(q_) for r in Rxs_]\nJRys_ = [r.jacobian(q_) for r in Rys_]\nJRzs_ = [r.jacobian(q_) for r in Rzs_]\n\nJos_dot_ = [sy.diff(J, t) for J in Jos_]\nJRxs_dot_ = [sy.diff(J, t) for J in JRxs_]\nJRys_dot_ = [sy.diff(J, t) for J in JRys_]\nJRzs_dot_ = [sy.diff(J, t) for J in JRzs_]\n\n\nJos_dot = []\nJRxs_dot = []\nJRys_dot = []\nJRzs_dot = []\nfor Js, newJs in zip((Jos_dot_, JRxs_dot_, JRys_dot_, JRzs_dot_), (Jos_dot, JRxs_dot, JRys_dot, JRzs_dot)):\n for J in Js:\n newJs.append(J.subs([\n (sy.Derivative(q1(t),t), dq[0, 0]),\n (sy.Derivative(q2(t),t), dq[1, 0]),\n (sy.Derivative(q3(t),t), dq[2, 0]),\n (sy.Derivative(q4(t),t), dq[3, 0]),\n (sy.Derivative(q5(t),t), dq[4, 0]),\n (sy.Derivative(q6(t),t), dq[5, 0]),\n (sy.Derivative(q7(t),t), dq[6, 0]),\n (q1(t), q[0, 0]),\n (q2(t), q[1, 0]),\n (q3(t), q[2, 0]),\n (q4(t), q[3, 0]),\n (q5(t), q[4, 0]),\n (q6(t), q[5, 0]),\n (q7(t), q[6, 0]),\n ]))\n\nos = [sy.expand(e) for e in os]\nRxs = [sy.expand(e) for e in Rxs]\nRys = [sy.expand(e) for e in Rys]\nRzs = [sy.expand(e) for e in Rzs]\nJos = 
[sy.expand(e) for e in Jos]\nJRxs = [sy.expand(e) for e in JRxs]\nJRys = [sy.expand(e) for e in JRys]\nJRzs = [sy.expand(e) for e in JRzs]\nJos_dot = [sy.expand(e) for e in Jos_dot]\nJRxs_dot = [sy.expand(e) for e in JRxs_dot]\nJRys_dot = [sy.expand(e) for e in JRys_dot]\nJRzs_dot = [sy.expand(e) for e in JRzs_dot]\n\nexpr_all = [os, Rxs, Rys, Rzs, Jos, JRxs, JRys, JRzs, Jos_dot, JRxs_dot, JRys_dot, JRzs_dot]\nnames = [\"W0\", \"BR\"] + [str(i) for i in range(7)] + [\"ee\"]\nexpr_name = [\n [\"o_\" + n for n in names],\n [\"rx_\" + n for n in names],\n [\"ry_\" + n for n in names],\n [\"rz_\" + n for n in names],\n [\"jo_\" + n for n in names],\n [\"jrx_\" + n for n in names],\n [\"jry_\" + n for n in names],\n [\"jrz_\" + n for n in names],\n [\"jo_\" + n + \"_dot\" for n in names],\n [\"jrx_\" + n + \"_dot\" for n in names],\n [\"jry_\" + n + \"_dot\" for n in names],\n [\"jrz_\" + n + \"_dot\" for n in names],\n]",
"_____no_output_____"
],
[
"from sympy.printing import cxxcode\nfrom sympy.utilities.codegen import codegen\nimport os as OS\n\n\noriginal = \"cpp_\"\ndone = \"cpp\"\n\nOS.makedirs(original, exist_ok=True)\nOS.makedirs(done, exist_ok=True)\n\n\ndef gen_cpp_code(expr, name):\n code_txt = cxxcode(expr, assign_to=\"out\", standard=\"c++17\")\n with open(name+\".cpp\", \"w\") as f:\n f.write(code_txt)\n\ndef gen_c(expr, name, dir=\"\"):\n [(c_name, c_code), (h_name, c_header)] = codegen(\n name_expr=(name, expr),\n language=\"C\",\n project= name + \"project\",\n to_files=False\n )\n \n f = open(dir+c_name, 'w')\n f.write(c_code)\n f.close()\n\n f = open(dir+h_name, 'w')\n f.write(c_header)\n f.close()\n \n return c_code, c_header\n\nnames = [\"W0\", \"BR\"] + [str(i) for i in range(7)] + [\"ee\"]\n\nwith open(original+\"/htm.cpp\", \"w\") as fc, open(original+\"/htm.hpp\", \"w\") as fh:\n for i, o in enumerate(os):\n c, h = gen_c(o, name=\"o_\"+names[i])\n fc.write(c)\n fh.write(h)\n for i, o in enumerate(Rxs):\n c, h = gen_c(o, name=\"rx_\"+names[i])\n fc.write(c)\n fh.write(h)\n for i, o in enumerate(Rys):\n c, h = gen_c(o, name=\"ry_\"+names[i])\n fc.write(c)\n fh.write(h)\n for i, o in enumerate(Rzs):\n c, h = gen_c(o, name=\"rz_\"+names[i])\n fc.write(c)\n fh.write(h)\n\nwith open(original+\"/Jos.cpp\", \"w\") as fc, open(original+\"/Jos.hpp\", \"w\") as fh:\n for i, o in enumerate(Jos):\n c, h = gen_c(o, name=\"jo_\"+names[i])\n fc.write(c)\n fh.write(h)\nwith open(original+\"/JRxs.cpp\", \"w\") as fc, open(original+\"/JRxs.hpp\", \"w\") as fh:\n for i, o in enumerate(JRxs):\n c, h = gen_c(o, name=\"jrx_\"+names[i])\n fc.write(c)\n fh.write(h)\nwith open(original+\"/JRys.cpp\", \"w\") as fc, open(original+\"/JRys.hpp\", \"w\") as fh:\n for i, o in enumerate(JRzs):\n c, h = gen_c(o, name=\"jry_\"+names[i])\n fc.write(c)\n fh.write(h)\nwith open(original+\"/JRzs.cpp\", \"w\") as fc, open(original+\"/JRzs.hpp\", \"w\") as fh:\n for i, o in enumerate(JRzs):\n c, h = gen_c(o, name=\"jrz_\"+names[i])\n fc.write(c)\n fh.write(h)\n\nwith open(original+\"/Jo_dots.cpp\", \"w\") as fc, open(original+\"/Jo_dots.hpp\", \"w\") as fh:\n for i, o in enumerate(Jos_dot):\n c, h = gen_c(o, name=\"jo_\"+names[i]+\"_dot\")\n fc.write(c)\n fh.write(h)\nwith open(original+\"/JRx_dots.cpp\", \"w\") as fc, open(original+\"/JRx_dots.hpp\", \"w\") as fh:\n for i, o in enumerate(JRxs_dot):\n c, h = gen_c(o, name=\"jrx_\"+names[i]+\"_dot\")\n fc.write(c)\n fh.write(h)\nwith open(original+\"/JRy_dots.cpp\", \"w\") as fc, open(original+\"/JRy_dots.hpp\", \"w\") as fh:\n for i, o in enumerate(JRzs_dot):\n c, h = gen_c(o, name=\"jry_\"+names[i]+\"_dot\")\n fc.write(c)\n fh.write(h)\nwith open(original+\"/JRz_dots.cpp\", \"w\") as fc, open(original+\"/JRz_dots.hpp\", \"w\") as fh:\n for i, o in enumerate(JRzs_dot):\n c, h = gen_c(o, name=\"jrz_\"+names[i]+\"_dot\")\n fc.write(c)\n fh.write(h)\n\n",
"_____no_output_____"
],
[
"### これが本物 ###\nfrom sympy.printing import cxxcode\nfrom sympy.utilities.codegen import codegen\nimport os as OS\n\n\noriginal = \"cpp_original\"\ndone = \"cpp_done\"\n\nOS.makedirs(original, exist_ok=True)\nOS.makedirs(original+\"/include\", exist_ok=True)\nOS.makedirs(original+\"/src\", exist_ok=True)\n\n\ndef gen_cpp_code(expr, name, dir):\n [(c_name, c_code), (h_name, c_header)] = codegen(\n name_expr=(name, expr),\n language=\"C\",\n project= name + \"_BY_SYMPY_\",\n to_files=False\n )\n \n f = open(dir+\"/src/\"+name+\".cpp\", 'w')\n f.write(c_code)\n f.close()\n\n f = open(dir+\"/include/\"+h_name.replace(\".h\", \"\")+\".hpp\", 'w')\n f.write(c_header)\n f.close()\n\nfor exprs, names in zip(expr_all, expr_name):\n for expr, name in zip(exprs, names):\n gen_cpp_code(expr, name, original)",
"_____no_output_____"
],
[
"com = \"#ifndef BAXTER_HPP\\n\" \\\n + \"#define BAXTER_HPP\\n\" \\\n + \"#include<eigen3/Eigen/Core>\\n\" \\\n + \"namespace baxter\\n\" \\\n + \"{\\n\" \\\n + \" using Eigen::VectorXd;\\n\" \\\n + \" using Eigen::MatrixXd;\\n\" \\\n + \" static const double L = 278e-3;\\n\" \\\n + \" static const double h = 64e-3;\\n\" \\\n + \" static const double H = 1104e-3;\\n\" \\\n + \" static const double L0 = 270.35e-3;\\n\" \\\n + \" static const double L1 = 69e-3;\\n\" \\\n + \" static const double L2 = 364.35e-3;\\n\" \\\n + \" static const double L3 = 69e-3;\\n\" \\\n + \" static const double L4 = 374.29e-3;\\n\" \\\n + \" static const double L5 = 10e-3;\\n\" \\\n + \" static const double L6 = 368.3e-3;\\n\"\n\nfor ns in expr_name[0:4]:\n for n in ns:\n com += (\" void \" + n + \"(const VectorXd& q, VectorXd& out);\\n\")\nfor ns in expr_name[4:8]:\n for n in ns:\n com += (\" void \" + n + \"(const VectorXd& q, MatrixXd& out);\\n\")\nfor ns in expr_name[8:12]:\n for n in ns:\n com += (\" void \" + n + \"(const VectorXd& q, const VectorXd& q_dot, MatrixXd& out);\\n\")\n\ncom += \"};\\n#endif\"",
"_____no_output_____"
],
[
"### 変換 ###\nimport re\ndone = \"cpp_done\"\nOS.makedirs(done, exist_ok=True)\nOS.makedirs(done+\"/include\", exist_ok=True)\nOS.makedirs(done+\"/src\", exist_ok=True)\n\npat = r'out_(.+?)\\['\npat2 = r'out_(.+?)\\)'\npat3 = r'\\((.+?)\\) {'\npat4 = r'#(.+?).h\\\"'\n\nsout = [\"out[\" + str(i) + \"]\" for i in range(21)]\nsout_2 = [\"out(0,0)\",\"out(0,1)\",\"out(0,2)\",\"out(0,3)\",\"out(0,4)\",\"out(0,5)\",\"out(0,6)\",\"out(1,0)\",\"out(1,1)\",\"out(1,2)\",\"out(1,3)\",\"out(1,4)\",\"out(1,5)\",\"out(1,6)\",\"out(2,0)\",\"out(2,1)\",\"out(2,2)\",\"out(2,3)\",\"out(2,4)\",\"out(2,5)\",\"out(2,6)\"]\n\nwith open(\"cpp_done/include/baxter.hpp\", \"w\") as f:\n f.write(com)\n\n\n\ndef common_trans(line):\n r = re.findall(pat, line)\n r2 = re.findall(pat2, line)\n if len(r) != 0:\n line = line.replace(\"out_\" + r[0], \"out\")\n if len(r2) != 0:\n line = line.replace(\"out_\" + r2[0], \"out\")\n line = line.replace(\"q[0]\", \"q(0)\")\n line = line.replace(\"q[1]\", \"q(1)\")\n line = line.replace(\"q[2]\", \"q(2)\")\n line = line.replace(\"q[3]\", \"q(3)\")\n line = line.replace(\"q[4]\", \"q(4)\")\n line = line.replace(\"q[5]\", \"q(5)\")\n line = line.replace(\"q[6]\", \"q(6)\")\n \n # line = line.replace(\"double L, \", \"\")\n # line = line.replace(\"double h, \", \"\")\n # line = line.replace(\"double H, \", \"\")\n # line = line.replace(\"double L0, \", \"\")\n # line = line.replace(\"double L1, \", \"\")\n # line = line.replace(\"double L2, \", \"\")\n # line = line.replace(\"double L3, \", \"\")\n # line = line.replace(\"double L4, \", \"\")\n # line = line.replace(\"double L5, \", \"\")\n # line = line.replace(\"double L6, \", \"\")\n \n \n r3 = re.findall(pat3, line)\n if \"j\" not in name:\n if len(r3) != 0:\n print(\"(\"+r3[0]+\")\")\n #line = line.replace(\"(\"+r3[0]+\") {\", \"(const VectorXd& q, VectorXd& out) {\")\n line = line.replace(\"(\"+r3[0]+\") {\", \"(const VectorXd& q, double L, double h, double H, double L0, double L1, double L2, double L3, double L4, double L5, double L6, VectorXd& out) {\")\n line = line.replace(\"double *out\", \"VectorXd& out\")\n line = line.replace(\"out[0]\", \"out(0)\")\n line = line.replace(\"out[1]\", \"out(1)\")\n line = line.replace(\"out[2]\", \"out(2)\")\n\n \n else:\n if \"dot\" in name:\n if len(r3) != 0:\n line = line.replace(r3[0], \"const VectorXd& q, const VectorXd& dq, double L, double h, double H, double L0, double L1, double L2, double L3, double L4, double L5, double L6, MatrixXd& out\")\n else:\n if len(r3) != 0:\n print(name)\n line = line.replace(r3[0], \"const VectorXd& q, double L, double h, double H, double L0, double L1, double L2, double L3, double L4, double L5, double L6, MatrixXd& out\")\n line = line.replace(\"double *out\", \"MatrixXd& out\")\n for s, t in zip(sout, sout_2):\n line = line.replace(s, t)\n \n\n\n return line\n\n\ndef trans_cpp(name):\n origin = \"cpp_original/src/\" + name + \".cpp\"\n done = \"cpp_done/src/\" + name + \".cpp\"\n with open(origin, \"r\") as f, open(done, \"w\") as g:\n file_data = f.readlines()\n for line in file_data:\n line = line.replace('#include <math.h>', '#include <cmath>\\nusing std::cos;\\nusing std::sin;\\nusing std::sqrt;\\n')\n #line = line.replace(\"#include \\\"\", \"#include \\\"../../include/baxter/\")\n #line = line.replace(\".h\\\"\", \".hpp\\\"\\n#include \\\"../../include/baxter/common.hpp\\\"\\n\")\n r4 = re.findall(pat4, line)\n if len(r4) != 0:\n line = line.replace(\"#\"+r4[0]+\".h\\\"\", \"#include \\\"../include/baxter.hpp\\\"\\n\")\n line = 
line.replace(\"void \", \"void baxter::\")\n line = line.replace(\"double *q\", \"const VectorXd& q\").replace(\"double *dq\", \"const VectorXd& dq\")\n \n line = common_trans(line)\n\n\n g.write(line)\n\n\n# def trans_hpp(name):\n# origin = \"cpp_original/include/\" + name + \".hpp\"\n# done = \"cpp_done/include/\" + name + \".hpp\"\n# with open(origin, \"r\") as f, open(done, \"w\") as g:\n# file_data = f.readlines()\n# for line in file_data:\n# line = line.replace(\"void \", \"#include<eigen3/Eigen/Core>\\nnamespace baxter\\n{\\nusing Eigen::VectorXd;\\nusing Eigen::MatrixXd;\\nvoid \").replace(\");\", \");\\n}\\n\")\n# line = line.replace(\"double *q\", \"const VectorXd& q\").replace(\"double *dq\", \"const VectorXd& dq\")\n \n# line = common_trans(line)\n\n# g.write(line)\n\nfor names in expr_name:\n for name in names:\n trans_cpp(name)\n #trans_hpp(name)",
"(double H, double L, double h, double *out)\n(double H, double L, double L0, double h, double *out)\n(double H, double L, double L0, double h, double *out)\n(double H, double L, double L0, double L1, double h, const VectorXd& q, double *out)\n(double H, double L, double L0, double L1, double L2, double h, const VectorXd& q, double *out)\n(double H, double L, double L0, double L1, double L2, double L3, double h, const VectorXd& q, double *out)\n(double H, double L, double L0, double L1, double L2, double L3, double L4, double h, const VectorXd& q, double *out)\n(double H, double L, double L0, double L1, double L2, double L3, double L4, double L5, double h, const VectorXd& q, double *out)\n(double H, double L, double L0, double L1, double L2, double L3, double L4, double L5, double h, const VectorXd& q, double *out)\n(double H, double L, double L0, double L1, double L2, double L3, double L4, double L5, double L6, double h, const VectorXd& q, double *out)\n(double *out)\n(double *out)\n(const VectorXd& q, double *out)\n(const VectorXd& q, double *out)\n(const VectorXd& q, double *out)\n(const VectorXd& q, double *out)\n(const VectorXd& q, double *out)\n(const VectorXd& q, double *out)\n(const VectorXd& q, double *out)\n(const VectorXd& q, double *out)\n(double *out)\n(double *out)\n(const VectorXd& q, double *out)\n(const VectorXd& q, double *out)\n(const VectorXd& q, double *out)\n(const VectorXd& q, double *out)\n(const VectorXd& q, double *out)\n(const VectorXd& q, double *out)\n(const VectorXd& q, double *out)\n(const VectorXd& q, double *out)\n(double *out)\n(double *out)\n(double *out)\n(const VectorXd& q, double *out)\n(const VectorXd& q, double *out)\n(const VectorXd& q, double *out)\n(const VectorXd& q, double *out)\n(const VectorXd& q, double *out)\n(const VectorXd& q, double *out)\n(const VectorXd& q, double *out)\njo_W0\njo_BR\njo_0\njo_1\njo_2\njo_3\njo_4\njo_5\njo_6\njo_ee\njrx_W0\njrx_BR\njrx_0\njrx_1\njrx_2\njrx_3\njrx_4\njrx_5\njrx_6\njrx_ee\njry_W0\njry_BR\njry_0\njry_1\njry_2\njry_3\njry_4\njry_5\njry_6\njry_ee\njrz_W0\njrz_BR\njrz_0\njrz_1\njrz_2\njrz_3\njrz_4\njrz_5\njrz_6\njrz_ee\n"
],
[
"hoho = \"void baxter::o_W0(VectorXd& out) {\"\n",
"_____no_output_____"
],
[
"# pythonコード生成(クラス)\nfrom sympy.printing.numpy import NumPyPrinter\n\nnames = [\"W0\", \"BR\"] + [str(i) for i in range(7)] + [\"ee\"]\n\ncommon_w = \"import numpy as np\\nfrom math import cos as c\\nfrom math import sin as s\\nfrom math import tan as t\\nfrom math import sqrt as sq\\nfrom base import Base\\n\"\n\n\n\nwith open(\"src_py_/htm.py\", \"w\") as f:\n f.write(common_w + \"class HTM(Base):\\n\")\n for name, z in zip(names, os):\n numpy_word = \" def o_\" + name + \"(self, q):\\n return \"\n f.write(numpy_word)\n f.write(NumPyPrinter().doprint(z))\n f.write(\"\\n\")\n\n for name, z in zip(names, Rxs):\n numpy_word = \" def rx_\" + name + \"(self, q):\\n return \"\n f.write(numpy_word)\n f.write(NumPyPrinter().doprint(z))\n f.write(\"\\n\")\n\n for name, z in zip(names, Rys):\n numpy_word = \" def ry_\" + name + \"(self, q):\\n return \"\n f.write(numpy_word)\n f.write(NumPyPrinter().doprint(z))\n f.write(\"\\n\")\n\n for name, z in zip(names, Rzs):\n numpy_word = \" def rz_\" + name + \"(self, q):\\n return \"\n f.write(numpy_word)\n f.write(NumPyPrinter().doprint(z))\n f.write(\"\\n\")\n\n\nwith open(\"src_py_/Jos.py\", \"w\") as f:\n f.write(common_w + \"class Jo(Base):\\n\")\n for name, z in zip(names, Jos):\n numpy_word = \" def jo_\" + name + \"(self, q):\\n return \"\n f.write(numpy_word)\n f.write(NumPyPrinter().doprint(z))\n f.write(\"\\n\")\n\nwith open(\"src_py_/JRxs.py\", \"w\") as f:\n f.write(common_w + \"class JRx(Base):\\n\")\n for name, z in zip(names, JRxs):\n numpy_word = \" def jrx_\" + name + \"(self, q):\\n return \"\n f.write(numpy_word)\n f.write(NumPyPrinter().doprint(z))\n f.write(\"\\n\")\n\nwith open(\"src_py_/JRys.py\", \"w\") as f:\n f.write(common_w + \"class JRy(Base):\\n\")\n for name, z in zip(names, JRys):\n numpy_word = \" def jry_\" + name + \"(self, q):\\n return \"\n f.write(numpy_word)\n f.write(NumPyPrinter().doprint(z))\n f.write(\"\\n\")\n\nwith open(\"src_py_/JRzs.py\", \"w\") as f:\n f.write(common_w + \"class JRz(Base):\\n\")\n for name, z in zip(names, JRzs):\n numpy_word = \" def jrz_\" + name + \"(self, q):\\n return \"\n f.write(numpy_word)\n f.write(NumPyPrinter().doprint(z))\n f.write(\"\\n\")\n\n\nwith open(\"src_py_/Jo_dots.py\", \"w\") as f:\n f.write(common_w + \"class Jo_dot(Base):\\n\")\n for name, z in zip(names, Jos_dot):\n numpy_word = \" def jo_\" + name + \"_dot(self, q, dq):\\n return \"\n f.write(numpy_word)\n f.write(NumPyPrinter().doprint(z))\n f.write(\"\\n\")\n\nwith open(\"src_py_/JRx_dots.py\", \"w\") as f:\n f.write(common_w + \"class JRx_dot(Base):\\n\")\n for name, z in zip(names, JRxs_dot):\n numpy_word = \" def jrx_\" + name + \"_dot(self, q, dq):\\n return \"\n f.write(numpy_word)\n f.write(NumPyPrinter().doprint(z))\n f.write(\"\\n\")\n\nwith open(\"src_py_/JRy_dots.py\", \"w\") as f:\n f.write(common_w + \"class JRy_dot(Base):\\n\")\n for name, z in zip(names, JRys):\n numpy_word = \" def jry_\" + name + \"_dot(self, q, dq):\\n return \"\n f.write(numpy_word)\n f.write(NumPyPrinter().doprint(z))\n f.write(\"\\n\")\n\nwith open(\"src_py_/JRz_dots.py\", \"w\") as f:\n f.write(common_w + \"class JRz_dot(Base):\\n\")\n for name, z in zip(names, JRzs):\n numpy_word = \" def jrz_\" + name + \"_dot(self, q, dq):\\n return \"\n f.write(numpy_word)\n f.write(NumPyPrinter().doprint(z))\n f.write(\"\\n\")\n\n\ndef translate_hoge(original, done):\n with open(original, \"r\") as f, open(done, \"w\") as g:\n file_data = f.readlines()\n for line in file_data:\n line = line.replace('numpy', 'np').replace('1/2', 
'0.5').replace('(0.5)', '0.5')\n line = line.replace('np.cos', 'c').replace('np.sin', 's').replace('np.sqrt', 'sq')\n #line = line.replace('L', 'self.L').replace('h', 'self.h').replace('H', 'self.H')\n line = line.replace('import np as np', 'import numpy as np')\n line = line.replace('matself.h', 'math')\n g.write(line)\n\n\ntranslate_hoge(\"src_py_/htm.py\", \"src_py/htm.py\")\ntranslate_hoge(\"src_py_/Jos.py\", \"src_py/Jos.py\")\ntranslate_hoge(\"src_py_/JRxs.py\", \"src_py/JRxs.py\")\ntranslate_hoge(\"src_py_/JRys.py\", \"src_py/JRys.py\")\ntranslate_hoge(\"src_py_/JRzs.py\", \"src_py/JRzs.py\")\ntranslate_hoge(\"src_py_/Jo_dots.py\", \"src_py/Jo_dots.py\")\ntranslate_hoge(\"src_py_/JRx_dots.py\", \"src_py/JRx_dots.py\")\ntranslate_hoge(\"src_py_/JRy_dots.py\", \"src_py/JRy_dots.py\")\ntranslate_hoge(\"src_py_/JRz_dots.py\", \"src_py/JRz_dots.py\")",
"_____no_output_____"
],
[
"from sympy.printing.numpy import NumPyPrinter\n\nnames = [\"W0\", \"BR\"] + [str(i) for i in range(7)] + [\"ee\"]\n\ncommon_w = \"import numpy as np\\nfrom math import cos as c\\nfrom math import sin as s\\nfrom math import tan as ta\\nfrom math import sqrt as sq\\n\"\n\nnumba_word_q = \"@njit(\\\"f8[:, :](f8[:, :])\\\")\\n\"\nnumba_word_q_dq = \"@njit(\\\"f8[:, :](f8[:, :], f8[:, :])\\\")\\n\"\n\nwith open(\"src_py_/htm.py\", \"w\") as f:\n f.write(common_w)\n for name, z in zip(names, os):\n numpy_word = \"def o_\" + name + \"(q):\\n return \"\n #f.write(numba_word_q)\n f.write(numpy_word)\n f.write(NumPyPrinter().doprint(z))\n f.write(\"\\n\")\n\n for name, z in zip(names, Rxs):\n numpy_word = \"def rx_\" + name + \"(q):\\n return \"\n #f.write(numba_word_q)\n f.write(numpy_word)\n f.write(NumPyPrinter().doprint(z))\n f.write(\"\\n\")\n\n for name, z in zip(names, Rys):\n numpy_word = \"def ry_\" + name + \"(q):\\n return \"\n #f.write(numba_word_q)\n f.write(numpy_word)\n f.write(NumPyPrinter().doprint(z))\n f.write(\"\\n\")\n\n for name, z in zip(names, Rzs):\n numpy_word = \"def rz_\" + name + \"(q):\\n return \"\n #f.write(numba_word_q)\n f.write(numpy_word)\n f.write(NumPyPrinter().doprint(z))\n f.write(\"\\n\")\n\n\nwith open(\"src_py_/Jos.py\", \"w\") as f:\n f.write(common_w)\n for name, z in zip(names, Jos):\n numpy_word = \"def jo_\" + name + \"(q):\\n return \"\n #f.write(numba_word_q)\n f.write(numpy_word)\n f.write(NumPyPrinter().doprint(z))\n f.write(\"\\n\")\n\nwith open(\"src_py_/JRxs.py\", \"w\") as f:\n f.write(common_w)\n for name, z in zip(names, JRxs):\n numpy_word = \"def jrx_\" + name + \"(q):\\n return \"\n #f.write(numba_word_q)\n f.write(numpy_word)\n f.write(NumPyPrinter().doprint(z))\n f.write(\"\\n\")\n\nwith open(\"src_py_/JRys.py\", \"w\") as f:\n f.write(common_w)\n for name, z in zip(names, JRys):\n numpy_word = \"def jry_\" + name + \"(q):\\n return \"\n #f.write(numba_word_q)\n f.write(numpy_word)\n f.write(NumPyPrinter().doprint(z))\n f.write(\"\\n\")\n\nwith open(\"src_py_/JRzs.py\", \"w\") as f:\n f.write(common_w)\n for name, z in zip(names, JRzs):\n numpy_word = \"def jrz_\" + name + \"(q):\\n return \"\n #f.write(numba_word_q)\n f.write(numpy_word)\n f.write(NumPyPrinter().doprint(z))\n f.write(\"\\n\")\n\n\nwith open(\"src_py_/Jo_dots.py\", \"w\") as f:\n f.write(common_w)\n for name, z in zip(names, Jos_dot):\n numpy_word = \"def jo_\" + name + \"_dot(q, dq):\\n return \"\n #f.write(numba_word_q_dq)\n f.write(numpy_word)\n f.write(NumPyPrinter().doprint(sy.simplify(z)))\n f.write(\"\\n\")\n\nwith open(\"src_py_/JRx_dots.py\", \"w\") as f:\n f.write(common_w)\n for name, z in zip(names, JRxs_dot):\n numpy_word = \"def jrx_\" + name + \"_dot(q, dq):\\n return \"\n #f.write(numba_word_q_dq)\n f.write(numpy_word)\n f.write(NumPyPrinter().doprint(sy.simplify(z)))\n f.write(\"\\n\")\n\nwith open(\"src_py_/JRy_dots.py\", \"w\") as f:\n f.write(common_w)\n for name, z in zip(names, JRys):\n numpy_word = \"def jry_\" + name + \"_dot(q, dq):\\n return \"\n #f.write(numba_word_q_dq)\n f.write(numpy_word)\n f.write(NumPyPrinter().doprint(sy.simplify(z)))\n f.write(\"\\n\")\n\nwith open(\"src_py_/JRz_dots.py\", \"w\") as f:\n f.write(common_w)\n for name, z in zip(names, JRzs):\n numpy_word = \"def jrz_\" + name + \"_dot(q, dq):\\n return \"\n #f.write(numba_word_q_dq)\n f.write(numpy_word)\n f.write(NumPyPrinter().doprint(sy.simplify(z)))\n f.write(\"\\n\")\n\n\ndef translate_hoge(original, done):\n with open(original, \"r\") as f, open(done, \"w\") as 
g:\n file_data = f.readlines()\n for line in file_data:\n line = line.replace('numpy', 'np').replace('1/2', '0.5').replace('(0.5)', '0.5')\n line = line.replace('np.cos', 'c').replace('np.sin', 's').replace('np.sqrt', 'sq')\n # line = line.replace(']])', ']], dtype=np.float64)')\n # line = line.replace('[0, 0, 0, 0, 0, 0, 0]', '[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]')\n # line = line.replace('[0]', '[0.0]').replace(' 0]],', ' 0.0]],').replace('[1]', '[1.0]').replace('[[0,', '[[0.0,').replace('0.0, 0],', '0.0, 0.0],')\n line = line.replace('import np as np', 'import numpy as np')\n g.write(line)\n\n\ntranslate_hoge(\"src_py_/htm.py\", \"src_py_no_class/htm.py\")\ntranslate_hoge(\"src_py_/Jos.py\", \"src_py_no_class/Jos.py\")\ntranslate_hoge(\"src_py_/JRxs.py\", \"src_py_no_class/JRxs.py\")\ntranslate_hoge(\"src_py_/JRys.py\", \"src_py_no_class/JRys.py\")\ntranslate_hoge(\"src_py_/JRzs.py\", \"src_py_no_class/JRzs.py\")\ntranslate_hoge(\"src_py_/Jo_dots.py\", \"src_py_no_class/Jo_dots.py\")\ntranslate_hoge(\"src_py_/JRx_dots.py\", \"src_py_no_class/JRx_dots.py\")\ntranslate_hoge(\"src_py_/JRy_dots.py\", \"src_py_no_class/JRy_dots.py\")\ntranslate_hoge(\"src_py_/JRz_dots.py\", \"src_py_no_class/JRz_dots.py\")",
"_____no_output_____"
]
]
] | [
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e7fbe104bad3079635a0970cc55d68a6ad4a3d72 | 674,184 | ipynb | Jupyter Notebook | total-collapsing.ipynb | stefaniaebli/dmt-signal-processing | d8efb7ed3cb6b506b40f011ebee12774e01c1c4f | [
"MIT"
] | null | null | null | total-collapsing.ipynb | stefaniaebli/dmt-signal-processing | d8efb7ed3cb6b506b40f011ebee12774e01c1c4f | [
"MIT"
] | null | null | null | total-collapsing.ipynb | stefaniaebli/dmt-signal-processing | d8efb7ed3cb6b506b40f011ebee12774e01c1c4f | [
"MIT"
] | null | null | null | 1,484.986784 | 131,084 | 0.95868 | [
[
[
"import numpy as np\nimport random\nimport gudhi as gd\nfrom matplotlib import pyplot as plt\nfrom matplotlib import colors as mcolors\nimport sys\nsys.path.append('code')\nimport dmtsignal as dmt\nimport dmtvisual as dmtvis\nimport importlib\nimport warnings\nwarnings.filterwarnings(\"ignore\")\ndmt = importlib.reload(dmt)\ndmtvis = importlib.reload(dmtvis)",
"_____no_output_____"
],
[
"#X=dmt.extract_simplices(st)\nX=np.load(\"./data/X.npy\",allow_pickle=True)\npoints=np.load(\"./data/points.npy\",allow_pickle=True)\nkX=dmt.build_boundaries(X)\ns1=np.array(dmtvis.height_function(X,points))\n#s1=np.load(\"./data/random_signal.npy\",allow_pickle=True)\n#s1=np.array(dmtvis.dist_center_function(X,points))\n",
"_____no_output_____"
],
[
"s0 = ['black']*len(X[0])#np.zeros(len(simplices[0]))\ns2 =np.random.uniform(size=len(X[2]))\nfig = plt.figure(figsize=(6,7))\nax = fig.add_subplot(111)\ndmtvis.plot_nodes(s0, points,ax, zorder=3,s=30)\ndmtvis.plot_edges(s1.copy(),points,X, ax, zorder=2,linewidths=2)\ndmtvis.plot_triangles_plain('lavenderblush',points,X, ax, zorder=1)\ncbar=plt.colorbar(ax.collections[0], ax=ax,orientation=\"horizontal\")\n#cbar.set_ticklabels(np.arange(s1.max() ,s1.min(),6))\nprint([s1.min(),s1.max()])\nax.set_xticks([])\nax.set_yticks([]) \nplt.savefig('./figures/ex_coll_1.pdf')\nplt.show()",
"[0.0057809641226661546, 0.9829323903764209]\n"
]
],
[
[
"### Collapse all 2-cells",
"_____no_output_____"
]
],
[
[
"all_X,collapses,all_losses,total_loss,all_signals,phispsis= dmt.sequence_optimal_up_collapses(X=X,kX=kX,dimq=1,signal=s1,steps=120)\ncolX=all_X[-1]\ncolS=all_signals[-1]",
"_____no_output_____"
],
[
"s0 = ['black']*len(X[0])#np.zeros(len(simplices[0]))\nf_X=all_X[-1]\nf_s=all_signals[-1]\nfig = plt.figure(figsize=(6,7))\nax = fig.add_subplot(111)\ndmtvis.plot_nodes(s0, points,ax, zorder=3,s=30)\ndmtvis.plot_edges(f_s.copy(),points,f_X, ax, zorder=2,linewidths=2)\ndmtvis.plot_triangles_plain('lavenderblush',points,f_X, ax, zorder=1)\n\ncbar=plt.colorbar(ax.collections[0], ax=ax,orientation=\"horizontal\")\ncbar.set_ticklabels(np.around(np.append(np.arange(f_s.min(),f_s.max(),(f_s.max())/5),f_s.max()),decimals=1))\nax.set_xticks([])\nax.set_yticks([]) \nprint([f_s.min(),f_s.max()])\nplt.savefig('./figures/ex_coll_2.pdf')\nplt.show()",
"[0.009071906103450211, 1.4862755556059564]\n"
],
[
"s0 = ['black']*len(X[0])#np.zeros(len(simplices[0]))\ns2 =np.random.uniform(size=len(X[2]))\nsr=phispsis\nfig = plt.figure(figsize=(6,7))\nax = fig.add_subplot(111)\ndmtvis.plot_nodes(s0, points,ax, zorder=3,s=30)\ndmtvis.plot_edges(sr.copy(),points,X, ax, zorder=2,linewidths=2)\ndmtvis.plot_triangles_plain('lavenderblush',points,X, ax, zorder=1)\ncbar=plt.colorbar(ax.collections[0], ax=ax,orientation=\"horizontal\")\ncbar.set_ticklabels(np.around(np.append(np.arange(sr.min(),sr.max(),(sr.max())/5),sr.max()),decimals=1))\n\n#cbar.set_ticklabels(np.arange(s1.max() ,s1.min(),6))\nprint([sr.min(),sr.max()])\nax.set_xticks([])\nax.set_yticks([]) \nplt.savefig('./figures/ex_coll_3.pdf')\nplt.show()",
"[0.0, 1.4862755556059564]\n"
],
[
"s0 = ['black']*len(X[0])#np.zeros(len(simplices[0]))\ns2 =np.random.uniform(size=len(X[2]))\nsl=np.abs(s1-phispsis)\nfig = plt.figure(figsize=(6,7))\nax = fig.add_subplot(111)\ndmtvis.plot_nodes(s0, points,ax, zorder=3,s=30)\ndmtvis.plot_edges(sl.copy(),points,X, ax, zorder=2,linewidths=2)\ndmtvis.plot_triangles_plain('lavenderblush',points,X, ax, zorder=1)\ncbar=plt.colorbar(ax.collections[0], ax=ax,orientation=\"horizontal\")\n#cbar.set_ticklabels([])\na=np.around(np.append(np.arange(sl.min(),sl.max(),(sl.max())/5),sl.max()),decimals=1)\ncbar.set_ticklabels(a)\nprint([sl.min(),sl.max()])\nax.set_xticks([])\nax.set_yticks([]) \nplt.savefig('./figures/ex_coll_4.pdf')\nplt.show()",
"[0.0, 1.0978804312635513]\n"
],
[
"dmtvis.plot_hodge_decomp(X,s1,kX,phispsis,trange=30,type_collapse='up')\nplt.savefig('./figures/hodge_new.pdf')",
"_____no_output_____"
]
],
[
[
"### Randomly collapse 2-cells",
"_____no_output_____"
]
],
[
[
"all_X_rand,collapses_rand,all_losses_rand,total_loss_rand,all_signals_rand,phispsis_rand= dmt.sequence_optimal_up_collapses(X=X,kX=kX,dimq=1,signal=s1,steps=244,random=True)\ncolX_rand=all_X_rand[-1]\ncolS_rand=all_signals_rand[-1]",
"_____no_output_____"
],
[
"dmtvis.plot_hodge_decomp(X,s1,kX,phispsis_rand,trange=30,type_collapse='up')\nplt.savefig('./figures/hodge_multiple_random_collapses_uniform.pdf')",
"_____no_output_____"
]
],
[
[
"### Comparing losses",
"_____no_output_____"
]
],
[
[
"def CI_plot_y(data, conf = .95):\n from scipy.stats import sem, t\n n = np.array(data).shape[0]\n std_err = sem(data,axis = 0)\n h = std_err * t.ppf((1 + .95) / 2, n - 1)\n return h",
"_____no_output_____"
],
[
"typ=['normal','uniform','height','center']\nsteps=np.arange(244)\ns=[1,50,100,150,200,240]\nfor j in typ:\n\n l=np.load('./data/data_optimal_{}_sim0.npy'.format(j))[:,0,:]\n rl=np.load('./data/data_random_{}_sim0.npy'.format(j))[:,0,:]\n #l1=np.load('./data/data_optimal_sim0.npy'.format(j))[:,0,:]\n #rl1=np.load('./data/data_random_sim{0.npy'.format(j))[:,0,:]\n\n fig = plt.figure(figsize=(7,5))\n m = np.array(l).mean(axis=0)\n h = CI_plot_y(np.array(l))\n plt.plot(steps,m,label=\"Optimal pairing\")\n plt.fill_between(steps,m-h,m+h,alpha=.5,zorder=0)\n\n m = np.array(rl).mean(axis=0)\n h = CI_plot_y(np.array(rl))\n plt.plot(steps,m,c='green',label=\"Random pairing\")\n plt.fill_between(steps,m-h,m+h,alpha=.3,zorder=0,color='green')\n plt.xticks(s)\n #plt.savefig('./figures/topo_error.pdf')\n plt.xlabel(\"Number of iterations\")\n plt.ylabel(\"Topological reconstruction loss\")\n #plt.title(\"Signal on the 1-cells: {}\".format(j))\n plt.legend(loc='upper left')\n\n plt.savefig('./figures/topological_loss_{}.pdf'.format(j))\n\n plt.show()",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
e7fbf792b24818cfc6f63d8fbdac096f078813bd | 307,560 | ipynb | Jupyter Notebook | d2l-en/chapter_computer-vision/object-detection-dataset.ipynb | mru4913/Dive-into-Deep-Learning | bcd16ac602f011292bd1d5540ef3833cd3fd7c72 | [
"MIT"
] | null | null | null | d2l-en/chapter_computer-vision/object-detection-dataset.ipynb | mru4913/Dive-into-Deep-Learning | bcd16ac602f011292bd1d5540ef3833cd3fd7c72 | [
"MIT"
] | null | null | null | d2l-en/chapter_computer-vision/object-detection-dataset.ipynb | mru4913/Dive-into-Deep-Learning | bcd16ac602f011292bd1d5540ef3833cd3fd7c72 | [
"MIT"
] | null | null | null | 1,627.301587 | 299,432 | 0.957995 | [
[
[
"# Object Detection Data Set (Pikachu)\n\nThere are no small data sets, like MNIST or Fashion-MNIST, in the object detection field. In order to quickly test models, we are going to assemble a small data set. First, we generate 1000 Pikachu images of different angles and sizes using an open source 3D Pikachu model. Then, we collect a series of background images and place a Pikachu image at a random position on each image. We use the [im2rec tool](https://github.com/apache/incubator-mxnet/blob/master/tools/im2rec.py) provided by MXNet to convert the images to binary RecordIO format[1]. This format can reduce the storage overhead of the data set on the disk and improve the reading efficiency. If you want to learn more about how to read images, refer to the documentation for the [GluonCV Toolkit](https://gluon-cv.mxnet.io/).\n\n\n## Download the Data Set\n\nThe Pikachu data set in RecordIO format can be downloaded directly from the Internet. The operation for downloading the data set is defined in the function `_download_pikachu`.",
"_____no_output_____"
]
],
[
[
"%matplotlib inline\nimport d2l\nfrom mxnet import gluon, image\nimport os\n\n# Save to the d2l package.\ndef download_pikachu(data_dir):\n root_url = ('https://apache-mxnet.s3-accelerate.amazonaws.com/'\n 'gluon/dataset/pikachu/')\n dataset = {'train.rec': 'e6bcb6ffba1ac04ff8a9b1115e650af56ee969c8',\n 'train.idx': 'dcf7318b2602c06428b9988470c731621716c393',\n 'val.rec': 'd6c33f799b4d058e82f2cb5bd9a976f69d72d520'}\n for k, v in dataset.items():\n gluon.utils.download(\n root_url + k, os.path.join(data_dir, k), sha1_hash=v)",
"_____no_output_____"
]
],
[
[
"## Read the Data Set\n\nWe are going to read the object detection data set by creating the instance `ImageDetIter`. The \"Det\" in the name refers to Detection. We will read the training data set in random order. Since the format of the data set is RecordIO, we need the image index file `'train.idx'` to read random mini-batches. In addition, for each image of the training set, we will use random cropping and require the cropped image to cover at least 95% of each object. Since the cropping is random, this requirement is not always satisfied. We preset the maximum number of random cropping attempts to 200. If none of them meets the requirement, the image will not be cropped. To ensure the certainty of the output, we will not randomly crop the images in the test data set. We also do not need to read the test data set in random order.",
"_____no_output_____"
]
],
[
[
"# Save to the d2l package.\ndef load_data_pikachu(batch_size, edge_size=256):\n \"\"\"Load the pikachu dataset\"\"\"\n data_dir = '../data/pikachu'\n download_pikachu(data_dir)\n train_iter = image.ImageDetIter(\n path_imgrec=os.path.join(data_dir, 'train.rec'),\n path_imgidx=os.path.join(data_dir, 'train.idx'),\n batch_size=batch_size,\n data_shape=(3, edge_size, edge_size), # The shape of the output image\n shuffle=True, # Read the data set in random order\n rand_crop=1, # The probability of random cropping is 1\n min_object_covered=0.95, max_attempts=200)\n val_iter = image.ImageDetIter(\n path_imgrec=os.path.join(data_dir, 'val.rec'), batch_size=batch_size,\n data_shape=(3, edge_size, edge_size), shuffle=False)\n return train_iter, val_iter",
"_____no_output_____"
]
],
[
[
"Below, we read a mini-batch and print the shape of the image and label. The shape of the image is the same as in the previous experiment (batch size, number of channels, height, width). The shape of the label is (batch size, $m$, 5), where $m$ is equal to the maximum number of bounding boxes contained in a single image in the data set. Although computation for the mini-batch is very efficient, it requires each image to contain the same number of bounding boxes so that they can be placed in the same batch. Since each image may have a different number of bounding boxes, we can add illegal bounding boxes to images that have less than $m$ bounding boxes until each image contains $m$ bounding boxes. Thus, we can read a mini-batch of images each time. The label of each bounding box in the image is represented by an array of length 5. The first element in the array is the category of the object contained in the bounding box. When the value is -1, the bounding box is an illegal bounding box for filling purpose. The remaining four elements of the array represent the $x, y$ axis coordinates of the upper-left corner of the bounding box and the $x, y$ axis coordinates of the lower-right corner of the bounding box (the value range is between 0 and 1). The Pikachu data set here has only one bounding box per image, so $m=1$.",
"_____no_output_____"
]
],
[
[
"batch_size, edge_size = 32, 256\ntrain_iter, _ = load_data_pikachu(batch_size, edge_size)\nbatch = train_iter.next()\nbatch.data[0].shape, batch.label[0].shape",
"_____no_output_____"
]
],
[
[
"## Graphic Data\n\nWe have ten images with bounding boxes on them. We can see that the angle, size, and position of Pikachu are different in each image. Of course, this is a simple man-made data set. In actual practice, the data is usually much more complicated.",
"_____no_output_____"
]
],
[
[
"imgs = (batch.data[0][0:10].transpose((0, 2, 3, 1))) / 255\naxes = d2l.show_images(imgs, 2, 5, scale=2)\nfor ax, label in zip(axes, batch.label[0][0:10]):\n d2l.show_bboxes(ax, [label[0][1:5] * edge_size], colors=['w'])",
"_____no_output_____"
]
],
[
[
"## Summary\n\n* The Pikachu data set we synthesized can be used to test object detection models.\n* The data reading for object detection is similar to that for image classification. However, after we introduce bounding boxes, the label shape and image augmentation (e.g., random cropping) are changed.\n\n\n## Exercises\n\n* Referring to the MXNet documentation, what are the parameters for the constructors of the `image.ImageDetIter` and `image.CreateDetAugmenter` classes? What is their significance?\n\n## Scan the QR Code to [Discuss](https://discuss.mxnet.io/t/2452)\n\n![](../img/qr_object-detection-dataset.svg)",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
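"code",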
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
e7fbffeed93e183ea84713cd6eefeee6d0d935a4 | 6,351 | ipynb | Jupyter Notebook | elastic-pynotebook.ipynb | willingc/bouncy-notes | c27a5eb512a0c73f6c1b9e3a0668a2a9fcf2486c | [
"MIT"
] | null | null | null | elastic-pynotebook.ipynb | willingc/bouncy-notes | c27a5eb512a0c73f6c1b9e3a0668a2a9fcf2486c | [
"MIT"
] | null | null | null | elastic-pynotebook.ipynb | willingc/bouncy-notes | c27a5eb512a0c73f6c1b9e3a0668a2a9fcf2486c | [
"MIT"
] | null | null | null | 22.682143 | 124 | 0.447646 | [
[
[
"empty"
]
]
] | [
"empty"
] | [
[
"empty"
]
] |
e7fc053b76efaf2bd886cba73cb9ea93c3e575dc | 171,147 | ipynb | Jupyter Notebook | jupyter_notebooks/feature_engineering_pt1.ipynb | StevenVuong/Udacity-ML-Engineer-Nanodegree-Capstone-Project | ffe2aa36475e658a1d29853caa7cca53d34fe668 | [
"MIT"
] | null | null | null | jupyter_notebooks/feature_engineering_pt1.ipynb | StevenVuong/Udacity-ML-Engineer-Nanodegree-Capstone-Project | ffe2aa36475e658a1d29853caa7cca53d34fe668 | [
"MIT"
] | null | null | null | jupyter_notebooks/feature_engineering_pt1.ipynb | StevenVuong/Udacity-ML-Engineer-Nanodegree-Capstone-Project | ffe2aa36475e658a1d29853caa7cca53d34fe668 | [
"MIT"
] | null | null | null | 44.281242 | 7,774 | 0.438196 | [
[
[
"The Goal of this Notebook is to predict Future Sales given historical data (daily granularity). This is a part of the kaggle competition \"Predict Future Sales\": https://www.kaggle.com/c/competitive-data-science-predict-future-sales/data Where more information about the problem, dataset and other solutions can be found.\n\nFor my own usage, this is a part of the Capstone Project as part of the Udacity Machine Learning Engineer Nanodegree program and so am running this on AWS Sagemaker, with a conda_pytorch_36 shell.\n\nAuthor: Steven Vuong. <br>\nMost recent update: 25/05/2020",
"_____no_output_____"
]
],
[
[
"# mount gdrive\nfrom google.colab import drive\ndrive.mount('/gdrive')",
"Drive already mounted at /gdrive; to attempt to forcibly remount, call drive.mount(\"/gdrive\", force_remount=True).\n"
],
[
"# cd to dir\n% cd '../gdrive/My Drive/self_teach/udacity_ml_eng_nanodegree'",
"/gdrive/My Drive/self_teach/udacity_ml_eng_nanodegree\n"
],
[
"# Import Libraries\nimport pandas as pd\nimport numpy as np\nimport warnings\nfrom sklearn.preprocessing import LabelEncoder\n\n# Visualisation Libraries\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\n# Styling Preferences\n%matplotlib inline\nsns.set(style=\"darkgrid\")\npd.set_option('display.float_format', lambda x: '%.2f' % x)\nwarnings.filterwarnings(\"ignore\")",
"/usr/local/lib/python3.6/dist-packages/statsmodels/tools/_testing.py:19: FutureWarning: pandas.util.testing is deprecated. Use the functions in the public API at pandas.testing instead.\n import pandas.util.testing as tm\n"
]
],
[
[
"Before we begin, Thanks to the following notebooks who I gained some ideas from in feature engineering/visualisations (and took code snippets from). I would suggest having a look at their notebooks and work also, and if you like it, give them a thumbs up on Kaggle to support their work :)):\n- https://www.kaggle.com/dlarionov/feature-engineering-xgboost\n- https://www.kaggle.com/kyakovlev/1st-place-solution-part-1-hands-on-data\n- https://www.kaggle.com/dimitreoliveira/model-stacking-feature-engineering-and-eda",
"_____no_output_____"
]
],
[
[
"# Load in dataset (cast float64 -> float32 and int32 -> int16 to save memory)\nitems = pd.read_csv('./data/competition_files/items.csv', \n dtype={'item_name': 'str', 'item_id': 'int16', 'item_category_id': 'int16'}\n )\nshops = pd.read_csv('./data/competition_files/shops.csv',\n dtype={'shop_name': 'str', 'shop_id': 'int16'}\n )\ncategories = pd.read_csv('./data/competition_files/item_categories.csv', \n dtype={'item_category_name': 'str', 'item_category_id': 'int16'}\n )\ntrain = pd.read_csv('./data/competition_files/sales_train.csv', \n dtype={\n 'date': 'str', \n 'date_block_num': 'int16', \n 'shop_id': 'int16', \n 'item_id': 'int16', \n 'item_price': 'float32', \n 'item_cnt_day': 'int16'}\n )\n# set index to ID to avoid dropping it later\ntest = pd.read_csv('./data/competition_files/test.csv', \n dtype={'ID': 'int16', 'shop_id': 'int16', 'item_id': 'int16'}\n ).set_index('ID')",
"_____no_output_____"
],
[
"# Cast train date from string to datetime data type\ntrain.date = train.date.str.replace(\".\", \"/\")\ntrain.date = pd.to_datetime(train.date)",
"_____no_output_____"
]
],
[
[
"Join the different data sets; merge onto train df",
"_____no_output_____"
]
],
[
[
"train = train.join(\n items, on='item_id', rsuffix='_').join(\n shops, on='shop_id', rsuffix='_').join(\n categories, on='item_category_id', rsuffix='_').drop(\n ['item_id_', 'shop_id_', 'item_category_id_'], axis=1\n)",
"_____no_output_____"
]
],
[
[
"Probe the train data, it appears that there are no nan data, or missing data, which is quite good.",
"_____no_output_____"
]
],
[
[
"print(\"----------Top-5- Record----------\")\nprint(train.head(5))\nprint(\"-----------Information-----------\")\nprint(train.info())\nprint(\"-----------Data Types-----------\")\nprint(train.dtypes)\nprint(\"----------Missing value-----------\")\nprint(train.isnull().sum())\nprint(\"----------Null value-----------\")\nprint(train.isna().sum())\nprint(\"----------Shape of Data----------\")\nprint(\"Number of rows = {}, Number of columns = {}\".format(len(train), len(train.columns)))\nprint(\"----------Data Description----------\")\nprint(train.describe())",
"----------Top-5- Record----------\n date ... item_category_name\n0 2013-02-01 ... Кино - Blu-Ray\n1 2013-03-01 ... Музыка - Винил\n2 2013-05-01 ... Музыка - Винил\n3 2013-06-01 ... Музыка - Винил\n4 2013-01-15 ... Музыка - CD фирменного производства\n\n[5 rows x 10 columns]\n-----------Information-----------\n<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 2935849 entries, 0 to 2935848\nData columns (total 10 columns):\n # Column Dtype \n--- ------ ----- \n 0 date datetime64[ns]\n 1 date_block_num int16 \n 2 shop_id int16 \n 3 item_id int16 \n 4 item_price float32 \n 5 item_cnt_day int16 \n 6 item_name object \n 7 item_category_id int16 \n 8 shop_name object \n 9 item_category_name object \ndtypes: datetime64[ns](1), float32(1), int16(5), object(3)\nmemory usage: 128.8+ MB\nNone\n-----------Data Types-----------\ndate datetime64[ns]\ndate_block_num int16\nshop_id int16\nitem_id int16\nitem_price float32\nitem_cnt_day int16\nitem_name object\nitem_category_id int16\nshop_name object\nitem_category_name object\ndtype: object\n----------Missing value-----------\ndate 0\ndate_block_num 0\nshop_id 0\nitem_id 0\nitem_price 0\nitem_cnt_day 0\nitem_name 0\nitem_category_id 0\nshop_name 0\nitem_category_name 0\ndtype: int64\n----------Null value-----------\ndate 0\ndate_block_num 0\nshop_id 0\nitem_id 0\nitem_price 0\nitem_cnt_day 0\nitem_name 0\nitem_category_id 0\nshop_name 0\nitem_category_name 0\ndtype: int64\n----------Shape of Data----------\nNumber of rows = 2935849, Number of columns = 10\n----------Data Description----------\n date_block_num shop_id ... item_cnt_day item_category_id\ncount 2935849.00 2935849.00 ... 2935849.00 2935849.00\nmean 14.57 33.00 ... 1.24 40.00\nstd 9.42 16.23 ... 2.62 17.10\nmin 0.00 0.00 ... -22.00 0.00\n25% 7.00 22.00 ... 1.00 28.00\n50% 14.00 31.00 ... 1.00 40.00\n75% 23.00 47.00 ... 1.00 55.00\nmax 33.00 59.00 ... 2169.00 83.00\n\n[8 rows x 6 columns]\n"
],
[
"# look at time period of data\nprint('Min date from train set: %s' % train['date'].min().date())\nprint('Max date from train set: %s' % train['date'].max().date())",
"Min date from train set: 2013-01-01\nMax date from train set: 2015-12-10\n"
]
],
[
[
"Data is from 1st January 2013 to 10th Decemer 2015, as we expect",
"_____no_output_____"
],
[
"So it turns out that a lot of data in the training set for columns \"shop_id\" and \"item_id\" does not appear in the test set. This could be perhaps because the item is no longer on sale as time goes on or shops have closed down or moved addresses. As we want to predict data in the test set, we will focus on only using \"shop_id\" and \"item_id\" that appears in the test set. \n\nThese rows may contain information so could be worth keeping as an extra column (commented out) indicating whether or not the train_id or shop_id is in the test set. Unfortunately however, we are tight on memory and so will not be doing that in this notebook.\n\nTo make this more future proof where the \"shop_id\" and \"item_id\" might change over time (in a production environment, let's say), one may want to consider a data pipeline to constantly train and update our model with the latest information regarding shop_id and item_id's etc.. ",
"_____no_output_____"
]
],
[
[
"test_shop_ids = test['shop_id'].unique()\ntest_item_ids = test['item_id'].unique()\n\n# Only shops that exist in test set.\ncorrelate_train = train[train['shop_id'].isin(test_shop_ids)]\n# Only items that exist in test set.\ncorrelate_train = correlate_train[correlate_train['item_id'].isin(test_item_ids)]",
"_____no_output_____"
],
[
"print('Initial data set size :', train.shape[0])\nprint('Data set size after matching crossovers between train and test:', correlate_train.shape[0])",
"Initial data set size : 2935849\nData set size after matching crossovers between train and test: 1224439\n"
],
[
"# Make separate column to indicate whether or not the train_id and shop_id is in test\n# train['is_in_test'] = train.index.isin(correlate_train.index)\n# train.head()",
"_____no_output_____"
],
[
"# Reduce train set to just match ones in test set regarding train_id and shop_id\ntrain = correlate_train\nlen(train)",
"_____no_output_____"
]
],
[
[
"It appears we have 5 duplicated rows, let's look into these",
"_____no_output_____"
]
],
[
[
"print('Number of duplicates:', len(train[train.duplicated()]))",
"Number of duplicates: 5\n"
]
],
[
[
"The Itetm ID's are all the same, as well as the price for a number of them; other columns such as date, date_block_num look different. So this appears not to be a mistake. As there are only 5 duplicated rows, we will leave these in for now and deal with these later.",
"_____no_output_____"
]
],
[
[
"train[train.duplicated()]",
"_____no_output_____"
]
],
[
[
"Plot the train data; look for outliers. It seems like there are a few with item price > 100000 and with item count per day > 1000. We will remove these from our training set.",
"_____no_output_____"
]
],
[
[
"plt.figure(figsize=(10,4))\nplt.xlim(-100, 3000)\nsns.boxplot(x=train.item_cnt_day)\n\nplt.figure(figsize=(10,4))\nplt.xlim(train.item_price.min(), train.item_price.max()*1.1)\nsns.boxplot(x=train.item_price)",
"_____no_output_____"
],
[
"train = train[train.item_price<100000]\ntrain = train[train.item_cnt_day<1000]",
"_____no_output_____"
],
[
"plt.figure(figsize=(10,4))\nplt.xlim(-100, 3000)\nsns.boxplot(x=train.item_cnt_day)\n\nplt.figure(figsize=(10,4))\nplt.xlim(train.item_price.min(), train.item_price.max()*1.1)\nsns.boxplot(x=train.item_price)\n\nplt.show()",
"_____no_output_____"
]
],
[
[
"Looking better after having removed outliers.",
"_____no_output_____"
],
[
"Fill any item_price < 0 with the median item price median.",
"_____no_output_____"
]
],
[
[
"# Calculate the item price median\nmedian = train.item_price.median()\nprint(\"Item Price Median = {}\".format(median))\ntrain.loc[train.item_price<0, 'item_price'] = median",
"Item Price Median = 549.0\n"
],
[
"# Double there are no item price rows < 0\ntrain.loc[train.item_price<0, 'item_price'] ",
"_____no_output_____"
]
],
[
[
"Count number of rows with item_cnt_day < 0; seems too many to be anomalous and could be an important feature. We will leave this in our dataset.",
"_____no_output_____"
]
],
[
[
"len(train.loc[train.item_cnt_day<0, 'item_cnt_day'])",
"_____no_output_____"
]
],
[
[
"Some shops are duplicates of each other (according to name), we will fix these in our train and test set.",
"_____no_output_____"
]
],
[
[
"# Якутск Орджоникидзе, 56\ntrain.loc[train.shop_id == 0, 'shop_id'] = 57\ntest.loc[test.shop_id == 0, 'shop_id'] = 57\n\n# Якутск ТЦ \"Центральный\"\ntrain.loc[train.shop_id == 1, 'shop_id'] = 58\ntest.loc[test.shop_id == 1, 'shop_id'] = 58\n\n# Жуковский ул. Чкалова 39м²\ntrain.loc[train.shop_id == 10, 'shop_id'] = 11\ntest.loc[test.shop_id == 10, 'shop_id'] = 11",
"_____no_output_____"
]
],
[
[
"Process \"Shop_name\" column -> shop name begins with city name.",
"_____no_output_____"
]
],
[
[
"# Fix erroneous shop name title\ntrain.loc[train.shop_name == 'Сергиев Посад ТЦ \"7Я\"', 'shop_name'] = 'СергиевПосад ТЦ \"7Я\"'",
"_____no_output_____"
],
[
"# Create a column for city\ntrain['city'] = train['shop_name'].str.split(' ').map(lambda x: x[0])\ntrain.head()",
"_____no_output_____"
],
[
"# Fix a city name (typo)\ntrain.loc[train.city == '!Якутск', 'city'] = 'Якутск'",
"_____no_output_____"
],
[
"# Encode the city name into a code column\ntrain['city_code'] = LabelEncoder().fit_transform(train['city'])\ntrain.head()",
"_____no_output_____"
]
],
[
[
"Each category name contains type and subtype in its name. Treat this similarly as to how we treated shop name, split into separate columns and encode into labels (one hot encoding).",
"_____no_output_____"
]
],
[
[
"# Create separate column with split category name\ntrain['split_category_name'] = train['item_category_name'].str.split('-')\ntrain.head()",
"_____no_output_____"
],
[
"# Make column for category type and encode\ntrain['item_category_type'] = train['split_category_name'].map(lambda x : x[0].strip())\ntrain['item_category_type_code'] = LabelEncoder().fit_transform(train['item_category_type'])\n\ntrain.head()",
"_____no_output_____"
],
[
"# Do the same for subtype, make column wiht name if nan then set to the type\ntrain['item_category_subtype'] = train['split_category_name'].map(\n lambda x: x[1].strip() if len(x) > 1 else x[0].strip()\n)\n# Make separate encoded column\ntrain['item_category_subtype_code'] = LabelEncoder().fit_transform(train['item_category_subtype'])\ntrain.head()",
"_____no_output_____"
]
],
[
[
"We can now drop the following columns, having captured and encoded the necessary information from them:\n- shop_name\n- item_category_name\n- split_category_name\n- item_category_type\n- item_category_subtype",
"_____no_output_____"
]
],
[
[
"train = train.drop(['shop_name', \n 'item_category_name', \n 'split_category_name', \n 'item_category_type', \n 'item_category_subtype',\n ], axis = 1)\ntrain.head()",
"_____no_output_____"
]
],
[
[
"Looking at item name, perhaps we can reduce the number of unique types, as there are too many at the moment which our model might struggle with, so we will try to categorise some of these by just taking the first part of an item name and encoding this.",
"_____no_output_____"
]
],
[
[
"print(\"Number of unique Item names = {}\".format(len(train.item_name.unique())))",
"Number of unique Item names = 4716\n"
],
[
"# Split item name, extracting first word of the string\ntrain['item_name_split'] = train['item_name'].str.split(' ').map(lambda x : x[0].strip())\ntrain.head()",
"_____no_output_____"
],
[
"print(\"Number of unique Item First Words = {}\".format(len(train['item_name_split'].unique())))",
"Number of unique Item First Words = 1590\n"
]
],
[
[
"This seems substantial enough, so we will encode this once again into another column.",
"_____no_output_____"
]
],
[
[
"train['item_name_code'] = LabelEncoder().fit_transform(train['item_name_split'])\ntrain.head()",
"_____no_output_____"
]
],
[
[
"And now we can drop the following columns:\n- item_name\n- item_name_split\n- city (forgot to drop in last round)",
"_____no_output_____"
]
],
[
[
"train = train.drop(['item_name', \n 'item_name_split',\n 'city'\n ], axis = 1)\ntrain.head()",
"_____no_output_____"
]
],
[
[
"So the features above are the ones so far deemed as useful and thus are kept on. We will group by month into dataframe; then by the other columns and then aggregate the item price and count, determining the mean average and sum per month.",
"_____no_output_____"
]
],
[
[
"print(len(train))",
"1224437\n"
],
[
"# Group by month (date_block_num)\n# Could do more complex, just want something very basic to aggregate\ntrain_by_month = train.sort_values('date').groupby([\n 'date_block_num', \n 'item_category_type_code',\n 'item_category_subtype_code',\n 'item_name_code',\n 'city_code',\n 'shop_id', \n 'item_category_id',\n 'item_id',\n # Keep simple; will just use the above columns \n], as_index=False)\ntrain_by_month.size()",
"_____no_output_____"
],
[
"# everything is organised by date block num, great!\ntrain_by_month.head().head()",
"_____no_output_____"
],
[
"train_by_month.head()",
"_____no_output_____"
],
[
"# Aggregate item price and item count\ntrain_by_month = train_by_month.agg({'item_price':['sum', 'mean'], 'item_cnt_day':['sum', 'mean','count']})",
"_____no_output_____"
],
[
"train_by_month.head()",
"_____no_output_____"
],
[
"# See how many rows we now have\nlen(train_by_month)",
"_____no_output_____"
],
[
"# Sanity check on number of months\ntrain_by_month.date_block_num.unique()",
"_____no_output_____"
],
[
"# Rename columns\ntrain_by_month.columns = ['date_block_num', \n 'item_category_type_code',\n 'item_category_subtype_code',\n 'item_name_code',\n 'city_code',\n 'shop_id', \n 'item_category_id',\n 'item_id',\n 'sum_item_price',\n 'mean_item_price',\n 'sum_item_count',\n 'mean_item_count',\n 'transactions']\ntrain_by_month.head()",
"_____no_output_____"
]
],
[
[
"As we have to apply predictions to the test set, we must ensure all possible combinations of \"shop_id\" and \"item_id\" are covered. \n\nTo do this, we will loop through all possible combinations in our test set and append to an empty dataframe. Then we will merge that empty dataframe to our main dataframe and fill in missing na values with 0.",
"_____no_output_____"
]
],
[
[
"# Get all unique shop id's and item id's\nshop_ids = test['shop_id'].unique()\nitem_ids = test['item_id'].unique()\n# Initialise empty df\nempty_df = []\n# Loop through months and append to dataframe\nfor i in range(34):\n for item in item_ids:\n for shop in shop_ids:\n empty_df.append([i, shop, item])\n# Turn into dataframe\nempty_df = pd.DataFrame(empty_df, columns=['date_block_num','shop_id','item_id'])",
"_____no_output_____"
],
[
"# Merge monthly train set with the complete set (missing records will be filled with 0).\ntrain_by_month = pd.merge(train_by_month, empty_df, on=['date_block_num','shop_id','item_id'], how='outer')\nlen(train_by_month)",
"_____no_output_____"
],
[
"# Double check we have no na records\ntrain_by_month.isna().sum()",
"_____no_output_____"
]
],
[
[
"The fact we have so many na is quiet concerning. Perhaps many more item_id or shop_id values were added in the most recent month (test data) that is not included in the training data. Whilst there may be better ways of dealing with this, we will be fill the missing na records with 0 and progress.",
"_____no_output_____"
]
],
[
[
"# Filll missing records with na\ntrain_by_month.fillna(0, inplace=True)",
"_____no_output_____"
],
[
"train_by_month.isna().sum()",
"_____no_output_____"
],
[
"train_by_month.describe()",
"_____no_output_____"
]
],
[
[
"In this first feature-engineering notebook, we have inspected the data, removed outliers and identified features (as well as engineer others) we would like to use for further feature engineering to train our model with. \n\nAs our feature-engineering steps are quite numerous, we will split it up into separate notebooks, more to come in part-2.",
"_____no_output_____"
]
],
[
[
"# Save this as a csv \ntrain_by_month.to_csv('./data/output/processed_data_pt1.csv', index=False, header=True)",
"_____no_output_____"
],
[
"",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
]
] |
e7fc055e83432b991ecf55f33b114de88b5604fe | 33,917 | ipynb | Jupyter Notebook | assignments/assignment1/Linear classifier_solution.ipynb | tbb/dlcourse_ai | d8a14d30f7174b449c9bb79f3b87d4822d4f0f4b | [
"MIT"
] | null | null | null | assignments/assignment1/Linear classifier_solution.ipynb | tbb/dlcourse_ai | d8a14d30f7174b449c9bb79f3b87d4822d4f0f4b | [
"MIT"
] | null | null | null | assignments/assignment1/Linear classifier_solution.ipynb | tbb/dlcourse_ai | d8a14d30f7174b449c9bb79f3b87d4822d4f0f4b | [
"MIT"
] | null | null | null | 46.717631 | 10,620 | 0.71159 | [
[
[
"# Задание 1.2 - Линейный классификатор (Linear classifier)\n\nВ этом задании мы реализуем другую модель машинного обучения - линейный классификатор. Линейный классификатор подбирает для каждого класса веса, на которые нужно умножить значение каждого признака и потом сложить вместе.\nТот класс, у которого эта сумма больше, и является предсказанием модели.\n\nВ этом задании вы:\n- потренируетесь считать градиенты различных многомерных функций\n- реализуете подсчет градиентов через линейную модель и функцию потерь softmax\n- реализуете процесс тренировки линейного классификатора\n- подберете параметры тренировки на практике\n\nНа всякий случай, еще раз ссылка на туториал по numpy: \nhttp://cs231n.github.io/python-numpy-tutorial/",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport matplotlib.pyplot as plt\n\n%matplotlib inline\n\n%load_ext autoreload\n%autoreload 2",
"_____no_output_____"
],
[
"from dataset import load_svhn, random_split_train_val\nfrom gradient_check_solution import check_gradient\nfrom metrics_solution import multiclass_accuracy \nimport linear_classifer_solution as linear_classifer",
"_____no_output_____"
]
],
[
[
"# Как всегда, первым делом загружаем данные\n\nМы будем использовать все тот же SVHN.",
"_____no_output_____"
]
],
[
[
"def prepare_for_linear_classifier(train_X, test_X):\n train_flat = train_X.reshape(train_X.shape[0], -1).astype(np.float) / 255.0\n test_flat = test_X.reshape(test_X.shape[0], -1).astype(np.float) / 255.0\n \n # Subtract mean\n mean_image = np.mean(train_flat, axis = 0)\n train_flat -= mean_image\n test_flat -= mean_image\n \n # Add another channel with ones as a bias term\n train_flat_with_ones = np.hstack([train_flat, np.ones((train_X.shape[0], 1))])\n test_flat_with_ones = np.hstack([test_flat, np.ones((test_X.shape[0], 1))]) \n return train_flat_with_ones, test_flat_with_ones\n \ntrain_X, train_y, test_X, test_y = load_svhn(\"data\", max_train=10000, max_test=1000) \ntrain_X, test_X = prepare_for_linear_classifier(train_X, test_X)\n# Split train into train and val\ntrain_X, train_y, val_X, val_y = random_split_train_val(train_X, train_y, num_val = 1000)",
"_____no_output_____"
]
],
[
[
"# Играемся с градиентами!\n\nВ этом курсе мы будем писать много функций, которые вычисляют градиенты аналитическим методом.\n\nНеобходимым инструментом во время реализации кода, вычисляющего градиенты, является функция его проверки. Эта функция вычисляет градиент численным методом и сверяет результат с градиентом, вычисленным аналитическим методом.\n\nМы начнем с того, чтобы реализовать вычисление численного градиента (numeric gradient) в этой функции.\nВычислите градиент с помощью численной производной для каждой координаты. Для вычисления производной используйте так называемую two-point formula (https://en.wikipedia.org/wiki/Numerical_differentiation):\n\n![image](https://wikimedia.org/api/rest_v1/media/math/render/svg/22fc2c0a66c63560a349604f8b6b39221566236d)\n",
"_____no_output_____"
]
],
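[
[
"Before relying on `check_gradient`, it can help to see the idea in isolation. Below is a minimal, hypothetical sketch of a numeric gradient check built on the two-point formula; like the assignment's functions, it assumes `f` returns a tuple of `(value, analytic_gradient)`.",
"_____no_output_____"
]
],
[
[
"def numeric_grad_check_sketch(f, x, delta=1e-5, tol=1e-4):\n    # f: callable returning (value, analytic gradient) at x\n    _, analytic_grad = f(x)\n    it = np.nditer(x, flags=['multi_index'])\n    while not it.finished:\n        ix = it.multi_index\n        orig = x[ix]\n        x[ix] = orig + delta\n        f_plus, _ = f(x)\n        x[ix] = orig - delta\n        f_minus, _ = f(x)\n        x[ix] = orig  # restore the original value\n        numeric = (f_plus - f_minus) / (2 * delta)  # two-point formula\n        assert abs(numeric - analytic_grad[ix]) < tol, (ix, numeric, analytic_grad[ix])\n        it.iternext()\n    print('Gradient check passed!')",
"_____no_output_____"
]
],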
[
[
"# TODO: Implement gradient check function\ndef sqr(x):\n return x*x, 2*x\n\ncheck_gradient(sqr, np.array([3.0]))\n\ndef array_sum(x):\n assert x.shape == (2,), x.shape\n return np.sum(x), np.ones_like(x)\n\ncheck_gradient(array_sum, np.array([3.0, 2.0]))\n\ndef array_2d_sum(x):\n assert x.shape == (2,2)\n return np.sum(x), np.ones_like(x)\n\ncheck_gradient(array_2d_sum, np.array([[3.0, 2.0], [1.0, 0.0]]))",
"Gradient check passed!\nGradient check passed!\nGradient check passed!\n"
]
],
[
[
"Теперь реализуем функцию softmax, которая получает на вход оценки для каждого класса и преобразует их в вероятности от 0 до 1:\n![image](https://wikimedia.org/api/rest_v1/media/math/render/svg/e348290cf48ddbb6e9a6ef4e39363568b67c09d3)\n\n**Важно:** Практический аспект вычисления этой функции заключается в том, что в ней учавствует вычисление экспоненты от потенциально очень больших чисел - это может привести к очень большим значениям в числителе и знаменателе за пределами диапазона float.\n\nК счастью, у этой проблемы есть простое решение -- перед вычислением softmax вычесть из всех оценок максимальное значение среди всех оценок:\n```\npredictions -= np.max(predictions)\n```\n(подробнее здесь - http://cs231n.github.io/linear-classify/#softmax, секция `Practical issues: Numeric stability`)",
"_____no_output_____"
]
],
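[
[
"As a hedged illustration of the stability trick described above, here is a minimal softmax sketch for a single 1-D array of scores; the graded version belongs in `linear_classifer.py`.",
"_____no_output_____"
]
],
[
[
"def softmax_sketch(predictions):\n    # Shifting by the max score does not change the result but avoids overflow in exp\n    shifted = predictions - np.max(predictions)\n    exps = np.exp(shifted)\n    return exps / np.sum(exps)",
"_____no_output_____"
]
],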
[
[
"# TODO Implement softmax and cross-entropy for single sample\nprobs = linear_classifer.softmax(np.array([-10, 0, 10]))\n\n# Make sure it works for big numbers too!\nprobs = linear_classifer.softmax(np.array([1000, 0, 0]))\nassert np.isclose(probs[0], 1.0)",
"_____no_output_____"
]
],
[
[
"Кроме этого, мы реализуем cross-entropy loss, которую мы будем использовать как функцию ошибки (error function).\nВ общем виде cross-entropy определена следующим образом:\n![image](https://wikimedia.org/api/rest_v1/media/math/render/svg/0cb6da032ab424eefdca0884cd4113fe578f4293)\n\nгде x - все классы, p(x) - истинная вероятность принадлежности сэмпла классу x, а q(x) - вероятность принадлежности классу x, предсказанная моделью. \nВ нашем случае сэмпл принадлежит только одному классу, индекс которого передается функции. Для него p(x) равна 1, а для остальных классов - 0. \n\nЭто позволяет реализовать функцию проще!",
"_____no_output_____"
]
],
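[
[
"Since p(x) is one-hot here, the sum collapses to a single term. A minimal sketch for one sample, assuming `probs` is a 1-D probability array, might look like this:",
"_____no_output_____"
]
],
[
[
"def cross_entropy_sketch(probs, target_index):\n    # With a one-hot p(x), cross-entropy reduces to -log of the target class probability\n    return -np.log(probs[target_index])",
"_____no_output_____"
]
],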
[
[
"probs = linear_classifer.softmax(np.array([-5, 0, 5]))\nlinear_classifer.cross_entropy_loss(probs, 1)",
"_____no_output_____"
]
],
[
[
"После того как мы реализовали сами функции, мы можем реализовать градиент.\n\nОказывается, что вычисление градиента становится гораздо проще, если объединить эти функции в одну, которая сначала вычисляет вероятности через softmax, а потом использует их для вычисления функции ошибки через cross-entropy loss.\n\nЭта функция `softmax_with_cross_entropy` будет возвращает и значение ошибки, и градиент по входным параметрам. Мы проверим корректность реализации с помощью `check_gradient`.",
"_____no_output_____"
]
],
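[
[
"For reference, the well-known result is that the gradient of cross-entropy over softmax with respect to the scores is simply `probs - one_hot(target)`. A hypothetical single-sample sketch, reusing the helper sketches above:",
"_____no_output_____"
]
],
[
[
"def softmax_with_ce_sketch(predictions, target_index):\n    probs = softmax_sketch(predictions)  # stable softmax from the sketch above\n    loss = -np.log(probs[target_index])\n    dprediction = probs.copy()\n    dprediction[target_index] -= 1.0  # d loss / d scores = probs - one_hot(target)\n    return loss, dprediction",
"_____no_output_____"
]
],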
[
[
"# TODO Implement combined function or softmax and cross entropy and produces gradient\nloss, grad = linear_classifer.softmax_with_cross_entropy(np.array([1, 0, 0]), 1)\ncheck_gradient(lambda x: linear_classifer.softmax_with_cross_entropy(x, 1), np.array([1, 0, 0], np.float))",
"Gradient check passed!\n"
]
],
[
[
"В качестве метода тренировки мы будем использовать стохастический градиентный спуск (stochastic gradient descent или SGD), который работает с батчами сэмплов. \n\nПоэтому все наши фукнции будут получать не один пример, а батч, то есть входом будет не вектор из `num_classes` оценок, а матрица размерности `batch_size, num_classes`. Индекс примера в батче всегда будет первым измерением.\n\nСледующий шаг - переписать наши функции так, чтобы они поддерживали батчи.\n\nФинальное значение функции ошибки должно остаться числом, и оно равно среднему значению ошибки среди всех примеров в батче.",
"_____no_output_____"
]
],
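[
[
"A hedged sketch of the batched version: the loss becomes the mean over the batch, so the gradient is divided by the batch size. Shapes are assumed to be `(batch_size, num_classes)` for the scores and `(batch_size,)` for the targets.",
"_____no_output_____"
]
],
[
[
"def softmax_with_ce_batch_sketch(predictions, target_index):\n    shifted = predictions - np.max(predictions, axis=1, keepdims=True)\n    exps = np.exp(shifted)\n    probs = exps / np.sum(exps, axis=1, keepdims=True)\n    batch_size = predictions.shape[0]\n    rows = np.arange(batch_size)\n    loss = -np.mean(np.log(probs[rows, target_index]))\n    dprediction = probs.copy()\n    dprediction[rows, target_index] -= 1.0\n    return loss, dprediction / batch_size  # mean loss => gradient scaled by 1/batch_size",
"_____no_output_____"
]
],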
[
[
"# TODO Extend combined function so it can receive a 2d array with batch of samples\n\n# Test batch_size = 1\nbatch_size = 1\npredictions = np.zeros((batch_size, 3))\ntarget_index = np.ones(batch_size, np.int)\ncheck_gradient(lambda x: linear_classifer.softmax_with_cross_entropy(x, target_index), predictions)\n\n# Test batch_size = 3\nbatch_size = 3\npredictions = np.zeros((batch_size, 3))\ntarget_index = np.ones(batch_size, np.int)\ncheck_gradient(lambda x: linear_classifer.softmax_with_cross_entropy(x, target_index), predictions)",
"Gradient check passed!\nGradient check passed!\n"
]
],
[
[
"### Наконец, реализуем сам линейный классификатор!\n\nsoftmax и cross-entropy получают на вход оценки, которые выдает линейный классификатор.\n\nОн делает это очень просто: для каждого класса есть набор весов, на которые надо умножить пиксели картинки и сложить. Получившееся число и является оценкой класса, идущей на вход softmax.\n\nТаким образом, линейный классификатор можно представить как умножение вектора с пикселями на матрицу W размера `num_features, num_classes`. Такой подход легко расширяется на случай батча векторов с пикселями X размера `batch_size, num_features`:\n\n`predictions = X * W`, где `*` - матричное умножение.\n\nРеализуйте функцию подсчета линейного классификатора и градиентов по весам `linear_softmax` в файле `linear_classifer.py`",
"_____no_output_____"
]
],
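[
[
"By the chain rule, once we have the gradient of the loss with respect to the scores, the gradient with respect to the weights is `X.T` times it. A hypothetical sketch built on the batched helper above:",
"_____no_output_____"
]
],
[
[
"def linear_softmax_sketch(X, W, target_index):\n    predictions = X.dot(W)  # (batch_size, num_classes)\n    loss, dpred = softmax_with_ce_batch_sketch(predictions, target_index)\n    dW = X.T.dot(dpred)  # chain rule through the linear layer\n    return loss, dW",
"_____no_output_____"
]
],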
[
[
"# TODO Implement linear_softmax function that uses softmax with cross-entropy for linear classifier\nbatch_size = 2\nnum_classes = 2\nnum_features = 3\nnp.random.seed(42)\nW = np.random.randint(-1, 3, size=(num_features, num_classes)).astype(np.float)\nX = np.random.randint(-1, 3, size=(batch_size, num_features)).astype(np.float)\ntarget_index = np.ones(batch_size, dtype=np.int)\n\nloss, dW = linear_classifer.linear_softmax(X, W, target_index)\ncheck_gradient(lambda w: linear_classifer.linear_softmax(X, w, target_index), W)",
"Gradient check passed!\n"
]
],
[
[
"### И теперь регуляризация\n\nМы будем использовать L2 regularization для весов как часть общей функции ошибки.\n\nНапомним, L2 regularization определяется как\n\nl2_reg_loss = regularization_strength * sum<sub>ij</sub> W[i, j]<sup>2</sup>\n\nРеализуйте функцию для его вычисления и вычисления соотвествующих градиентов.",
"_____no_output_____"
]
],
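[
[
"The corresponding gradient follows directly by differentiating the sum of squares: `2 * regularization_strength * W`. A minimal sketch:",
"_____no_output_____"
]
],
[
[
"def l2_regularization_sketch(W, reg_strength):\n    loss = reg_strength * np.sum(W ** 2)\n    grad = 2 * reg_strength * W\n    return loss, grad",
"_____no_output_____"
]
],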
[
[
"# TODO Implement l2_regularization function that implements loss for L2 regularization\nlinear_classifer.l2_regularization(W, 0.01)\ncheck_gradient(lambda w: linear_classifer.l2_regularization(w, 0.01), W)",
"Gradient check passed!\n"
]
],
[
[
"# Тренировка!",
"_____no_output_____"
],
[
"Градиенты в порядке, реализуем процесс тренировки!",
"_____no_output_____"
]
],
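[
[
"The actual `fit` lives in `linear_classifer.py`; as a hedged outline only, one SGD epoch typically shuffles the indices, walks over mini-batches, and applies the data gradient plus the regularization gradient:",
"_____no_output_____"
]
],
[
[
"def sgd_epoch_sketch(W, X, y, learning_rate=1e-3, batch_size=300, reg=1e-5):\n    # W is assumed to be an already initialised (num_features, num_classes) array\n    indices = np.arange(X.shape[0])\n    np.random.shuffle(indices)\n    for start in range(0, len(indices), batch_size):\n        batch = indices[start:start + batch_size]\n        loss, dW = linear_softmax_sketch(X[batch], W, y[batch])\n        reg_loss, reg_grad = l2_regularization_sketch(W, reg)\n        W -= learning_rate * (dW + reg_grad)\n    return W",
"_____no_output_____"
]
],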
[
[
"# TODO: Implement LinearSoftmaxClassifier.fit function\nclassifier = linear_classifer.LinearSoftmaxClassifier()\nloss_history = classifier.fit(train_X, train_y, epochs=30, learning_rate=1e-3, batch_size=300, reg=1e1)",
"Epoch 0, loss: 2.483635\nEpoch 1, loss: 2.356035\nEpoch 2, loss: 2.318027\nEpoch 3, loss: 2.306778\nEpoch 4, loss: 2.303391\nEpoch 5, loss: 2.302379\nEpoch 6, loss: 2.302109\nEpoch 7, loss: 2.302001\nEpoch 8, loss: 2.301971\nEpoch 9, loss: 2.301986\nEpoch 10, loss: 2.301975\nEpoch 11, loss: 2.301978\nEpoch 12, loss: 2.301966\nEpoch 13, loss: 2.301992\nEpoch 14, loss: 2.301981\nEpoch 15, loss: 2.301984\nEpoch 16, loss: 2.301973\nEpoch 17, loss: 2.301971\nEpoch 18, loss: 2.301966\nEpoch 19, loss: 2.301998\nEpoch 20, loss: 2.302002\nEpoch 21, loss: 2.301977\nEpoch 22, loss: 2.301962\nEpoch 23, loss: 2.301958\nEpoch 24, loss: 2.301981\nEpoch 25, loss: 2.301970\nEpoch 26, loss: 2.301958\nEpoch 27, loss: 2.301969\nEpoch 28, loss: 2.301974\nEpoch 29, loss: 2.301977\n"
],
[
"# let's look at the loss history!\nplt.plot(loss_history);",
"_____no_output_____"
],
[
"# Let's check how it performs on validation set\npred = classifier.predict(val_X)\naccuracy = multiclass_accuracy(pred, val_y)\nprint(\"Accuracy: \", accuracy)\n\n# Now, let's train more and see if it performs better\nclassifier.fit(train_X, train_y, epochs=100, learning_rate=1e-3, batch_size=300, reg=1e1)\npred = classifier.predict(val_X)\naccuracy = multiclass_accuracy(pred, val_y)\nprint(\"Accuracy after training for 100 epochs: \", accuracy)",
"Accuracy: 0.145\nEpoch 0, loss: 2.301971\nEpoch 1, loss: 2.301977\nEpoch 2, loss: 2.301983\nEpoch 3, loss: 2.301990\nEpoch 4, loss: 2.301970\nEpoch 5, loss: 2.301979\nEpoch 6, loss: 2.301968\nEpoch 7, loss: 2.301989\nEpoch 8, loss: 2.301976\nEpoch 9, loss: 2.301980\nEpoch 10, loss: 2.301986\nEpoch 11, loss: 2.301982\nEpoch 12, loss: 2.301993\nEpoch 13, loss: 2.301974\nEpoch 14, loss: 2.301999\nEpoch 15, loss: 2.301972\nEpoch 16, loss: 2.301976\nEpoch 17, loss: 2.301989\nEpoch 18, loss: 2.301968\nEpoch 19, loss: 2.301983\nEpoch 20, loss: 2.301982\nEpoch 21, loss: 2.301983\nEpoch 22, loss: 2.301975\nEpoch 23, loss: 2.301981\nEpoch 24, loss: 2.301990\nEpoch 25, loss: 2.301996\nEpoch 26, loss: 2.301979\nEpoch 27, loss: 2.301980\nEpoch 28, loss: 2.301974\nEpoch 29, loss: 2.301978\nEpoch 30, loss: 2.301972\nEpoch 31, loss: 2.301977\nEpoch 32, loss: 2.301991\nEpoch 33, loss: 2.301983\nEpoch 34, loss: 2.301986\nEpoch 35, loss: 2.301970\nEpoch 36, loss: 2.301983\nEpoch 37, loss: 2.302006\nEpoch 38, loss: 2.301975\nEpoch 39, loss: 2.301975\nEpoch 40, loss: 2.301974\nEpoch 41, loss: 2.301977\nEpoch 42, loss: 2.301963\nEpoch 43, loss: 2.301973\nEpoch 44, loss: 2.301981\nEpoch 45, loss: 2.301978\nEpoch 46, loss: 2.301970\nEpoch 47, loss: 2.301976\nEpoch 48, loss: 2.301974\nEpoch 49, loss: 2.301988\nEpoch 50, loss: 2.301970\nEpoch 51, loss: 2.302000\nEpoch 52, loss: 2.301989\nEpoch 53, loss: 2.301979\nEpoch 54, loss: 2.301973\nEpoch 55, loss: 2.301989\nEpoch 56, loss: 2.301984\nEpoch 57, loss: 2.301964\nEpoch 58, loss: 2.301977\nEpoch 59, loss: 2.301970\nEpoch 60, loss: 2.301976\nEpoch 61, loss: 2.301992\nEpoch 62, loss: 2.301982\nEpoch 63, loss: 2.301992\nEpoch 64, loss: 2.301977\nEpoch 65, loss: 2.301983\nEpoch 66, loss: 2.301959\nEpoch 67, loss: 2.301976\nEpoch 68, loss: 2.301975\nEpoch 69, loss: 2.301986\nEpoch 70, loss: 2.301995\nEpoch 71, loss: 2.301974\nEpoch 72, loss: 2.301960\nEpoch 73, loss: 2.301993\nEpoch 74, loss: 2.301976\nEpoch 75, loss: 2.301969\nEpoch 76, loss: 2.301978\nEpoch 77, loss: 2.301972\nEpoch 78, loss: 2.301979\nEpoch 79, loss: 2.301968\nEpoch 80, loss: 2.301962\nEpoch 81, loss: 2.301983\nEpoch 82, loss: 2.301975\nEpoch 83, loss: 2.301961\nEpoch 84, loss: 2.301973\nEpoch 85, loss: 2.301976\nEpoch 86, loss: 2.301993\nEpoch 87, loss: 2.301971\nEpoch 88, loss: 2.301970\nEpoch 89, loss: 2.301989\nEpoch 90, loss: 2.301989\nEpoch 91, loss: 2.301989\nEpoch 92, loss: 2.301978\nEpoch 93, loss: 2.301983\nEpoch 94, loss: 2.301976\nEpoch 95, loss: 2.301968\nEpoch 96, loss: 2.301969\nEpoch 97, loss: 2.301986\nEpoch 98, loss: 2.301984\nEpoch 99, loss: 2.301975\nAccuracy after training for 100 epochs: 0.15\n"
]
],
[
[
"### Как и раньше, используем кросс-валидацию для подбора гиперпараметтов.\n\nВ этот раз, чтобы тренировка занимала разумное время, мы будем использовать только одно разделение на тренировочные (training) и проверочные (validation) данные.\n\nТеперь нам нужно подобрать не один, а два гиперпараметра! Не ограничивайте себя изначальными значениями в коде. \nДобейтесь точности более чем **20%** на проверочных данных (validation data).",
"_____no_output_____"
]
],
[
[
"import itertools",
"_____no_output_____"
],
[
"num_epochs = 200\nbatch_size = 300\n\nlearning_rates = [1e-3, 1e-4, 1e-5]\nreg_strengths = [1e-4, 1e-5, 1e-6]\n\nbest_classifier = None\nbest_val_accuracy = -float(\"inf\")\n\n# TODO use validation set to find the best hyperparameters\n# hint: for best results, you might need to try more values for learning rate and regularization strength \n# than provided initially\n\nfor learning_rate, reg_strength in itertools.product(learning_rates, reg_strengths):\n \n classifier = linear_classifer.LinearSoftmaxClassifier()\n classifier.fit(train_X, train_y, verbose=False,\n epochs=num_epochs, batch_size=batch_size,\n learning_rate=learning_rate,\n reg=reg_strength)\n \n pred = classifier.predict(val_X)\n accuracy = multiclass_accuracy(pred, val_y)\n if accuracy > best_val_accuracy:\n best_classifier = classifier\n best_val_accuracy = accuracy\n\n\nprint('best validation accuracy achieved: %f' % best_val_accuracy)",
"best validation accuracy achieved: 0.215000\n"
]
],
[
[
"# Какой же точности мы добились на тестовых данных?",
"_____no_output_____"
]
],
[
[
"test_pred = best_classifier.predict(test_X)\ntest_accuracy = multiclass_accuracy(test_pred, test_y)\nprint('Linear softmax classifier test set accuracy: %f' % (test_accuracy, ))",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
]
] |
e7fc1596117e260d74d035ec4dcb35d4c50827b7 | 53,604 | ipynb | Jupyter Notebook | figures/Check-SED.ipynb | benjaminrose/SNIa-Local-Environments | 92713be96a89da991fe53bffcc596a5c0942fc37 | [
"MIT"
] | 1 | 2020-09-19T22:08:51.000Z | 2020-09-19T22:08:51.000Z | figures/Check-SED.ipynb | benjaminrose/SNIa-Local-Environments | 92713be96a89da991fe53bffcc596a5c0942fc37 | [
"MIT"
] | 9 | 2017-12-11T19:15:33.000Z | 2018-04-18T19:08:34.000Z | figures/Check-SED.ipynb | benjaminrose/SNIa-Local-Environments | 92713be96a89da991fe53bffcc596a5c0942fc37 | [
"MIT"
] | 3 | 2020-08-13T03:45:09.000Z | 2020-08-19T22:31:00.000Z | 169.097792 | 26,822 | 0.888068 | [
[
[
"# Is the SED Correct?\n\nIn the circle test, the SFH is totatlly bonkers. We just can not get the correct SFH back out with MCMC. Is the MCMC getting a good fit?",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport matplotlib.pyplot as plt",
"_____no_output_____"
],
[
"wavelengths = [3551, 4686, 6166, 7480, 8932] # for u, g, r, i, z filters\nfilters = ['u', 'g', 'r', 'i', 'z']",
"_____no_output_____"
]
],
[
[
"## Input \n\nText\n\nSo from:\n\nlogzsol | dust2| $\\tau$| tStart| sfTrans| sfSlope\n--------|------|----|-------|--------|---------\n-0.5| 0.1| 0.5| 1.5| 9.0| -1.0\n\nwe get\n\nu| g| r| i| z\n-|--|--|--|--\n45.36|43.76|42.99|42.67|42.39\n\nThis SED gets 25 magnitues subtracted from (`c` paramter in fit) it get it to a resonable magnitude. FSPS only calcualtes for 1 solar mass, so this factor is a scaling factor that is related to the total solar mass observed.\n\n## Fit 1 \n\nFirst we did our normal fit. The oddest part was that `logzsol` wanted the smallest value possible. This was most odd because the prior is a Gaussian centered at -0.5 (this happens to be the input value) with a width of 0.5 dex. I also have a low cut off, just cause, of -2.5. \n\nThis fit gives us \n\nlogzsol | dust2| $\\tau$| tStart| sfTrans| sfSlope | c\n--------|------|----|-------|--------|----|-----\n-2.5| 0.01| 7.17| 7.94| 10.40| -5.24| -23.48\n\nand and SED of \n\nu| g| r| i| z\n-|--|--|--|--\n43.31|42.06|41.76|41.67|41.62\n\n\n## Fit 2\n\nI changed the low cut off, in part becasue nothing else seemed to effect the metalicity paramter fit. With it now set at no lower then -1.0 the fit gives us:\n\nlogzsol | dust2| $\\tau$| tStart| sfTrans| sfSlope | c\n--------|------|----|-------|--------|----|-----\n-1.0| 0.25| 5.67| 1.94| 4.93| 1.64| -22.85\n\nand and SED of \n\nu| g| r| i| z\n-|--|--|--|--\n42.28|41.43|41.23|41.01|40.99\n\n## Fit 3\n\nFinally I \"fixed\" the metalicity to the known value of -0.5, because these previouse fits just still did not want to get things correct. This fit gives us:\n\nlogzsol | dust2| $\\tau$| tStart| sfTrans| sfSlope | c\n--------|------|----|-------|--------|----|-----\n-0.51| 0.32| 8.17| 8.42| 10.76| 4.72| -22.17\n\nand and SED of \n\nu| g| r| i| z\n-|--|--|--|--\n41.53|40.70|40.55|40.33|40.30\n\n\n**None** of these are correct. ",
"_____no_output_____"
]
],
[
[
"input_sed = np.array([45.36, 43.76, 42.99, 42.67, 42.39])\ninput_c = -25\nfit1_sed = np.array([43.31, 42.06, 41.76, 41.67, 41.62])\nfit1_c = -23.48\nfit2_sed = np.array([42.28, 41.43, 41.23, 41.01, 40.99])\nfit2_c = -22.85\nfit3_sed = np.array([41.53, 40.70, 40.55, 40.33, 40.30])\nfit3_c = -22.1",
"_____no_output_____"
],
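[
"# A small hedged sketch (not from the original run): each SED is only defined\n# up to its scaling constant c, so the fits are compared after shifting by c.\nfor name, sed, c in [('input', input_sed, input_c), ('fit 1', fit1_sed, fit1_c),\n                     ('fit 2', fit2_sed, fit2_c), ('fit 3', fit3_sed, fit3_c)]:\n    residual = (sed + c) - (input_sed + input_c)\n    print(name, np.round(residual, 2))",
"_____no_output_____"
],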
[
"plt.figure('fit test')",
"_____no_output_____"
],
[
"fig, ax = plt.subplots(1,1) \n\nax.plot(wavelengths, input_sed+input_c, label='Input Values')\n# ax.plot(wavelengths, [20.36, 18.76, 17.99, 17.67, 17.39]) # the in text file numbers.\nax.plot(wavelengths, fit1_sed+fit1_c, label='Full Fit')\nax.plot(wavelengths, fit2_sed+fit2_c, label='Smaller $\\log(Z_{sol})$ range')\nax.plot(wavelengths, fit3_sed+fit3_c, label='Fixed $\\log(Z_{sol})$')\n\nplt.gca().invert_yaxis()\nax.set_xticks(wavelengths)\nax.set_xticklabels(filters)\nax.set_xlabel('SDSS Filters')\nax.set_ylabel('Magnitude [mag]')\n\nplt.legend()\n# plt.savefig('2017-08-09- not getting correct sed.pdf')\nplt.show()",
"_____no_output_____"
]
],
[
[
"# Check Newer Resutls\n\nOn 2017-08-24 I re-ran the whole analaysis method and it got a closer answer on the circle test (particually with the log(Z_sol)) but it was not perfect. Here I want to compare the SED outputed results.",
"_____no_output_____"
]
],
[
[
"fit0824_sed = np.array([42.29, 41.43, 41.21, 40.98, 40.93])\nfit0824_c = -25.70",
"_____no_output_____"
],
[
"plt.figure('newer fit test')",
"_____no_output_____"
],
[
"fig, ax = plt.subplots(1,1) \n\nax.plot(wavelengths, input_sed+input_c, label='Input Values')\n# ax.plot(wavelengths, [20.36, 18.76, 17.99, 17.67, 17.39]) # the in text file numbers.\nax.plot(wavelengths, fit1_sed+fit1_c, label='Old Full Fit')\nax.plot(wavelengths, fit0824_sed+fit0824_c, label='08-24 Fit')\n\nplt.gca().invert_yaxis()\nax.set_xticks(wavelengths)\nax.set_xticklabels(filters)\nax.set_xlabel('SDSS Filters')\nax.set_ylabel('Magnitude [mag]')\n\nplt.legend()\nplt.savefig('2017-09-05 not getting correct sed.pdf')\nplt.show()",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
]
] |
e7fc1d74a65f9e55845d7b3c68a1e4da2d9381f2 | 65,946 | ipynb | Jupyter Notebook | pt1/scraping_classwork.ipynb | mengyuan616/scraping-lecture | e385f460beb316759dd8fc772e9d7705d4fdf1d9 | [
"MIT"
] | null | null | null | pt1/scraping_classwork.ipynb | mengyuan616/scraping-lecture | e385f460beb316759dd8fc772e9d7705d4fdf1d9 | [
"MIT"
] | null | null | null | pt1/scraping_classwork.ipynb | mengyuan616/scraping-lecture | e385f460beb316759dd8fc772e9d7705d4fdf1d9 | [
"MIT"
] | null | null | null | 35.340836 | 96 | 0.394444 | [
[
[
"empty"
]
]
] | [
"empty"
] | [
[
"empty"
]
] |
e7fc27e0968497ef49e3253e59d28d3a7875bd01 | 150,979 | ipynb | Jupyter Notebook | 006_bd_proj_dennis.ipynb | sh5864/bigdata-proj | 979b11d106dd14bfa76d286ed2fc85f77fb93802 | [
"MIT"
] | null | null | null | 006_bd_proj_dennis.ipynb | sh5864/bigdata-proj | 979b11d106dd14bfa76d286ed2fc85f77fb93802 | [
"MIT"
] | null | null | null | 006_bd_proj_dennis.ipynb | sh5864/bigdata-proj | 979b11d106dd14bfa76d286ed2fc85f77fb93802 | [
"MIT"
] | null | null | null | 140.445581 | 27,074 | 0.576014 | [
[
[
"**Summary Of Findings**:\nIt was found that wildfire frequency across the United State has been increasing in the past decade. Although fire and fire damage was generally localized to mostly the west coast in the past, fire frequency has been gradually increasing in states east of it in the continental US; in 2021, midwestern states have had fire counts similar to those found in West Coast states in 2014 and 2015. \n\n Although fire frequency has been increasing, the overall area of land affected by wildfires has remained within a similar range for the past 20 years. It was also found that the number of recorded fires, did not necessarily correlate with the area affected for each states. While the degree of fire coverage has remained relatively consistent, the distribution of burned area across the United States has changed over the years. In the early 2000s, the majority of wildfire area was almost entirely localized to Alaska and the West coast; by 2021, the majority of the US had seen more than minimal fire coverage. \n\n Throughout the past decade, hot spots on on the continental US have remained relatively consistent; the west coast will probably continue to be considered hot spots, and some may become prominent in the Midwest. Regardless of hot spots, fire activity has generally increased across the United States.",
"_____no_output_____"
]
],
[
[
"!apt-get install openjdk-8-jdk-headless -qq > /dev/null",
"_____no_output_____"
],
[
"",
"_____no_output_____"
],
[
"!wget https://dlcdn.apache.org/spark/spark-3.2.0/spark-3.2.0-bin-hadoop3.2.tgz",
"--2021-12-14 04:34:01-- https://dlcdn.apache.org/spark/spark-3.2.0/spark-3.2.0-bin-hadoop3.2.tgz\nResolving dlcdn.apache.org (dlcdn.apache.org)... 151.101.2.132, 2a04:4e42::644\nConnecting to dlcdn.apache.org (dlcdn.apache.org)|151.101.2.132|:443... connected.\nHTTP request sent, awaiting response... 200 OK\nLength: 300965906 (287M) [application/x-gzip]\nSaving to: ‘spark-3.2.0-bin-hadoop3.2.tgz’\n\nspark-3.2.0-bin-had 100%[===================>] 287.02M 180MB/s in 1.6s \n\n2021-12-14 04:34:03 (180 MB/s) - ‘spark-3.2.0-bin-hadoop3.2.tgz’ saved [300965906/300965906]\n\n"
]
],
[
[
"",
"_____no_output_____"
]
],
[
[
"!tar xvzf spark-3.2.0-bin-hadoop3.2.tgz",
"_____no_output_____"
],
[
"!ls /content/spark-3.2.0-bin-hadoop3.2",
"bin data\tjars\t LICENSE NOTICE R\t\t RELEASE yarn\nconf examples\tkubernetes licenses python README.md sbin\n"
],
[
"# Set the ‘environment’ path\nimport os\n#os.environ[\"JAVA_HOME\"] = \"/usr/lib/jvm/java-8-openjdk-amd64\"\nos.environ[\"SPARK_HOME\"] = \"/content/spark-3.2.0-bin-hadoop3.2\"",
"_____no_output_____"
],
[
"!pip install -q findspark\n\nimport findspark\nfindspark.init()",
"_____no_output_____"
],
[
"from pyspark.sql import SparkSession\nfrom pyspark.sql.functions import *\nfrom pyspark.sql.types import *\nfrom pyspark.sql.window import Window\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n\nspark = SparkSession.builder\\\n .master(\"local[*]\")\\\n .appName(\"final-project\")\\\n .getOrCreate()\n\nsc = spark.sparkContext\nsc.setLogLevel(\"ERROR\")",
"_____no_output_____"
],
[
"sc",
"_____no_output_____"
],
[
"#The wildfire location database \nlocatData = spark.read.option(\"header\",True) \\\n.option(\"inferSchema\", True) \\\n.csv(\"WFIGS_-_Wildland_Fire_Locations_Full_History.csv\")",
"_____no_output_____"
]
],
[
[
"Fire Frequency By Year",
"_____no_output_____"
]
],
[
[
"#Fires not considered \"wildfire\" are first filtered out\n#locTime will be used to focus on the frequency of wildfires per state\n#POOState - Location of wildfire at time of discovery\n#FireDiscoveryDateTime - Date when the fire was discovered.\n\nlocatData = locatData.filter(locatData[\"IncidentTypeCategory\"] == \"WF\")\nlocTime = locatData.select(substring(locatData[\"POOState\"],0,6).alias(\"State Occurred\"),\n substring(locatData['FireDiscoveryDateTime'],0,4).alias(\"Year\"))",
"_____no_output_____"
],
[
"#Unusable rows are filtered out.\nlocTime = locTime.filter((locTime[\"year\"].isNotNull())& (locTime[\"State Occurred\"].isNotNull()))\\",
"_____no_output_____"
]
],
[
[
"A significant difference between the wildfire frequency was found between 2013 and 2014; it is assumed the years before 2014 had incomplete data.",
"_____no_output_____"
]
],
[
[
"\nlocTime.groupBy(\"Year\").count().orderBy(\"year\").show()\n",
"+----+-----+\n|Year|count|\n+----+-----+\n|2003| 1|\n|2004| 1|\n|2008| 1|\n|2009| 1|\n|2010| 2|\n|2014|12634|\n|2015|19633|\n|2016|19798|\n|2017|25114|\n|2018|22627|\n|2019|25451|\n|2020|33348|\n|2021|34488|\n+----+-----+\n\n"
]
],
[
[
"Number of Fires across the US per state",
"_____no_output_____"
]
],
[
[
"#To gain insights into the US results, areas outside the US are filtered out.\nlocTime = locTime.filter(locTime[\"Year\"] > 2013)\nlocTime = locTime.filter(locTime[\"State Occurred\"].contains(\"US\"))\nlocTime = locTime.withColumn(\"State Occurred\",substring(locTime[\"State Occurred\"],4,6))",
"_____no_output_____"
],
[
"locTime.show()",
"+--------------+----+\n|State Occurred|Year|\n+--------------+----+\n| MT|2020|\n| CA|2020|\n| MT|2017|\n| CA|2019|\n| CA|2016|\n| MS|2020|\n| UT|2019|\n| MT|2020|\n| SD|2020|\n| CA|2015|\n| FL|2019|\n| AZ|2019|\n| CA|2017|\n| ID|2020|\n| CA|2016|\n| MT|2019|\n| UT|2016|\n| MS|2019|\n| MN|2016|\n| CA|2017|\n+--------------+----+\nonly showing top 20 rows\n\n"
],
[
"totalFiresPerState = locTime.groupBy(\"State Occurred\").count()",
"_____no_output_____"
],
[
"import plotly.express as px\nimport pandas as pd\n\nfig = px.choropleth(totalFiresPerState.toPandas(), locations='State Occurred',locationmode = \"USA-states\",color = \"count\", \n scope='usa')\nfig.update_layout(\n width=800,\n height=600)\n\nfig.show()",
"_____no_output_____"
]
],
[
[
"Findings:\nFrom the figure above, it can be seen that fires in the last decade have most occurred in the western portion of the United States, and have been mostly prevalent on the west coast as well as Montana and Arizona.",
"_____no_output_____"
],
[
"Number of Fires Per Year Per State",
"_____no_output_____"
]
],
[
[
"firePerState = locTime.filter(locTime[\"year\"].isNotNull())\\\n.groupBy(\"year\",'State Occurred').count().orderBy(\"Year\")",
"_____no_output_____"
],
[
"firePerState.show()",
"+----+--------------+-----+\n|year|State Occurred|count|\n+----+--------------+-----+\n|2014| NM| 517|\n|2014| UT| 909|\n|2014| WI| 11|\n|2014| HI| 2|\n|2014| VA| 14|\n|2014| WA| 827|\n|2014| GA| 25|\n|2014| WY| 320|\n|2014| MI| 66|\n|2014| ID| 993|\n|2014| WV| 1|\n|2014| AR| 15|\n|2014| MO| 7|\n|2014| SC| 21|\n|2014| NE| 12|\n|2014| NV| 529|\n|2014| PA| 16|\n|2014| AZ| 948|\n|2014| CO| 619|\n|2014| AL| 32|\n+----+--------------+-----+\nonly showing top 20 rows\n\n"
],
[
"import plotly.express as px\nimport pandas as pd\n\nfig = px.choropleth(firePerState.toPandas(), locations='State Occurred',locationmode = \"USA-states\",color = \"count\",range_color = [0,5000],\n animation_frame=\"year\", animation_group=\"State Occurred\",scope='usa')\nfig.update_layout(\n width=800,\n height=600)\n\nfig.show()",
"_____no_output_____"
]
],
[
[
"**Findings** : From the above, figure, we see a general rise in wildfire occurences over the years. The west coast has consistently had the highest number of fires over the years. Originally the majority of fires had been originating in the west coast, but states east of it have steadily seen increasing occurences.In 2021, midwestern states such as North Dakota and Minnesota have had fire counts similar to those of western states in 2014 and 2015. It should be noted that data for 2021 is incomplete, so there may still be a gradual increase in fire count over the year.",
"_____no_output_____"
],
[
"Acres Burned In Historical Data Across the US",
"_____no_output_____"
]
],
[
[
"#Primarily tracks historical fire Perimeters from 2000-2018\noldPerimData = spark.read.option(\"header\",True) \\\n.option(\"inferSchema\", True) \\\n.csv(\"Historic_GeoMAC_Perimeters_Combined_2000-2018.csv\")",
"_____no_output_____"
],
[
"#Meaningful data is cleaned and selected\noldPerimTime = oldPerimData.select((oldPerimData[\"state\"]).alias(\"State Occurred\"),\n oldPerimData[\"gisacres\"].alias(\"area(acres)\"),\n oldPerimData['fireyear'].alias(\"year\"))\noldPerimTime = oldPerimTime.filter(oldPerimTime[\"year\"].isNotNull())\noldPerimTime = oldPerimTime.filter(oldPerimTime[\"year\"].cast(\"int\").isNotNull())",
"_____no_output_____"
],
[
"oldOverall = oldPerimTime.groupBy(\"year\").agg(sum('area(acres)').alias(\"area (acres)\")).orderBy(\"year\")",
"_____no_output_____"
],
[
"#The Data in this csv primarily tracks the area of each recorded fire; data is mostly available for 2020 and 2021.\nperimData = spark.read.option(\"header\",True) \\\n.option(\"inferSchema\", True) \\\n.csv(\"WFIGS_-_Wildland_Fire_Perimeters_Full_History.csv\")",
"_____no_output_____"
],
[
"#Data similar to columns found in oldPerimTime is cleaned and selected here.\nrecentTime = perimData.select(substring(perimData[\"irwin_POOState\"],4,6).alias(\"State Occurred\"),\n perimData[\"poly_Acres_AutoCalc\"].alias(\"area(acres)\"),\n substring(perimData['irwin_ContainmentDateTime'],0,4).alias(\"year\"))\nrecentTime = recentTime.filter(recentTime[\"year\"].isNotNull())",
"_____no_output_____"
],
[
"recentOverall = recentTime.groupBy(\"year\").agg(sum('area(acres)').alias(\"area (acres)\")).orderBy(\"year\")\nrecentOverall = recentOverall.filter((recentOverall[\"year\"] == 2020) | (recentOverall[\"year\"] == 2021))",
"_____no_output_____"
],
[
"recentOverall.show()",
"_____no_output_____"
],
[
"combinedOverall = oldOverall.union(recentOverall)",
"_____no_output_____"
],
[
"yearMonth = combinedOverall.select(\"year\").rdd.flatMap(lambda x: x).collect()\nareaDamage = combinedOverall.select(\"area (acres)\").rdd.flatMap(lambda x: x).collect()\nticks = [0,5,10,15,20]\nplt.plot(yearMonth,areaDamage)\nplt.xticks(ticks)\nplt.xlabel(\"Year\")\nplt.ylabel(\"Area Affected (acres)\")\nplt.title(\"Wildfire Damage from 2000-2021\")",
"_____no_output_____"
]
],
[
[
"**Findings**: In the above figure, it is found that the total area damaged by wildfires has been inconsistent throughout the past two decades; while fires are increasing in frequency, the area affected does not necessarily increase.",
"_____no_output_____"
],
[
"Total Area Burned Per State",
"_____no_output_____"
]
],
[
[
"damagePerState = oldPerimTime.union(recentTime)",
"_____no_output_____"
],
[
"damagePerStateOverall= damagePerState.groupBy(\"State Occurred\").agg(sum('area(acres)').alias(\"total area burned (acres)\"))",
"_____no_output_____"
],
[
"import plotly.express as px\nimport pandas as pd\nfig = px.choropleth(damagePerStateOverall.toPandas(), locations='State Occurred',locationmode = \"USA-states\",color = \"total area burned (acres)\", \n scope='usa')\nfig.update_layout(\n width=800,\n height=600)\n\nfig.show()",
"_____no_output_____"
]
],
[
[
"**Findings**: The above map shows that the most significant damage was found on the west coast; this is similar and supports the findings found in the occurences map. Some States that had a high number of fire occurences such as Texas have not seen proportional quantities of acres burned. In contrast to its low number of reported fires over the years, Alaska has the most significant fire damage found of any state.",
"_____no_output_____"
],
[
"Area Burned Per State Per Month",
"_____no_output_____"
]
],
[
[
"damagePerStateYearly= damagePerState.groupBy(\"year\",\"State Occurred\").agg(sum('area(acres)').alias(\"total area burned (acres)\")).orderBy(\"year\")",
"_____no_output_____"
],
[
"import plotly.express as px\nimport pandas as pd\nfig = px.choropleth(damagePerStateYearly.toPandas(), locations='State Occurred',locationmode = \"USA-states\",color = \"total area burned (acres)\", \n range_color = [0,1000000],animation_frame=\"year\", animation_group=\"State Occurred\",scope='usa')\nfig.update_layout(\n width=800,\n height=600)\n\nfig.show()",
"_____no_output_____"
]
],
[
[
"**Findings**: Until 2005, damage was almost exclusively reported on the west coast.From 2010 onwards, most of the states had at least some reported damage. Although Alaska has the most damage overall, the area burned in the state has lessened recently. Although the total area burned has remained inconsistent throughout the years, it has been more evenly distributed throughout the United States. It should be noted that a lack of data from 2019 may affect the trends.",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
]
] |
e7fc2fcab649236fce830ceb290b98b2c3113f4f | 297,181 | ipynb | Jupyter Notebook | MusicRecommendation/.ipynb_checkpoints/TestHDFTables-checkpoint.ipynb | HiKapok/KaggleCompetitions | 3d8c0e8d8f98334980c97f761262316edcd6d5e9 | [
"MIT"
] | 1 | 2018-06-27T14:14:01.000Z | 2018-06-27T14:14:01.000Z | MusicRecommendation/.ipynb_checkpoints/TestHDFTables-checkpoint.ipynb | HiKapok/KaggleCompetitions | 3d8c0e8d8f98334980c97f761262316edcd6d5e9 | [
"MIT"
] | 1 | 2017-12-30T01:01:52.000Z | 2018-01-05T04:09:32.000Z | MusicRecommendation/.ipynb_checkpoints/TestHDFTables-checkpoint.ipynb | HiKapok/KaggleCompetitions | 3d8c0e8d8f98334980c97f761262316edcd6d5e9 | [
"MIT"
] | 1 | 2018-06-27T14:14:16.000Z | 2018-06-27T14:14:16.000Z | 72.289224 | 385 | 0.177922 | [
[
[
"# The line below sets the environment\n# variable CUDA_VISIBLE_DEVICES\nget_ipython().magic('env CUDA_VISIBLE_DEVICES = 1')\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport multiprocessing as mp # will come in handy due to the size of the data\nimport os.path\nimport random\nimport time\nimport io\nfrom datetime import datetime\nimport gc # garbage collector\nimport sklearn\nimport xgboost as xgb\nfrom sklearn.preprocessing import LabelEncoder\nimport math\nfrom collections import defaultdict\nimport re\nimport logging\n\n# This is a bit of magic to make matplotlib figures appear inline in the notebook\n# rather than in a new window.\nget_ipython().magic('matplotlib inline')\nplt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots\nplt.rcParams['image.interpolation'] = 'nearest'\nplt.rcParams['image.cmap'] = 'gray'\n\n# Some more magic so that the notebook will reload external python modules;\n# see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython\nget_ipython().magic('load_ext autoreload')\nget_ipython().magic('autoreload 2')",
"env: CUDA_VISIBLE_DEVICES=1\n"
]
],
[
[
"## Write a pandas dataframe to disk as gunzip compressed csv\n- df.to_csv('dfsavename.csv.gz', compression='gzip')\n\n## Read from disk\n- df = pd.read_csv('dfsavename.csv.gz', compression='gzip')\n\n## Magic useful\n- %%timeit for the whole cell\n- %timeit for the specific line\n- %%latex to render the cell as a block of latex\n- %prun and %%prun",
"_____no_output_____"
]
],
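[
[
"# A hedged round-trip sketch of the compressed-CSV tip above; the file name\n# 'demo.csv.gz' is purely illustrative.\ndemo = pd.DataFrame({'a': [1, 2], 'b': [3.0, 4.0]})\ndemo.to_csv('demo.csv.gz', compression='gzip')\ndemo_back = pd.read_csv('demo.csv.gz', compression='gzip', index_col=0)\nprint(demo_back.equals(demo))",
"_____no_output_____"
]
],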
[
[
"DATASET_PATH = '/media/rs/0E06CD1706CD0127/Kapok/WSDM/'\nHDF_FILENAME = DATASET_PATH + 'music_info.h5'\nHDF_TRAIN_FEATURE_FILENAME = DATASET_PATH + 'music_train_feature_part.h5'\nHDF_TEST_FEATURE_FILENAME = DATASET_PATH + 'music_test_feature_part.h5'",
"_____no_output_____"
],
[
"def set_logging(logger_name, logger_file_name):\n log = logging.getLogger(logger_name)\n log.setLevel(logging.DEBUG)\n\n # create formatter and add it to the handlers\n print_formatter = logging.Formatter('%(message)s')\n file_formatter = logging.Formatter('%(asctime)s - %(name)s_%(levelname)s: %(message)s')\n\n # create file handler which logs even debug messages\n fh = logging.FileHandler(logger_file_name, mode='w')\n fh.setLevel(logging.DEBUG)\n fh.setFormatter(file_formatter)\n log.addHandler(fh)\n # both output to console and file\n consoleHandler = logging.StreamHandler()\n consoleHandler.setFormatter(print_formatter)\n log.addHandler(consoleHandler)\n \n return log",
"_____no_output_____"
],
[
"log = set_logging('MUSIC', DATASET_PATH + 'music_test_xgboost.log')\nlog.info('here is an info message.')",
"here is an info message.\n"
],
[
"store_data = pd.HDFStore(HDF_FILENAME)",
"_____no_output_____"
],
[
"log.info(store_data['all_train_withextra'].head())",
" msno \\\n0 FGtllVqz18RPiwJj/edr2gV78zirAiY/9SmYvia+kCg= \n1 Xumu+NIjS6QYVxDS4/t3SawvJ7viT9hPKXmf0RtLNx8= \n2 Xumu+NIjS6QYVxDS4/t3SawvJ7viT9hPKXmf0RtLNx8= \n3 Xumu+NIjS6QYVxDS4/t3SawvJ7viT9hPKXmf0RtLNx8= \n4 FGtllVqz18RPiwJj/edr2gV78zirAiY/9SmYvia+kCg= \n\n song_id source_system_tab \\\n0 BBzumQNXUHKdEBOB7mAJuzok+IJA1c2Ryg/yzTF6tik= explore \n1 bhp/MpSNoqoxOIB+/l8WPqu6jldth4DIpCm3ayXnJqM= my library \n2 JNWfrrC7zNN7BdMpsISKa4Mw+xVJYNnxXh3/Epw7QgY= my library \n3 2A87tzfnJTSWqD7gIZHisolhe4DMdzkbd6LzO1KHjNs= my library \n4 3qm6XTZ6MOCU11x8FIVbAGH5l5uMkT3/ZalWG1oo2Gc= explore \n\n source_screen_name source_type target city bd gender \\\n0 Explore online-playlist 1 1 0 nan \n1 Local playlist more local-playlist 1 13 24 female \n2 Local playlist more local-playlist 1 13 24 female \n3 Local playlist more local-playlist 1 13 24 female \n4 Explore online-playlist 1 1 0 nan \n\n registered_via registration_init_time expiration_date song_length genre_ids \\\n0 7 20120102 20171005 206471.0 359 \n1 9 20110525 20170911 284584.0 1259 \n2 9 20110525 20170911 225396.0 1259 \n3 9 20110525 20170911 255512.0 1019 \n4 7 20120102 20171005 187802.0 1011 \n\n artist_name composer lyricist language \\\n0 Bastille Dan Smith| Mark Crew nan 52.0 \n1 Various Artists nan nan 52.0 \n2 Nas N. Jones、W. Adams、J. Lordan、D. Ingle nan 52.0 \n3 Soundway Kwadwo Donkoh nan -1.0 \n4 Brett Young Brett Young| Kelly Archer| Justin Ebach nan 52.0 \n\n name isrc \n0 Good Grief GBUM71602854 \n1 Lords of Cardboard US3C69910183 \n2 Hip Hop Is Dead(Album Version (Edited)) USUM70618761 \n3 Disco Africa GBUQH1000063 \n4 Sleep Without You QM3E21606003 \n"
],
[
"def clip_by_percent(hist, num_percent):\n return hist[(hist >= hist[int( len(hist.index) * num_percent )]) == True]\ndef clip_by_value(hist, value):\n return hist[(hist >= value) == True]",
"_____no_output_____"
],
[
"def create_bag_of_words(input_df, percent, column_name):\n input_hist = input_df[column_name].value_counts(sort=True, ascending=False)\n input_select = clip_by_percent(input_hist, percent).index\n log.info('{} item are selected.'.format(len(input_select)))\n # the total number of the other items\n total_others = np.sum(input_hist) - np.sum(input_hist[input_select])\n # all hist values are log transformed accouting the popularity\n clip_hist_with_log = defaultdict(lambda: np.log(total_others))\n for k,v in dict(np.log(input_hist[input_select])).items():\n clip_hist_with_log[k] = v\n# print(input_hist[input_select]) \n# print(dict(np.log(input_hist[input_select])))\n input_map = defaultdict(lambda: column_name + ' ' + 'others')\n for input_item in input_select:\n input_map[input_item] = column_name + ' ' + input_item\n # item name in input_map are \"column_name + ' ' + input_item\"\n # item name in clip_hist_with_log are \"input_item\"\n return input_map, clip_hist_with_log",
"_____no_output_____"
],
[
"# 181 ms ± 420 µs\ndef word_bag_encode(input_data, column, word_map, word_hist):\n col_index = input_data.columns.get_loc(column) + 1\n count_list = [0 for _ in range(len(word_map))]\n count_dict = dict(zip(list(word_map.keys()), count_list))\n count_dict['others'] = 0\n new_columns = [column + ' ' + s for s in count_dict.keys()]\n all_df = pd.DataFrame(data = None, columns = new_columns)\n delay_rate = 0.8 # must be less than 1\n for cur_row in input_data.itertuples():\n if isinstance(cur_row[col_index], str): \n df = pd.DataFrame([list(count_dict.values())], columns=new_columns)\n splited_list = re.split(r'[|/]+',cur_row[col_index])\n list_len = len(splited_list)\n # the weight of each position of the array, are decayed by the ratio delay_rate, and their sum are 1\n # so according to the geometric series summation formula, the iniatial weight are caculate as follow\n initial_weight = (1-delay_rate)/(1 - np.power(delay_rate, list_len))\n for index, s in enumerate(splited_list): \n word_stripped = s.strip(' \\\"\\t\\s\\n')\n df[word_map.get(word_stripped, column + ' others')] += initial_weight / (word_hist.get(word_stripped, word_hist['others'])) #word_hist[word_stripped]\n # defaultdict will auto insert missing key\n #df[word_map[word_stripped]] += initial_weight / (word_hist.get(word_stripped, word_hist['others'])) #word_hist[word_stripped]\n initial_weight *= delay_rate\n all_df = all_df.append(df, ignore_index=True)\n # NAN fix\n else:\n all_df = all_df.append(pd.DataFrame([[0] * len(new_columns)], columns=new_columns), ignore_index=True)\n return all_df",
"_____no_output_____"
],
[
"# 7.09 ms ± 43.2 µs\ndef word_bag_encode_apply(input_data, column, word_map, word_hist):\n new_columns = [column + ' ' + s for s in word_map.keys()]\n new_columns.append(column + ' ' + 'others')\n delay_rate = 0.8 # must be less than 1\n \n def encode_routine(str_value):\n series_dict = dict(zip(new_columns, [0.] * len(new_columns)))\n if isinstance(str_value, str): \n splited_list = re.split(r'[|/]+',str_value)\n list_len = len(splited_list)\n # the weight of each position of the array, are decayed by the ratio delay_rate, and their sum are 1\n # so according to the geometric series summation formula, the iniatial weight are caculate as follow\n initial_weight = (1-delay_rate)/(1 - np.power(delay_rate, list_len))\n for index, s in enumerate(splited_list): \n word_stripped = s.strip(' \\\"\\t\\s\\n')\n series_dict[word_map.get(word_stripped, column + ' others')] += initial_weight / (word_hist.get(word_stripped, word_hist['others'])) #word_hist[word_stripped]\n initial_weight *= delay_rate\n return pd.Series(series_dict)\n return input_data[column].apply(lambda s: encode_routine(s))",
"_____no_output_____"
],
[
"# 171 µs ± 693 ns\ndef word_bag_encode_numpy(input_data, column, word_map, word_hist):\n new_columns = [s for s in word_map.keys()]\n new_columns.append('others')\n delay_rate = 0.8 # must be less than 1\n num_columns = len(new_columns)\n str_indice_dict = dict(zip(new_columns, list(range(num_columns))))\n def encode_routine(str_value):\n temp_hist = np.zeros(num_columns, dtype=float)\n if isinstance(str_value, str): \n splited_list = re.split(r'[|/]+',str_value)\n list_len = len(splited_list)\n # the weight of each position of the array, are decayed by the ratio delay_rate, and their sum are 1\n # so according to the geometric series summation formula, the iniatial weight are caculate as follow\n initial_weight = (1-delay_rate)/(1 - np.power(delay_rate, list_len))\n for index, s in enumerate(splited_list): \n word_stripped = s.strip(' \\\"\\t\\s\\n')\n temp_hist[str_indice_dict.get(word_stripped, num_columns-1)] += initial_weight / (word_hist.get(word_stripped, word_hist['others'])) #word_hist[word_stripped]\n initial_weight *= delay_rate\n return temp_hist\n # actually we cannot use vectorize #vf = np.vectorize(encode_routine)\n\n #def fromiter(x):\n #return np.fromiter((f(xi) for xi in x), x.dtype)\n\n numpy_str = np.array(input_data[column].values, dtype=object)\n #return np.array(map(encode_routine, numpy_str))\n #return np.fromiter((encode_routine(xi) for xi in numpy_str), numpy_str.dtype, count=len(numpy_str))\n return np.array([encode_routine(xi) for xi in numpy_str]), [column + ' ' + s for s in new_columns]",
"_____no_output_____"
],
[
"def feature_encoder_impl(source_data, column_name, map_dict, hist_dict):\n feature_array, head_name = word_bag_encode_numpy(source_data, column_name, map_dict, hist_dict)\n return pd.DataFrame(data = feature_array, columns = head_name)",
"_____no_output_____"
],
[
"def feature_encoder(filename_to_store, music_info_data, key_to_encode, batch_size):\n total_num_examples = len(music_info_data[key_to_encode].index)\n num_steps = int(total_num_examples / batch_size) + 1\n cur_step = 0\n next_step = 0\n composer_map, composer_hist = create_bag_of_words(music_info_data['all_composer'], 0.001, 'composer')\n artist_name_map, artist_name_hist = create_bag_of_words(music_info_data['all_artist_name'], 0.001, 'artist_name')\n lyricist_map, lyricist_hist = create_bag_of_words(music_info_data['all_lyricist'], 0.002, 'lyricist')\n h5store = pd.HDFStore(filename_to_store, mode='w', complib='zlib', complevel=1)\n for _step in range(num_steps):\n start_time = time.time()\n cur_batch_size = _step + 1 == num_steps and total_num_examples - cur_step or batch_size\n next_step = cur_step + cur_batch_size\n cur_batch_data = store_data[key_to_encode][cur_step:next_step]\n composer_feature = feature_encoder_impl(cur_batch_data, 'composer', composer_map, composer_hist)\n artist_name_feature = feature_encoder_impl(cur_batch_data, 'artist_name', artist_name_map, artist_name_hist)\n lyricist_feature = feature_encoder_impl(cur_batch_data, 'lyricist', lyricist_map, lyricist_hist)\n cur_batch_data.drop('composer', axis=1, inplace=True)\n cur_batch_data.drop('artist_name', axis=1, inplace=True)\n cur_batch_data.drop('lyricist', axis=1, inplace=True)\n #print(pd.concat([cur_batch_data, composer_feature, artist_name_feature, lyricist_feature], join='inner', axis=1, copy=True))\n #break\n table_to_save = pd.concat([composer_feature, artist_name_feature, lyricist_feature], join='inner', axis=1, copy=True)\n #print(dict(zip(table_to_save.columns, [150]*len(table_to_save.columns))))\n #break\n\n# if _step == 0:\n# h5store.append(key_to_encode, table_to_save, min_itemsize=dict(zip(table_to_save.columns, [150]*len(table_to_save.columns))))\n# else:\n h5store.append(key_to_encode, table_to_save)\n #break\n time_elapsed = time.time() - start_time\n if _step % 5 == 0:\n log.info('cur step: {} of {}, from {} to {}, {:5.3f}sec/batch.'.format(_step, num_steps, cur_step, next_step, time_elapsed))\n# print(composer_feature)\n# print(artist_name_feature)\n# print(lyricist_feature)\n# break\n cur_step = next_step\n ",
"_____no_output_____"
],
[
"log.info(datetime.now().strftime('%Y-%m-%d %H:%M:%S'))\nfeature_encoder(HDF_TRAIN_FEATURE_FILENAME, store_data, 'all_train_withextra', 102400)\nfeature_encoder(HDF_TEST_FEATURE_FILENAME, store_data, 'all_test_withextra', 102400)\nstore_data.close()\nlog.info(datetime.now().strftime('%Y-%m-%d %H:%M:%S'))",
"2017-10-26 10:59:19\n309 item are selected.\n232 item are selected.\n227 item are selected.\ncur step: 0 of 73, from 0 to 102400, 33.533sec/batch.\ncur step: 5 of 73, from 512000 to 614400, 36.732sec/batch.\ncur step: 10 of 73, from 1024000 to 1126400, 38.136sec/batch.\ncur step: 15 of 73, from 1536000 to 1638400, 36.956sec/batch.\ncur step: 20 of 73, from 2048000 to 2150400, 38.226sec/batch.\ncur step: 25 of 73, from 2560000 to 2662400, 36.120sec/batch.\ncur step: 30 of 73, from 3072000 to 3174400, 35.056sec/batch.\ncur step: 35 of 73, from 3584000 to 3686400, 34.683sec/batch.\ncur step: 40 of 73, from 4096000 to 4198400, 33.965sec/batch.\ncur step: 45 of 73, from 4608000 to 4710400, 36.831sec/batch.\ncur step: 50 of 73, from 5120000 to 5222400, 35.498sec/batch.\ncur step: 55 of 73, from 5632000 to 5734400, 37.884sec/batch.\ncur step: 60 of 73, from 6144000 to 6246400, 37.281sec/batch.\ncur step: 65 of 73, from 6656000 to 6758400, 36.716sec/batch.\ncur step: 70 of 73, from 7168000 to 7270400, 37.722sec/batch.\n309 item are selected.\n232 item are selected.\n227 item are selected.\ncur step: 0 of 25, from 0 to 102400, 18.410sec/batch.\ncur step: 5 of 25, from 512000 to 614400, 19.403sec/batch.\ncur step: 10 of 25, from 1024000 to 1126400, 22.217sec/batch.\ncur step: 15 of 25, from 1536000 to 1638400, 20.614sec/batch.\ncur step: 20 of 25, from 2048000 to 2150400, 19.979sec/batch.\n2017-10-26 11:52:11\n"
],
[
"h5store = pd.HDFStore(HDF_TRAIN_FEATURE_FILENAME, complib='zlib', complevel=1)",
"_____no_output_____"
],
[
"%%timeit\nprint(h5store.select('all_train_withextra','index>0 & index<10000'))",
" composer Bill Evan composer Hank William composer Max Martin \\\n1 0.0 0.0 0.0 \n2 0.0 0.0 0.0 \n3 0.0 0.0 0.0 \n4 0.0 0.0 0.0 \n5 0.0 0.0 0.0 \n6 0.0 0.0 0.0 \n7 0.0 0.0 0.0 \n8 0.0 0.0 0.0 \n9 0.0 0.0 0.0 \n10 0.0 0.0 0.0 \n11 0.0 0.0 0.0 \n12 0.0 0.0 0.0 \n13 0.0 0.0 0.0 \n14 0.0 0.0 0.0 \n15 0.0 0.0 0.0 \n16 0.0 0.0 0.0 \n17 0.0 0.0 0.0 \n18 0.0 0.0 0.0 \n19 0.0 0.0 0.0 \n20 0.0 0.0 0.0 \n21 0.0 0.0 0.0 \n22 0.0 0.0 0.0 \n23 0.0 0.0 0.0 \n24 0.0 0.0 0.0 \n25 0.0 0.0 0.0 \n26 0.0 0.0 0.0 \n27 0.0 0.0 0.0 \n28 0.0 0.0 0.0 \n29 0.0 0.0 0.0 \n30 0.0 0.0 0.0 \n... ... ... ... \n4588 0.0 0.0 0.0 \n4589 0.0 0.0 0.0 \n4590 0.0 0.0 0.0 \n4591 0.0 0.0 0.0 \n4592 0.0 0.0 0.0 \n4593 0.0 0.0 0.0 \n4594 0.0 0.0 0.0 \n4595 0.0 0.0 0.0 \n4596 0.0 0.0 0.0 \n4597 0.0 0.0 0.0 \n4598 0.0 0.0 0.0 \n4599 0.0 0.0 0.0 \n4600 0.0 0.0 0.0 \n4601 0.0 0.0 0.0 \n4602 0.0 0.0 0.0 \n4603 0.0 0.0 0.0 \n4604 0.0 0.0 0.0 \n4605 0.0 0.0 0.0 \n4606 0.0 0.0 0.0 \n4607 0.0 0.0 0.0 \n4608 0.0 0.0 0.0 \n4609 0.0 0.0 0.0 \n4610 0.0 0.0 0.0 \n4611 0.0 0.0 0.0 \n4612 0.0 0.0 0.0 \n4613 0.0 0.0 0.0 \n4614 0.0 0.0 0.0 \n4615 0.0 0.0 0.0 \n4616 0.0 0.0 0.0 \n4617 0.0 0.0 0.0 \n\n composer Bryan Adam composer Jean Frankfurter composer Lars Ulrich \\\n1 0.0 0.0 0.0 \n2 0.0 0.0 0.0 \n3 0.0 0.0 0.0 \n4 0.0 0.0 0.0 \n5 0.0 0.0 0.0 \n6 0.0 0.0 0.0 \n7 0.0 0.0 0.0 \n8 0.0 0.0 0.0 \n9 0.0 0.0 0.0 \n10 0.0 0.0 0.0 \n11 0.0 0.0 0.0 \n12 0.0 0.0 0.0 \n13 0.0 0.0 0.0 \n14 0.0 0.0 0.0 \n15 0.0 0.0 0.0 \n16 0.0 0.0 0.0 \n17 0.0 0.0 0.0 \n18 0.0 0.0 0.0 \n19 0.0 0.0 0.0 \n20 0.0 0.0 0.0 \n21 0.0 0.0 0.0 \n22 0.0 0.0 0.0 \n23 0.0 0.0 0.0 \n24 0.0 0.0 0.0 \n25 0.0 0.0 0.0 \n26 0.0 0.0 0.0 \n27 0.0 0.0 0.0 \n28 0.0 0.0 0.0 \n29 0.0 0.0 0.0 \n30 0.0 0.0 0.0 \n... ... ... ... \n4588 0.0 0.0 0.0 \n4589 0.0 0.0 0.0 \n4590 0.0 0.0 0.0 \n4591 0.0 0.0 0.0 \n4592 0.0 0.0 0.0 \n4593 0.0 0.0 0.0 \n4594 0.0 0.0 0.0 \n4595 0.0 0.0 0.0 \n4596 0.0 0.0 0.0 \n4597 0.0 0.0 0.0 \n4598 0.0 0.0 0.0 \n4599 0.0 0.0 0.0 \n4600 0.0 0.0 0.0 \n4601 0.0 0.0 0.0 \n4602 0.0 0.0 0.0 \n4603 0.0 0.0 0.0 \n4604 0.0 0.0 0.0 \n4605 0.0 0.0 0.0 \n4606 0.0 0.0 0.0 \n4607 0.0 0.0 0.0 \n4608 0.0 0.0 0.0 \n4609 0.0 0.0 0.0 \n4610 0.0 0.0 0.0 \n4611 0.0 0.0 0.0 \n4612 0.0 0.0 0.0 \n4613 0.0 0.0 0.0 \n4614 0.0 0.0 0.0 \n4615 0.0 0.0 0.0 \n4616 0.0 0.0 0.0 \n4617 0.0 0.0 0.0 \n\n composer Steve Mac composer 徐嘉良 composer Björn Ulvaeu \\\n1 0.0 0.0 0.0 \n2 0.0 0.0 0.0 \n3 0.0 0.0 0.0 \n4 0.0 0.0 0.0 \n5 0.0 0.0 0.0 \n6 0.0 0.0 0.0 \n7 0.0 0.0 0.0 \n8 0.0 0.0 0.0 \n9 0.0 0.0 0.0 \n10 0.0 0.0 0.0 \n11 0.0 0.0 0.0 \n12 0.0 0.0 0.0 \n13 0.0 0.0 0.0 \n14 0.0 0.0 0.0 \n15 0.0 0.0 0.0 \n16 0.0 0.0 0.0 \n17 0.0 0.0 0.0 \n18 0.0 0.0 0.0 \n19 0.0 0.0 0.0 \n20 0.0 0.0 0.0 \n21 0.0 0.0 0.0 \n22 0.0 0.0 0.0 \n23 0.0 0.0 0.0 \n24 0.0 0.0 0.0 \n25 0.0 0.0 0.0 \n26 0.0 0.0 0.0 \n27 0.0 0.0 0.0 \n28 0.0 0.0 0.0 \n29 0.0 0.0 0.0 \n30 0.0 0.0 0.0 \n... ... ... ... \n4588 0.0 0.0 0.0 \n4589 0.0 0.0 0.0 \n4590 0.0 0.0 0.0 \n4591 0.0 0.0 0.0 \n4592 0.0 0.0 0.0 \n4593 0.0 0.0 0.0 \n4594 0.0 0.0 0.0 \n4595 0.0 0.0 0.0 \n4596 0.0 0.0 0.0 \n4597 0.0 0.0 0.0 \n4598 0.0 0.0 0.0 \n4599 0.0 0.0 0.0 \n4600 0.0 0.0 0.0 \n4601 0.0 0.0 0.0 \n4602 0.0 0.0 0.0 \n4603 0.0 0.0 0.0 \n4604 0.0 0.0 0.0 \n4605 0.0 0.0 0.0 \n4606 0.0 0.0 0.0 \n4607 0.0 0.0 0.0 \n4608 0.0 0.0 0.0 \n4609 0.0 0.0 0.0 \n4610 0.0 0.0 0.0 \n4611 0.0 0.0 0.0 \n4612 0.0 0.0 0.0 \n4613 0.0 0.0 0.0 \n4614 0.0 0.0 0.0 \n4615 0.0 0.0 0.0 \n4616 0.0 0.0 0.0 \n4617 0.0 0.0 0.0 \n\n composer Franz Schubert ... lyricist inst \\\n1 0.0 ... 0.0 \n2 0.0 ... 
0.0 \n3 0.0 ... 0.0 \n4 0.0 ... 0.0 \n5 0.0 ... 0.0 \n6 0.0 ... 0.0 \n7 0.0 ... 0.0 \n8 0.0 ... 0.0 \n9 0.0 ... 0.0 \n10 0.0 ... 0.0 \n11 0.0 ... 0.0 \n12 0.0 ... 0.0 \n13 0.0 ... 0.0 \n14 0.0 ... 0.0 \n15 0.0 ... 0.0 \n16 0.0 ... 0.0 \n17 0.0 ... 0.0 \n18 0.0 ... 0.0 \n19 0.0 ... 0.0 \n20 0.0 ... 0.0 \n21 0.0 ... 0.0 \n22 0.0 ... 0.0 \n23 0.0 ... 0.0 \n24 0.0 ... 0.0 \n25 0.0 ... 0.0 \n26 0.0 ... 0.0 \n27 0.0 ... 0.0 \n28 0.0 ... 0.0 \n29 0.0 ... 0.0 \n30 0.0 ... 0.0 \n... ... ... ... \n4588 0.0 ... 0.0 \n4589 0.0 ... 0.0 \n4590 0.0 ... 0.0 \n4591 0.0 ... 0.0 \n4592 0.0 ... 0.0 \n4593 0.0 ... 0.0 \n4594 0.0 ... 0.0 \n4595 0.0 ... 0.0 \n4596 0.0 ... 0.0 \n4597 0.0 ... 0.0 \n4598 0.0 ... 0.0 \n4599 0.0 ... 0.0 \n4600 0.0 ... 0.0 \n4601 0.0 ... 0.0 \n4602 0.0 ... 0.0 \n4603 0.0 ... 0.0 \n4604 0.0 ... 0.0 \n4605 0.0 ... 0.0 \n4606 0.0 ... 0.0 \n4607 0.0 ... 0.0 \n4608 0.0 ... 0.0 \n4609 0.0 ... 0.0 \n4610 0.0 ... 0.0 \n4611 0.0 ... 0.0 \n4612 0.0 ... 0.0 \n4613 0.0 ... 0.0 \n4614 0.0 ... 0.0 \n4615 0.0 ... 0.0 \n4616 0.0 ... 0.0 \n4617 0.0 ... 0.0 \n\n lyricist Johnny Mercer lyricist Claude Kelly lyricist 易家揚 \\\n1 0.0 0.0 0.000000 \n2 0.0 0.0 0.000000 \n3 0.0 0.0 0.000000 \n4 0.0 0.0 0.000000 \n5 0.0 0.0 0.000000 \n6 0.0 0.0 0.000000 \n7 0.0 0.0 0.000000 \n8 0.0 0.0 0.000000 \n9 0.0 0.0 0.000000 \n10 0.0 0.0 0.000000 \n11 0.0 0.0 0.000000 \n12 0.0 0.0 0.000000 \n13 0.0 0.0 0.000000 \n14 0.0 0.0 0.000000 \n15 0.0 0.0 0.000000 \n16 0.0 0.0 0.000000 \n17 0.0 0.0 0.000000 \n18 0.0 0.0 0.000000 \n19 0.0 0.0 0.000000 \n20 0.0 0.0 0.000000 \n21 0.0 0.0 0.000000 \n22 0.0 0.0 0.000000 \n23 0.0 0.0 0.000000 \n24 0.0 0.0 0.000000 \n25 0.0 0.0 0.000000 \n26 0.0 0.0 0.000000 \n27 0.0 0.0 0.000000 \n28 0.0 0.0 0.000000 \n29 0.0 0.0 0.000000 \n30 0.0 0.0 0.000000 \n... ... ... ... \n4588 0.0 0.0 0.000000 \n4589 0.0 0.0 0.000000 \n4590 0.0 0.0 0.000000 \n4591 0.0 0.0 0.000000 \n4592 0.0 0.0 0.000000 \n4593 0.0 0.0 0.000000 \n4594 0.0 0.0 0.000000 \n4595 0.0 0.0 0.000000 \n4596 0.0 0.0 0.000000 \n4597 0.0 0.0 0.000000 \n4598 0.0 0.0 0.000000 \n4599 0.0 0.0 0.181111 \n4600 0.0 0.0 0.000000 \n4601 0.0 0.0 0.000000 \n4602 0.0 0.0 0.000000 \n4603 0.0 0.0 0.000000 \n4604 0.0 0.0 0.000000 \n4605 0.0 0.0 0.000000 \n4606 0.0 0.0 0.000000 \n4607 0.0 0.0 0.000000 \n4608 0.0 0.0 0.000000 \n4609 0.0 0.0 0.000000 \n4610 0.0 0.0 0.000000 \n4611 0.0 0.0 0.000000 \n4612 0.0 0.0 0.000000 \n4613 0.0 0.0 0.000000 \n4614 0.0 0.0 0.000000 \n4615 0.0 0.0 0.000000 \n4616 0.0 0.0 0.000000 \n4617 0.0 0.0 0.000000 \n\n lyricist David Foster lyricist 傑美 lyricist Leon Huff lyricist 林夕 \\\n1 0.0 0.0 0.0 0.0 \n2 0.0 0.0 0.0 0.0 \n3 0.0 0.0 0.0 0.0 \n4 0.0 0.0 0.0 0.0 \n5 0.0 0.0 0.0 0.0 \n6 0.0 0.0 0.0 0.0 \n7 0.0 0.0 0.0 0.0 \n8 0.0 0.0 0.0 0.0 \n9 0.0 0.0 0.0 0.0 \n10 0.0 0.0 0.0 0.0 \n11 0.0 0.0 0.0 0.0 \n12 0.0 0.0 0.0 0.0 \n13 0.0 0.0 0.0 0.0 \n14 0.0 0.0 0.0 0.0 \n15 0.0 0.0 0.0 0.0 \n16 0.0 0.0 0.0 0.0 \n17 0.0 0.0 0.0 0.0 \n18 0.0 0.0 0.0 0.0 \n19 0.0 0.0 0.0 0.0 \n20 0.0 0.0 0.0 0.0 \n21 0.0 0.0 0.0 0.0 \n22 0.0 0.0 0.0 0.0 \n23 0.0 0.0 0.0 0.0 \n24 0.0 0.0 0.0 0.0 \n25 0.0 0.0 0.0 0.0 \n26 0.0 0.0 0.0 0.0 \n27 0.0 0.0 0.0 0.0 \n28 0.0 0.0 0.0 0.0 \n29 0.0 0.0 0.0 0.0 \n30 0.0 0.0 0.0 0.0 \n... ... ... ... ... 
\n4588 0.0 0.0 0.0 0.0 \n4589 0.0 0.0 0.0 0.0 \n4590 0.0 0.0 0.0 0.0 \n4591 0.0 0.0 0.0 0.0 \n4592 0.0 0.0 0.0 0.0 \n4593 0.0 0.0 0.0 0.0 \n4594 0.0 0.0 0.0 0.0 \n4595 0.0 0.0 0.0 0.0 \n4596 0.0 0.0 0.0 0.0 \n4597 0.0 0.0 0.0 0.0 \n4598 0.0 0.0 0.0 0.0 \n4599 0.0 0.0 0.0 0.0 \n4600 0.0 0.0 0.0 0.0 \n4601 0.0 0.0 0.0 0.0 \n4602 0.0 0.0 0.0 0.0 \n4603 0.0 0.0 0.0 0.0 \n4604 0.0 0.0 0.0 0.0 \n4605 0.0 0.0 0.0 0.0 \n4606 0.0 0.0 0.0 0.0 \n4607 0.0 0.0 0.0 0.0 \n4608 0.0 0.0 0.0 0.0 \n4609 0.0 0.0 0.0 0.0 \n4610 0.0 0.0 0.0 0.0 \n4611 0.0 0.0 0.0 0.0 \n4612 0.0 0.0 0.0 0.0 \n4613 0.0 0.0 0.0 0.0 \n4614 0.0 0.0 0.0 0.0 \n4615 0.0 0.0 0.0 0.0 \n4616 0.0 0.0 0.0 0.0 \n4617 0.0 0.0 0.0 0.0 \n\n lyricist Avril Lavigne lyricist others \n1 0.0 0.000000 \n2 0.0 0.000000 \n3 0.0 0.000000 \n4 0.0 0.000000 \n5 0.0 0.000000 \n6 0.0 0.076140 \n7 0.0 0.000000 \n8 0.0 0.000000 \n9 0.0 0.000000 \n10 0.0 0.000000 \n11 0.0 0.076140 \n12 0.0 0.076140 \n13 0.0 0.076140 \n14 0.0 0.076140 \n15 0.0 0.076140 \n16 0.0 0.076140 \n17 0.0 0.000000 \n18 0.0 0.076140 \n19 0.0 0.076140 \n20 0.0 0.000000 \n21 0.0 0.000000 \n22 0.0 0.000000 \n23 0.0 0.000000 \n24 0.0 0.000000 \n25 0.0 0.000000 \n26 0.0 0.000000 \n27 0.0 0.000000 \n28 0.0 0.076140 \n29 0.0 0.076140 \n30 0.0 0.076140 \n... ... ... \n4588 0.0 0.000000 \n4589 0.0 0.076140 \n4590 0.0 0.076140 \n4591 0.0 0.076140 \n4592 0.0 0.000000 \n4593 0.0 0.000000 \n4594 0.0 0.000000 \n4595 0.0 0.076140 \n4596 0.0 0.000000 \n4597 0.0 0.076140 \n4598 0.0 0.076140 \n4599 0.0 0.000000 \n4600 0.0 0.076140 \n4601 0.0 0.076140 \n4602 0.0 0.000000 \n4603 0.0 0.000000 \n4604 0.0 0.000000 \n4605 0.0 0.000000 \n4606 0.0 0.000000 \n4607 0.0 0.000000 \n4608 0.0 0.000000 \n4609 0.0 0.000000 \n4610 0.0 0.000000 \n4611 0.0 0.000000 \n4612 0.0 0.000000 \n4613 0.0 0.000000 \n4614 0.0 0.050047 \n4615 0.0 0.076140 \n4616 0.0 0.000000 \n4617 0.0 0.076140 \n\n[724545 rows x 771 columns]\n"
],
[
"h5store.close()",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code"
] | [
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e7fc41f38ec844e541eb633a806d0a45fc482b7a | 1,398 | ipynb | Jupyter Notebook | _downloads/plot_optimize_lidar_data.ipynb | scipy-lectures/scipy-lectures.github.com | 637a0d9cc2c95ed196550371e44a4cc6e150c830 | [
"CC-BY-4.0"
] | 48 | 2015-01-13T22:15:34.000Z | 2022-01-04T20:17:41.000Z | _downloads/plot_optimize_lidar_data.ipynb | scipy-lectures/scipy-lectures.github.com | 637a0d9cc2c95ed196550371e44a4cc6e150c830 | [
"CC-BY-4.0"
] | 1 | 2017-04-25T09:01:00.000Z | 2017-04-25T13:48:56.000Z | _downloads/plot_optimize_lidar_data.ipynb | scipy-lectures/scipy-lectures.github.com | 637a0d9cc2c95ed196550371e44a4cc6e150c830 | [
"CC-BY-4.0"
] | 21 | 2015-03-16T17:52:23.000Z | 2021-02-19T00:02:13.000Z | 25.888889 | 273 | 0.496423 | [
[
[
"%matplotlib inline",
"_____no_output_____"
]
],
[
[
"\nThe lidar system, data (1 of 2 datasets)\n========================================\n\nGenerate a chart of the data recorded by the lidar system\n\n",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport matplotlib.pyplot as plt\n\nwaveform_1 = np.load('waveform_1.npy')\n\nt = np.arange(len(waveform_1))\n\nfig, ax = plt.subplots(figsize=(8, 6))\nplt.plot(t, waveform_1)\nplt.xlabel('Time [ns]')\nplt.ylabel('Amplitude [bins]')\nplt.show()",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code"
] | [
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
e7fc44f0ad389b2ff91bb02b8b7ffe9df1fde3e8 | 3,730 | ipynb | Jupyter Notebook | notebooks/camera-calibration.ipynb | johnnylord/mtmc-testbed | e3d331505181baa076162e1f5835e566e8f70167 | [
"MIT"
] | 1 | 2020-09-25T08:46:19.000Z | 2020-09-25T08:46:19.000Z | notebooks/camera-calibration.ipynb | johnnylord/mtmc-testbed | e3d331505181baa076162e1f5835e566e8f70167 | [
"MIT"
] | null | null | null | notebooks/camera-calibration.ipynb | johnnylord/mtmc-testbed | e3d331505181baa076162e1f5835e566e8f70167 | [
"MIT"
] | 1 | 2020-09-18T01:33:45.000Z | 2020-09-18T01:33:45.000Z | 26.642857 | 336 | 0.596515 | [
[
[
"# Camera Calibration\n\nIn multiple camera tracking with overlapping view, we can utilize information from different camera to facilitate tracking algorithm.\n\n![](imgs/mtmc.png)\n\nHowever, to make use of these multi-view information, we need to first calibrate cameras so that they are in the same domain, which means that they are in the same coordinate system.\n\n![](imgs/fusion.png)",
"_____no_output_____"
],
[
"## Camera projection\n\nWhen we shoot a picture of object, the camera will project this object from world domain (3D space) to pixel domain (2D space). Although there are multiple stages in this domain transformation, all the processing in each stage is a linear operation. Therefore, you can see the mapping process as a simple linear transformation.\n\n![](imgs/coords.png)",
"_____no_output_____"
],
[
"## Intrinsic & Extrinsic parameters of camera\n\n![](imgs/intext.png)",
"_____no_output_____"
]
],
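[
[
"# Added sketch (not part of the original notes): how the intrinsics K and\n# the extrinsics [R|t] project a 3D world point into pixel coordinates.\n# All numbers below are made-up placeholders, not calibrated values.\nimport numpy as np\n\nK = np.array([[800.0, 0.0, 320.0],   # fx, 0, cx\n              [0.0, 800.0, 240.0],   # 0, fy, cy\n              [0.0, 0.0, 1.0]])      # intrinsic matrix\nR = np.eye(3)                        # extrinsic rotation (world -> camera)\nt = np.array([[0.0], [0.0], [5.0]])  # extrinsic translation\n\nX_world = np.array([[1.0], [0.5], [10.0]])  # a 3D point in the world frame\nx_hom = K @ (R @ X_world + t)               # homogeneous pixel coordinates\nu, v = (x_hom[:2] / x_hom[2]).ravel()       # perspective divide\nprint(u, v)",
"_____no_output_____"
]
],
[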
[
"## Planar projection (Homography Transform)\n\nIn camera projection, we are trying to map 3D object into 2D object. In planar projection, we are trying to map 2D object into 2D object with different cooridnate system.\n\n![](imgs/homography.png)",
"_____no_output_____"
],
[
"## Construct homography matrix\n\nThere are two ways you can used to construct the homography matrix:\n1. If you have intrinsic and extrinsic parameters of camera, then you can directly form the homography matrix\n2. Given a reference object with four points at its corner, you can infer the homography matrix\n\nIn our tracking scenario with aist dance dataset...\n![](imgs/aist.png)\n\n**It seems that we don't have much information to reconstruct the homography matrix. CALL FOR HELP!!!**",
"_____no_output_____"
]
],
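[
[
"# Added sketch of option 2 above: infer the homography from four point\n# correspondences with OpenCV (assumes opencv-python is installed).\n# The corner coordinates are illustrative placeholders only -- we do not\n# have measured reference points for the aist footage.\nimport cv2\nimport numpy as np\n\n# Four corners of a planar reference object as seen in the image ...\nsrc_pts = np.float32([[100, 200], [400, 210], [390, 500], [110, 480]])\n# ... and where those corners should land in the target plane.\ndst_pts = np.float32([[0, 0], [300, 0], [300, 300], [0, 300]])\n\nH = cv2.getPerspectiveTransform(src_pts, dst_pts)  # exact 4-point solution\nprint(H)\n\n# With more (noisy) correspondences, estimate H robustly instead:\n# H, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC)\n\n# Map image points into the target plane with the recovered H.\nmapped = cv2.perspectiveTransform(src_pts.reshape(-1, 1, 2), H)\nprint(mapped.reshape(-1, 2))",
"_____no_output_____"
]
],
[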
[
"## Homography for Mosaicing\n\n![](imgs/mosaic.png)",
"_____no_output_____"
]
]
] | [
"markdown"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
]
] |
e7fc513f85d7ff7fe7b976987bcf935985190fc1 | 30,804 | ipynb | Jupyter Notebook | docs/tutorials/keras_layers.ipynb | sarvex/lattice-1 | 784eca50cbdfedf39f183cc7d298c9fe376b69c0 | [
"Apache-2.0"
] | 508 | 2017-10-10T20:15:18.000Z | 2022-03-29T13:22:50.000Z | docs/tutorials/keras_layers.ipynb | Saiprasad16/lattice | 35f3e9d7da7f90a700d7a903e1818e82965f245c | [
"Apache-2.0"
] | 69 | 2017-10-12T05:08:57.000Z | 2022-02-15T21:43:57.000Z | docs/tutorials/keras_layers.ipynb | Saiprasad16/lattice | 35f3e9d7da7f90a700d7a903e1818e82965f245c | [
"Apache-2.0"
] | 93 | 2017-10-11T20:12:42.000Z | 2022-03-08T14:42:13.000Z | 37.292978 | 332 | 0.527756 | [
[
[
"##### Copyright 2020 The TensorFlow Authors.",
"_____no_output_____"
]
],
[
[
"#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.",
"_____no_output_____"
]
],
[
[
"# Creating Keras Models with TFL Layers",
"_____no_output_____"
],
[
"<table class=\"tfo-notebook-buttons\" align=\"left\">\n <td>\n <a target=\"_blank\" href=\"https://www.tensorflow.org/lattice/tutorials/keras_layers\"><img src=\"https://www.tensorflow.org/images/tf_logo_32px.png\" />View on TensorFlow.org</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://colab.research.google.com/github/tensorflow/lattice/blob/master/docs/tutorials/keras_layers.ipynb\"><img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" />Run in Google Colab</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://github.com/tensorflow/lattice/blob/master/docs/tutorials/keras_layers.ipynb\"><img src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" />View source on GitHub</a>\n </td>\n <td>\n <a href=\"https://storage.googleapis.com/tensorflow_docs/lattice/docs/tutorials/keras_layers.ipynb\"><img src=\"https://www.tensorflow.org/images/download_logo_32px.png\" />Download notebook</a>\n </td>\n</table>",
"_____no_output_____"
],
[
"##Overview\n\nYou can use TFL Keras layers to construct Keras models with monotonicity and other shape constraints. This example builds and trains a calibrated lattice model for the UCI heart dataset using TFL layers.\n\nIn a calibrated lattice model, each feature is transformed by a `tfl.layers.PWLCalibration` or a `tfl.layers.CategoricalCalibration` layer and the results are nonlinearly fused using a `tfl.layers.Lattice`.",
"_____no_output_____"
],
[
"## Setup",
"_____no_output_____"
],
[
"Installing TF Lattice package:",
"_____no_output_____"
]
],
[
[
"#@test {\"skip\": true}\n!pip install tensorflow-lattice pydot",
"_____no_output_____"
]
],
[
[
"Importing required packages:",
"_____no_output_____"
]
],
[
[
"import tensorflow as tf\n\nimport logging\nimport numpy as np\nimport pandas as pd\nimport sys\nimport tensorflow_lattice as tfl\nfrom tensorflow import feature_column as fc\nlogging.disable(sys.maxsize)",
"_____no_output_____"
]
],
[
[
"Downloading the UCI Statlog (Heart) dataset:",
"_____no_output_____"
]
],
[
[
"# UCI Statlog (Heart) dataset.\ncsv_file = tf.keras.utils.get_file(\n 'heart.csv', 'http://storage.googleapis.com/download.tensorflow.org/data/heart.csv')\ntraining_data_df = pd.read_csv(csv_file).sample(\n frac=1.0, random_state=41).reset_index(drop=True)\ntraining_data_df.head()",
"_____no_output_____"
]
],
[
[
"Setting the default values used for training in this guide:",
"_____no_output_____"
]
],
[
[
"LEARNING_RATE = 0.1\nBATCH_SIZE = 128\nNUM_EPOCHS = 100",
"_____no_output_____"
]
],
[
[
"## Sequential Keras Model\n\nThis example creates a Sequential Keras model and only uses TFL layers.\n\nLattice layers expect `input[i]` to be within `[0, lattice_sizes[i] - 1.0]`, so we need to define the lattice sizes ahead of the calibration layers so we can properly specify output range of the calibration layers.\n",
"_____no_output_____"
]
],
[
[
"# Lattice layer expects input[i] to be within [0, lattice_sizes[i] - 1.0], so\nlattice_sizes = [3, 2, 2, 2, 2, 2, 2]",
"_____no_output_____"
]
],
[
[
"We use a `tfl.layers.ParallelCombination` layer to group together calibration layers which have to be executed in parallel in order to be able to create a Sequential model.\n",
"_____no_output_____"
]
],
[
[
"combined_calibrators = tfl.layers.ParallelCombination()",
"_____no_output_____"
]
],
[
[
"We create a calibration layer for each feature and add it to the parallel combination layer. For numeric features we use `tfl.layers.PWLCalibration`, and for categorical features we use `tfl.layers.CategoricalCalibration`.",
"_____no_output_____"
]
],
[
[
"# ############### age ###############\ncalibrator = tfl.layers.PWLCalibration(\n # Every PWLCalibration layer must have keypoints of piecewise linear\n # function specified. Easiest way to specify them is to uniformly cover\n # entire input range by using numpy.linspace().\n input_keypoints=np.linspace(\n training_data_df['age'].min(), training_data_df['age'].max(), num=5),\n # You need to ensure that input keypoints have same dtype as layer input.\n # You can do it by setting dtype here or by providing keypoints in such\n # format which will be converted to desired tf.dtype by default.\n dtype=tf.float32,\n # Output range must correspond to expected lattice input range.\n output_min=0.0,\n output_max=lattice_sizes[0] - 1.0,\n)\ncombined_calibrators.append(calibrator)\n\n# ############### sex ###############\n# For boolean features simply specify CategoricalCalibration layer with 2\n# buckets.\ncalibrator = tfl.layers.CategoricalCalibration(\n num_buckets=2,\n output_min=0.0,\n output_max=lattice_sizes[1] - 1.0,\n # Initializes all outputs to (output_min + output_max) / 2.0.\n kernel_initializer='constant')\ncombined_calibrators.append(calibrator)\n\n# ############### cp ###############\ncalibrator = tfl.layers.PWLCalibration(\n # Here instead of specifying dtype of layer we convert keypoints into\n # np.float32.\n input_keypoints=np.linspace(1, 4, num=4, dtype=np.float32),\n output_min=0.0,\n output_max=lattice_sizes[2] - 1.0,\n monotonicity='increasing',\n # You can specify TFL regularizers as a tuple ('regularizer name', l1, l2).\n kernel_regularizer=('hessian', 0.0, 1e-4))\ncombined_calibrators.append(calibrator)\n\n# ############### trestbps ###############\ncalibrator = tfl.layers.PWLCalibration(\n # Alternatively, you might want to use quantiles as keypoints instead of\n # uniform keypoints\n input_keypoints=np.quantile(training_data_df['trestbps'],\n np.linspace(0.0, 1.0, num=5)),\n dtype=tf.float32,\n # Together with quantile keypoints you might want to initialize piecewise\n # linear function to have 'equal_slopes' in order for output of layer\n # after initialization to preserve original distribution.\n kernel_initializer='equal_slopes',\n output_min=0.0,\n output_max=lattice_sizes[3] - 1.0,\n # You might consider clamping extreme inputs of the calibrator to output\n # bounds.\n clamp_min=True,\n clamp_max=True,\n monotonicity='increasing')\ncombined_calibrators.append(calibrator)\n\n# ############### chol ###############\ncalibrator = tfl.layers.PWLCalibration(\n # Explicit input keypoint initialization.\n input_keypoints=[126.0, 210.0, 247.0, 286.0, 564.0],\n dtype=tf.float32,\n output_min=0.0,\n output_max=lattice_sizes[4] - 1.0,\n # Monotonicity of calibrator can be decreasing. Note that corresponding\n # lattice dimension must have INCREASING monotonicity regardless of\n # monotonicity direction of calibrator.\n monotonicity='decreasing',\n # Convexity together with decreasing monotonicity result in diminishing\n # return constraint.\n convexity='convex',\n # You can specify list of regularizers. You are not limited to TFL\n # regularizrs. Feel free to use any :)\n kernel_regularizer=[('laplacian', 0.0, 1e-4),\n tf.keras.regularizers.l1_l2(l1=0.001)])\ncombined_calibrators.append(calibrator)\n\n# ############### fbs ###############\ncalibrator = tfl.layers.CategoricalCalibration(\n num_buckets=2,\n output_min=0.0,\n output_max=lattice_sizes[5] - 1.0,\n # For categorical calibration layer monotonicity is specified for pairs\n # of indices of categories. 
Output for first category in pair will be\n # smaller than output for second category.\n #\n # Don't forget to set monotonicity of corresponding dimension of Lattice\n # layer to '1'.\n monotonicities=[(0, 1)],\n # This initializer is identical to default one('uniform'), but has fixed\n # seed in order to simplify experimentation.\n kernel_initializer=tf.keras.initializers.RandomUniform(\n minval=0.0, maxval=lattice_sizes[5] - 1.0, seed=1))\ncombined_calibrators.append(calibrator)\n\n# ############### restecg ###############\ncalibrator = tfl.layers.CategoricalCalibration(\n num_buckets=3,\n output_min=0.0,\n output_max=lattice_sizes[6] - 1.0,\n # Categorical monotonicity can be partial order.\n monotonicities=[(0, 1), (0, 2)],\n # Categorical calibration layer supports standard Keras regularizers.\n kernel_regularizer=tf.keras.regularizers.l1_l2(l1=0.001),\n kernel_initializer='constant')\ncombined_calibrators.append(calibrator)",
"_____no_output_____"
]
],
[
[
"We then create a lattice layer to nonlinearly fuse the outputs of the calibrators.\n\nNote that we need to specify the monotonicity of the lattice to be increasing for required dimensions. The composition with the direction of the monotonicity in the calibration will result in the correct end-to-end direction of monotonicity. This includes partial monotonicity of CategoricalCalibration layer.",
"_____no_output_____"
]
],
[
[
"lattice = tfl.layers.Lattice(\n lattice_sizes=lattice_sizes,\n monotonicities=[\n 'increasing', 'none', 'increasing', 'increasing', 'increasing',\n 'increasing', 'increasing'\n ],\n output_min=0.0,\n output_max=1.0)",
"_____no_output_____"
]
],
[
[
"We can then create a sequential model using the combined calibrators and lattice layers.",
"_____no_output_____"
]
],
[
[
"model = tf.keras.models.Sequential()\nmodel.add(combined_calibrators)\nmodel.add(lattice)",
"_____no_output_____"
]
],
[
[
"Training works the same as any other keras model.",
"_____no_output_____"
]
],
[
[
"features = training_data_df[[\n 'age', 'sex', 'cp', 'trestbps', 'chol', 'fbs', 'restecg'\n]].values.astype(np.float32)\ntarget = training_data_df[['target']].values.astype(np.float32)\n\nmodel.compile(\n loss=tf.keras.losses.mean_squared_error,\n optimizer=tf.keras.optimizers.Adagrad(learning_rate=LEARNING_RATE))\nmodel.fit(\n features,\n target,\n batch_size=BATCH_SIZE,\n epochs=NUM_EPOCHS,\n validation_split=0.2,\n shuffle=False,\n verbose=0)\n\nmodel.evaluate(features, target)",
"_____no_output_____"
]
],
[
[
"## Functional Keras Model\n\nThis example uses a functional API for Keras model construction.\n\nAs mentioned in the previous section, lattice layers expect `input[i]` to be within `[0, lattice_sizes[i] - 1.0]`, so we need to define the lattice sizes ahead of the calibration layers so we can properly specify output range of the calibration layers.",
"_____no_output_____"
]
],
[
[
"# We are going to have 2-d embedding as one of lattice inputs.\nlattice_sizes = [3, 2, 2, 3, 3, 2, 2]",
"_____no_output_____"
]
],
[
[
"For each feature, we need to create an input layer followed by a calibration layer. For numeric features we use `tfl.layers.PWLCalibration` and for categorical features we use `tfl.layers.CategoricalCalibration`.",
"_____no_output_____"
]
],
[
[
"model_inputs = []\nlattice_inputs = []\n# ############### age ###############\nage_input = tf.keras.layers.Input(shape=[1], name='age')\nmodel_inputs.append(age_input)\nage_calibrator = tfl.layers.PWLCalibration(\n # Every PWLCalibration layer must have keypoints of piecewise linear\n # function specified. Easiest way to specify them is to uniformly cover\n # entire input range by using numpy.linspace().\n input_keypoints=np.linspace(\n training_data_df['age'].min(), training_data_df['age'].max(), num=5),\n # You need to ensure that input keypoints have same dtype as layer input.\n # You can do it by setting dtype here or by providing keypoints in such\n # format which will be converted to desired tf.dtype by default.\n dtype=tf.float32,\n # Output range must correspond to expected lattice input range.\n output_min=0.0,\n output_max=lattice_sizes[0] - 1.0,\n monotonicity='increasing',\n name='age_calib',\n)(\n age_input)\nlattice_inputs.append(age_calibrator)\n\n# ############### sex ###############\n# For boolean features simply specify CategoricalCalibration layer with 2\n# buckets.\nsex_input = tf.keras.layers.Input(shape=[1], name='sex')\nmodel_inputs.append(sex_input)\nsex_calibrator = tfl.layers.CategoricalCalibration(\n num_buckets=2,\n output_min=0.0,\n output_max=lattice_sizes[1] - 1.0,\n # Initializes all outputs to (output_min + output_max) / 2.0.\n kernel_initializer='constant',\n name='sex_calib',\n)(\n sex_input)\nlattice_inputs.append(sex_calibrator)\n\n# ############### cp ###############\ncp_input = tf.keras.layers.Input(shape=[1], name='cp')\nmodel_inputs.append(cp_input)\ncp_calibrator = tfl.layers.PWLCalibration(\n # Here instead of specifying dtype of layer we convert keypoints into\n # np.float32.\n input_keypoints=np.linspace(1, 4, num=4, dtype=np.float32),\n output_min=0.0,\n output_max=lattice_sizes[2] - 1.0,\n monotonicity='increasing',\n # You can specify TFL regularizers as tuple ('regularizer name', l1, l2).\n kernel_regularizer=('hessian', 0.0, 1e-4),\n name='cp_calib',\n)(\n cp_input)\nlattice_inputs.append(cp_calibrator)\n\n# ############### trestbps ###############\ntrestbps_input = tf.keras.layers.Input(shape=[1], name='trestbps')\nmodel_inputs.append(trestbps_input)\ntrestbps_calibrator = tfl.layers.PWLCalibration(\n # Alternatively, you might want to use quantiles as keypoints instead of\n # uniform keypoints\n input_keypoints=np.quantile(training_data_df['trestbps'],\n np.linspace(0.0, 1.0, num=5)),\n dtype=tf.float32,\n # Together with quantile keypoints you might want to initialize piecewise\n # linear function to have 'equal_slopes' in order for output of layer\n # after initialization to preserve original distribution.\n kernel_initializer='equal_slopes',\n output_min=0.0,\n output_max=lattice_sizes[3] - 1.0,\n # You might consider clamping extreme inputs of the calibrator to output\n # bounds.\n clamp_min=True,\n clamp_max=True,\n monotonicity='increasing',\n name='trestbps_calib',\n)(\n trestbps_input)\nlattice_inputs.append(trestbps_calibrator)\n\n# ############### chol ###############\nchol_input = tf.keras.layers.Input(shape=[1], name='chol')\nmodel_inputs.append(chol_input)\nchol_calibrator = tfl.layers.PWLCalibration(\n # Explicit input keypoint initialization.\n input_keypoints=[126.0, 210.0, 247.0, 286.0, 564.0],\n output_min=0.0,\n output_max=lattice_sizes[4] - 1.0,\n # Monotonicity of calibrator can be decreasing. 
Note that corresponding\n # lattice dimension must have INCREASING monotonicity regardless of\n # monotonicity direction of calibrator.\n monotonicity='decreasing',\n # Convexity together with decreasing monotonicity result in diminishing\n # return constraint.\n convexity='convex',\n # You can specify list of regularizers. You are not limited to TFL\n # regularizrs. Feel free to use any :)\n kernel_regularizer=[('laplacian', 0.0, 1e-4),\n tf.keras.regularizers.l1_l2(l1=0.001)],\n name='chol_calib',\n)(\n chol_input)\nlattice_inputs.append(chol_calibrator)\n\n# ############### fbs ###############\nfbs_input = tf.keras.layers.Input(shape=[1], name='fbs')\nmodel_inputs.append(fbs_input)\nfbs_calibrator = tfl.layers.CategoricalCalibration(\n num_buckets=2,\n output_min=0.0,\n output_max=lattice_sizes[5] - 1.0,\n # For categorical calibration layer monotonicity is specified for pairs\n # of indices of categories. Output for first category in pair will be\n # smaller than output for second category.\n #\n # Don't forget to set monotonicity of corresponding dimension of Lattice\n # layer to '1'.\n monotonicities=[(0, 1)],\n # This initializer is identical to default one ('uniform'), but has fixed\n # seed in order to simplify experimentation.\n kernel_initializer=tf.keras.initializers.RandomUniform(\n minval=0.0, maxval=lattice_sizes[5] - 1.0, seed=1),\n name='fbs_calib',\n)(\n fbs_input)\nlattice_inputs.append(fbs_calibrator)\n\n# ############### restecg ###############\nrestecg_input = tf.keras.layers.Input(shape=[1], name='restecg')\nmodel_inputs.append(restecg_input)\nrestecg_calibrator = tfl.layers.CategoricalCalibration(\n num_buckets=3,\n output_min=0.0,\n output_max=lattice_sizes[6] - 1.0,\n # Categorical monotonicity can be partial order.\n monotonicities=[(0, 1), (0, 2)],\n # Categorical calibration layer supports standard Keras regularizers.\n kernel_regularizer=tf.keras.regularizers.l1_l2(l1=0.001),\n kernel_initializer='constant',\n name='restecg_calib',\n)(\n restecg_input)\nlattice_inputs.append(restecg_calibrator)",
"_____no_output_____"
]
],
[
[
"We then create a lattice layer to nonlinearly fuse the outputs of the calibrators.\n\nNote that we need to specify the monotonicity of the lattice to be increasing for required dimensions. The composition with the direction of the monotonicity in the calibration will result in the correct end-to-end direction of monotonicity. This includes partial monotonicity of `tfl.layers.CategoricalCalibration` layer.",
"_____no_output_____"
]
],
[
[
"lattice = tfl.layers.Lattice(\n lattice_sizes=lattice_sizes,\n monotonicities=[\n 'increasing', 'none', 'increasing', 'increasing', 'increasing',\n 'increasing', 'increasing'\n ],\n output_min=0.0,\n output_max=1.0,\n name='lattice',\n)(\n lattice_inputs)",
"_____no_output_____"
]
],
[
[
"To add more flexibility to the model, we add an output calibration layer.",
"_____no_output_____"
]
],
[
[
"model_output = tfl.layers.PWLCalibration(\n input_keypoints=np.linspace(0.0, 1.0, 5),\n name='output_calib',\n)(\n lattice)",
"_____no_output_____"
]
],
[
[
"We can now create a model using the inputs and outputs.",
"_____no_output_____"
]
],
[
[
"model = tf.keras.models.Model(\n inputs=model_inputs,\n outputs=model_output)\ntf.keras.utils.plot_model(model, rankdir='LR')",
"_____no_output_____"
]
],
[
[
"Training works the same as any other keras model. Note that, with our setup, input features are passed as separate tensors.",
"_____no_output_____"
]
],
[
[
"feature_names = ['age', 'sex', 'cp', 'trestbps', 'chol', 'fbs', 'restecg']\nfeatures = np.split(\n training_data_df[feature_names].values.astype(np.float32),\n indices_or_sections=len(feature_names),\n axis=1)\ntarget = training_data_df[['target']].values.astype(np.float32)\n\nmodel.compile(\n loss=tf.keras.losses.mean_squared_error,\n optimizer=tf.keras.optimizers.Adagrad(LEARNING_RATE))\nmodel.fit(\n features,\n target,\n batch_size=BATCH_SIZE,\n epochs=NUM_EPOCHS,\n validation_split=0.2,\n shuffle=False,\n verbose=0)\n\nmodel.evaluate(features, target)",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
e7fc5146651d28eae0bc935decef0268f613ea98 | 23,877 | ipynb | Jupyter Notebook | MachineLearning/supervised_machine_learning/Polinamial_and_PlynomialRidge_Regression.ipynb | pavi-ninjaac/Machine_Learing_sratch | 85bc986aedd034e91a8d9c61d860477ab8a6a2e6 | [
"MIT"
] | null | null | null | MachineLearning/supervised_machine_learning/Polinamial_and_PlynomialRidge_Regression.ipynb | pavi-ninjaac/Machine_Learing_sratch | 85bc986aedd034e91a8d9c61d860477ab8a6a2e6 | [
"MIT"
] | null | null | null | MachineLearning/supervised_machine_learning/Polinamial_and_PlynomialRidge_Regression.ipynb | pavi-ninjaac/Machine_Learing_sratch | 85bc986aedd034e91a8d9c61d860477ab8a6a2e6 | [
"MIT"
] | null | null | null | 34.554269 | 153 | 0.47485 | [
[
[
"import numpy as np\nimport pandas as pd\n\nfrom itertools import combinations_with_replacement\n\nfrom sklearn.metrics import r2_score\nfrom sklearn.datasets import make_regression",
"_____no_output_____"
]
],
[
[
"# Common Regression class",
"_____no_output_____"
]
],
[
[
"class Regression:\n def __init__(self, learning_rate, iteration, regularization):\n \"\"\"\n :param learning_rate: A samll value needed for gradient decent, default value id 0.1.\n :param iteration: Number of training iteration, default value is 10,000.\n \"\"\"\n self.m = None\n self.n = None\n self.w = None\n self.b = None\n self.regularization = regularization # will be the l1/l2 regularization class according to the regression model.\n self.lr = learning_rate\n self.it = iteration\n\n def cost_function(self, y, y_pred):\n \"\"\"\n :param y: Original target value.\n :param y_pred: predicted target value.\n \"\"\"\n return (1 / (2*self.m)) * np.sum(np.square(y_pred - y)) + self.regularization(self.w)\n \n def hypothesis(self, weights, bias, X):\n \"\"\"\n :param weights: parameter value weight.\n :param X: Training samples.\n \"\"\"\n return np.dot(X, weights) #+ bias\n\n def train(self, X, y):\n \"\"\"\n :param X: training data feature values ---> N Dimentional vector.\n :param y: training data target value -----> 1 Dimentional array.\n \"\"\"\n # Insert constant ones for bias weights.\n X = np.insert(X, 0, 1, axis=1)\n\n # Target value should be in the shape of (n, 1) not (n, ).\n # So, this will check that and change the shape to (n, 1), if not.\n try:\n y.shape[1]\n except IndexError as e:\n # we need to change it to the 1 D array, not a list.\n print(\"ERROR: Target array should be a one dimentional array not a list\"\n \"----> here the target value not in the shape of (n,1). \\nShape ({shape_y_0},1) and {shape_y} not match\"\n .format(shape_y_0 = y.shape[0] , shape_y = y.shape))\n return \n \n # m is the number of training samples.\n self.m = X.shape[0]\n # n is the number of features.\n self.n = X.shape[1]\n\n # Set the initial weight.\n self.w = np.zeros((self.n , 1))\n\n # bias.\n self.b = 0\n\n for it in range(1, self.it+1):\n # 1. Find the predicted value through the hypothesis.\n # 2. Find the Cost function value.\n # 3. Find the derivation of weights.\n # 4. Apply Gradient Decent.\n y_pred = self.hypothesis(self.w, self.b, X)\n #print(\"iteration\",it)\n #print(\"y predict value\",y_pred)\n cost = self.cost_function(y, y_pred)\n #print(\"Cost function\",cost)\n # fin the derivative.\n dw = (1/self.m) * np.dot(X.T, (y_pred - y)) + self.regularization.derivation(self.w)\n #print(\"weights derivation\",dw)\n #db = -(2 / self.m) * np.sum((y_pred - y))\n\n # change the weight parameter.\n self.w = self.w - self.lr * dw\n #print(\"updated weights\",self.w)\n #self.b = self.b - self.lr * db\n\n\n if it % 10 == 0:\n print(\"The Cost function for the iteration {}----->{} :)\".format(it, cost))\n def predict(self, test_X):\n \"\"\"\n :param test_X: feature values to predict.\n \"\"\"\n # Insert constant ones for bias weights.\n test_X = np.insert(test_X, 0, 1, axis=1)\n\n y_pred = self.hypothesis(self.w, self.b, test_X)\n return y_pred",
"_____no_output_____"
]
],
[
[
"# Data Creation",
"_____no_output_____"
]
],
[
[
"# Define the traning data.\nX, y = make_regression(n_samples=50000, n_features=8)\n\n# Chnage the shape of the target to 1 dimentional array.\ny = y[:, np.newaxis]\n\nprint(\"=\"*100)\nprint(\"Number of training data samples-----> {}\".format(X.shape[0]))\nprint(\"Number of training features --------> {}\".format(X.shape[1]))\nprint(\"Shape of the target value ----------> {}\".format(y.shape))",
"====================================================================================================\nNumber of training data samples-----> 50000\nNumber of training features --------> 8\nShape of the target value ----------> (50000, 1)\n"
],
[
"# display the data.\ndata = pd.DataFrame(X)\ndata.head()\n",
"_____no_output_____"
],
[
"# display the data.\ndata_y = pd.DataFrame(y)\ndata_y.head()",
"_____no_output_____"
]
],
[
[
"# Polynomial Regression from Scratch",
"_____no_output_____"
]
],
[
[
"def PolynomialFeature(X, degree):\n \"\"\"\n It is type of feature engineering ---> adding some more features based on the exisiting features \n by squaring or cubing.\n :param X: data need to be converted.\n :param degree: int- The degree of the polynomial that the features X will be transformed to.\n \"\"\"\n n_samples, n_features = X.shape\n\n # get the index combinations.\n combination = [combinations_with_replacement(range(n_features), i) for i in range(0, degree + 1)]\n combination_index = [index for obj in combination for index in obj]\n\n # generate a empty array with new shape.\n new_n_features = len(combination_index)\n X_new = np.empty((n_samples, new_n_features))\n\n for i, com_index in enumerate(combination_index):\n X_new[:, i] = np.prod(X[:, com_index], axis=1)\n \n return X_new\n\n# Used for Polynomial Ridge regression.\nclass l2_regularization:\n \"\"\"Regularization used for Ridge Regression\"\"\"\n def __init__(self, lamda):\n self.lamda = lamda\n\n def __call__(self, weights):\n \"This will be retuned when we call this class.\"\n return self.lamda * np.sum(np.square(weights))\n \n def derivation(self, weights):\n \"Derivation of the regulariozation function.\"\n return self.lamda * 2 * (weights)\n",
"_____no_output_____"
],
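[
"# Added sanity check (not in the original notebook): for two features\n# [a, b] and degree 2, PolynomialFeature should emit [1, a, b, a^2, ab, b^2].\nprint(PolynomialFeature(np.array([[2.0, 3.0]]), degree=2))\n# expected: [[1. 2. 3. 4. 6. 9.]]",
"_____no_output_____"
],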
[
"class PolynamialRegression(Regression):\n \"\"\"\n Polynomail Regression is also a type of non-linear regression with no regularization. \n Before fitting the linear regression, the dependant variable is tranformed to some polynomail degree.\n This is basincally transforming linear data to have some nonliniarity.\n \"\"\"\n def __init__(self, learning_rate, iteration, degree):\n \"\"\"\n :param learning_rate: [range from 0 to infinity] the stpe distance used while doing gradiant decent.\n :param iteration: int - Number of iteration to do.\n :param degree: int - The degree of the polynomial that the feature transformed to.\n \"\"\"\n self.degree = degree\n # No regularization here. So, making the regularization methods to return 0.\n self.regularization = lambda x: 0\n self.regularization.derivation = lambda x: 0\n super().__init__(learning_rate, iteration, self.regularization)\n \n def train(self, X, y):\n \"\"\"\n :param X: training data feature values ---> N Dimentional vector.\n :param y: training data target value -----> 1 Dimentional array.\n \"\"\"\n # change the data to \n X_poly = PolynomialFeature(X, degree=self.degree)\n return super().train(X_poly, y)\n \n def predict(self, test_X):\n \"\"\"\n :param test_X: feature values to predict.\n \"\"\"\n test_X_poly = PolynomialFeature(test_X, degree=self.degree)\n return super().predict(test_X_poly)",
"_____no_output_____"
],
[
"#define the parameters\nparam = {\n \"degree\" : 2,\n \"learning_rate\" : 0.1,\n \"iteration\" : 100,\n}\nprint(\"=\"*100)\npolynomial_reg = PolynamialRegression(**param)\n\n# Train the model.\npolynomial_reg.train(X, y) \n\n# Predict the values.\ny_pred = polynomial_reg.predict(X)\n\n#Root mean square error.\nscore = r2_score(y, y_pred)\nprint(\"The r2_score of the trained model\", score)",
"====================================================================================================\nThe Cost function for the iteration 10----->2524.546198902789 :)\nThe Cost function for the iteration 20----->313.8199639696676 :)\nThe Cost function for the iteration 30----->39.17839267886082 :)\nThe Cost function for the iteration 40----->4.916567388701627 :)\nThe Cost function for the iteration 50----->0.6225340983364702 :)\nThe Cost function for the iteration 60----->0.08070495018731812 :)\nThe Cost function for the iteration 70----->0.011282742313695108 :)\nThe Cost function for the iteration 80----->0.0019608909310563647 :)\nThe Cost function for the iteration 90----->0.0005118599780978334 :)\nThe Cost function for the iteration 100----->0.00019559828225020284 :)\nThe r2_score of the trained model 0.9999999891242503\n"
]
],
[
[
"# Polynomial Regression using scikit-learn for comparision",
"_____no_output_____"
]
],
[
[
"from sklearn.preprocessing import PolynomialFeatures\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.metrics import r2_score\n\n# data is already defined, going to use the same data for comparision.\nprint(\"=\"*100)\nprint(\"Number of training data samples-----> {}\".format(X.shape[0]))\nprint(\"Number of training features --------> {}\".format(X.shape[1]))\n",
"====================================================================================================\nNumber of training data samples-----> 50000\nNumber of training features --------> 8\n"
],
[
"linear_reg_sklearn = LinearRegression()\n\npoly = PolynomialFeatures(degree = 2)\nX_new = poly.fit_transform(X)\nlinear_reg_sklearn.fit(X, y)\n\n# predict the value\ny_pred_sklearn = linear_reg_sklearn.predict(X)\nscore = r2_score(y, y_pred_sklearn)\nprint(\"=\"*100)\nprint(\"R2 score of the model is {}\".format(score))",
"====================================================================================================\nR2 score of the model is 1.0\n"
]
],
[
[
"# Polynomial Ridge Regression from scratch",
"_____no_output_____"
]
],
[
[
"class PolynamialRidgeRegression(Regression):\n \"\"\"\n Polynomail Ridge Regression is basically polynomial regression with l2 regularization.\n \"\"\"\n def __init__(self, learning_rate, iteration, degree, lamda):\n \"\"\"\n :param learning_rate: [range from 0 to infinity] the stpe distance used while doing gradiant decent.\n :param iteration: int - Number of iteration to do.\n :param degree: int - The degree of the polynomial that the feature transformed to.\n \"\"\"\n self.degree = degree\n # No regularization here. So, making the regularization methods to return 0.\n self.regularization = l2_regularization(lamda)\n super().__init__(learning_rate, iteration, self.regularization)\n \n def train(self, X, y):\n \"\"\"\n :param X: training data feature values ---> N Dimentional vector.\n :param y: training data target value -----> 1 Dimentional array.\n \"\"\"\n # change the data to \n X_poly = PolynomialFeature(X, degree=self.degree)\n return super().train(X_poly, y)\n \n def predict(self, test_X):\n \"\"\"\n :param test_X: feature values to predict.\n \"\"\"\n test_X_poly = PolynomialFeature(test_X, degree=self.degree)\n return super().predict(test_X_poly)",
"_____no_output_____"
],
[
"#define the parameters\nparam = {\n \"lamda\": 0.1,\n \"degree\" : 2,\n \"learning_rate\" : 0.1,\n \"iteration\" : 100,\n}\nprint(\"=\"*100)\npolynomial_reg = PolynamialRidgeRegression(**param)\n\n# Train the model.\npolynomial_reg.train(X, y) \n\n# Predict the values.\ny_pred = polynomial_reg.predict(X)\n\n#Root mean square error.\nscore = r2_score(y, y_pred)\nprint(\"The r2_score of the trained model\", score)",
"====================================================================================================\nThe Cost function for the iteration 10----->4178.872832133191 :)\nThe Cost function for the iteration 20----->2887.989505020741 :)\nThe Cost function for the iteration 30----->2785.6247039737964 :)\nThe Cost function for the iteration 40----->2777.471815365709 :)\nThe Cost function for the iteration 50----->2776.819294060092 :)\nThe Cost function for the iteration 60----->2776.7666829082946 :)\nThe Cost function for the iteration 70----->2776.7623662294877 :)\nThe Cost function for the iteration 80----->2776.761991761519 :)\nThe Cost function for the iteration 90----->2776.761953080877 :)\nThe Cost function for the iteration 100----->2776.761947221511 :)\nThe r2_score of the trained model 0.9718297887794873\n"
]
],
[
[
"# Supervised Machine Learning models scratch series....\nyou can also check....\n\n- 1) Linear Regression ---> https://www.kaggle.com/ninjaac/linear-regression-from-scratch\n- 2) Lasso Regression ---> https://www.kaggle.com/ninjaac/lasso-ridge-regression \n- 3) Ridge Regression ---> https://www.kaggle.com/ninjaac/lasso-ridge-regression \n- 4) ElasticNet Regression ---> https://www.kaggle.com/ninjaac/elasticnet-regression \n- 5) Polynomail Regression ---> https://www.kaggle.com/ninjaac/polynomial-and-polynomialridge-regression (Same Notebook you are looking now)\n- 5) PolynomailRidge Regression---> https://www.kaggle.com/ninjaac/polynomial-and-polynomialridge-regression (Same Notebook you are looking now)",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
]
] |
e7fc557d94965badac4ab40624ccf79b2357e278 | 4,212 | ipynb | Jupyter Notebook | 9_coding quizzes/05_list_HackerRank.ipynb | lucaseo/TIL | a15b7c1d3f9666a682f0b95ab320e8567495559a | [
"MIT"
] | null | null | null | 9_coding quizzes/05_list_HackerRank.ipynb | lucaseo/TIL | a15b7c1d3f9666a682f0b95ab320e8567495559a | [
"MIT"
] | null | null | null | 9_coding quizzes/05_list_HackerRank.ipynb | lucaseo/TIL | a15b7c1d3f9666a682f0b95ab320e8567495559a | [
"MIT"
] | null | null | null | 23.530726 | 232 | 0.451804 | [
[
[
"# Lists \nfrom: [HackerRank](https://www.hackerrank.com/challenges/python-lists/problem) - (easy)\n\nConsider a list (list = []). You can perform the following commands:\n\ninsert `i`, `e`: Insert integer at position. \nprint(): Print the list. \nremove `e`: Delete the first occurrence of integer. \nappend `e`: Insert integer at the end of the list. \nsort: Sort the list. \npop: Pop the last element from the list. \nreverse: Reverse the list. \n\nInitialize your list and read in the value of followed by lines of commands where each command will be of the types listed above. Iterate through each command in order and perform the corresponding operation on your list.\n\n**Input Format**\nThe first line contains an integer, n, denoting the number of commands.\nEach line of the subsequent lines contains one of the commands described above.\n\n**Constraints** \nThe elements added to the list must be integers.\n\n**Output Format** \nFor each command of type print, print the list on a new line.\n\n**Sample Input**\n```\n12\ninsert 0 5\ninsert 1 10\ninsert 0 6\nprint\nremove 6\nappend 9\nappend 1\nsort\nprint\npop\nreverse\nprint\n```\n**Sample Output**\n```\n[6, 5, 10]\n[1, 5, 9, 10]\n[9, 5, 1]\n```",
"_____no_output_____"
]
],
[
[
"N = int(input())\nls = []\nfor i in range(N):\n n = input()\n a = n.split()\n cmd = a[0]\n if cmd == \"insert\":\n ls.insert(int(a[1]), int(a[2]))\n elif cmd == \"remove\":\n ls.remove(int(a[1]))\n elif cmd == \"append\":\n ls.append(int(a[1]))\n elif cmd == \"sort\":\n ls.sort()\n elif cmd == \"pop\":\n ls.pop()\n elif cmd == \"reverse\":\n ls.reverse()\n elif cmd == \"print\":\n print(ls)",
"12\ninsert 0 5\ninsert 1 10\ninsert 0 6\nprint\n[6, 5, 10]\nremove 6\nappend 9\nappend 1\nsort\nprint\n[1, 5, 9, 10]\npop\nreverse\nprint\n[9, 5, 1]\n"
],
[
"n = int(input())\nls = []\nfor _ in range(n):\n s = input().split()\n cmd = s[0]\n args = s[1:]\n if cmd !=\"print\":\n cmd += \"(\"+ \",\".join(args) +\")\"\n eval(\"ls.\"+cmd)\n else:\n print(ls)",
"12\ninsert 0 5\ninsert 1 10\ninsert 0 6\nprint\n[6, 5, 10]\nremove 6\nappend 9\nappend 1\nsort\nprint\n[1, 5, 9, 10]\npop\nreverse\nprint\n[9, 5, 1]\n"
]
]
] | [
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code"
]
] |
e7fc608931132047521db5c3504ce73ed0f06eb4 | 502,108 | ipynb | Jupyter Notebook | frameworks/tensorflow/adanet_objective.ipynb | jiankaiwang/sophia.ml | b8cc450ed2a53417a3ff9431528dbbd7fcfcc6ea | [
"MIT"
] | 7 | 2019-05-03T01:18:56.000Z | 2021-08-21T18:44:17.000Z | frameworks/tensorflow/adanet_objective.ipynb | jiankaiwang/sophia.ml | b8cc450ed2a53417a3ff9431528dbbd7fcfcc6ea | [
"MIT"
] | null | null | null | frameworks/tensorflow/adanet_objective.ipynb | jiankaiwang/sophia.ml | b8cc450ed2a53417a3ff9431528dbbd7fcfcc6ea | [
"MIT"
] | 3 | 2019-01-17T03:53:31.000Z | 2022-01-27T14:33:54.000Z | 65.149604 | 969 | 0.663294 | [
[
[
"!pip install adanet",
"Collecting adanet\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/04/c4/11ac106b2f8946ebe1940ebe26ef4dd212d655c4a2e28bbcc3b5312268e4/adanet-0.3.0-py2.py3-none-any.whl (65kB)\n\u001b[K 100% |################################| 71kB 353kB/s ta 0:00:01\n\u001b[?25hRequirement already satisfied: protobuf>=3.6.0 in /usr/local/lib/python3.5/dist-packages (from adanet) (3.6.1)\nRequirement already satisfied: numpy>=1.12.0 in /usr/local/lib/python3.5/dist-packages (from adanet) (1.15.2)\nRequirement already satisfied: six>=1.10.0 in /usr/local/lib/python3.5/dist-packages (from adanet) (1.11.0)\nRequirement already satisfied: setuptools in /usr/local/lib/python3.5/dist-packages (from protobuf>=3.6.0->adanet) (39.1.0)\nInstalling collected packages: adanet\nSuccessfully installed adanet-0.3.0\n\u001b[33mYou are using pip version 18.0, however version 18.1 is available.\nYou should consider upgrading via the 'pip install --upgrade pip' command.\u001b[0m\n"
]
],
[
[
"##### Copyright 2018 The AdaNet Authors.",
"_____no_output_____"
]
],
[
[
"#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.",
"_____no_output_____"
]
],
[
[
"# The AdaNet objective",
"_____no_output_____"
],
[
"<table class=\"tfo-notebook-buttons\" align=\"left\">\n <td>\n <a target=\"_blank\" href=\"https://colab.research.google.com/github/tensorflow/adanet/blob/master/adanet/examples/tutorials/adanet_objective.ipynb\"><img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" />Run in Google Colab</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://github.com/tensorflow/adanet/blob/master/adanet/examples/tutorials/adanet_objective.ipynb\"><img src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" />View source on GitHub</a>\n </td>\n</table>",
"_____no_output_____"
],
[
"One of key contributions from *AdaNet: Adaptive Structural Learning of Neural\nNetworks* [[Cortes et al., ICML 2017](https://arxiv.org/abs/1607.01097)] is\ndefining an algorithm that aims to directly minimize the DeepBoost\ngeneralization bound from *Deep Boosting*\n[[Cortes et al., ICML 2014](http://proceedings.mlr.press/v32/cortesb14.pdf)]\nwhen applied to neural networks. This algorithm, called **AdaNet**, adaptively\ngrows a neural network as an ensemble of subnetworks that minimizes the AdaNet\nobjective (a.k.a. AdaNet loss):\n\n$$F(w) = \\frac{1}{m} \\sum_{i=1}^{m} \\Phi \\left(\\sum_{j=1}^{N}w_jh_j(x_i), y_i \\right) + \\sum_{j=1}^{N} \\left(\\lambda r(h_j) + \\beta \\right) |w_j| $$\n\nwhere $w$ is the set of mixture weights, one per subnetwork $h$,\n$\\Phi$ is a surrogate loss function such as logistic loss or MSE, $r$ is a\nfunction for measuring a subnetwork's complexity, and $\\lambda$ and $\\beta$\nare hyperparameters.\n\n## Mixture weights\n\nSo what are mixture weights? When forming an ensemble $f$ of subnetworks $h$,\nwe need to somehow combine the their predictions. This is done by multiplying\nthe outputs of subnetwork $h_i$ with mixture weight $w_i$, and summing the\nresults:\n\n$$f(x) = \\sum_{j=1}^{N}w_jh_j(x)$$\n\nIn practice, most commonly used set of mixture weight is **uniform average\nweighting**:\n\n$$f(x) = \\frac{1}{N}\\sum_{j=1}^{N}h_j(x)$$\n\nHowever, we can also solve a convex optimization problem to learn the mixture\nweights that minimize the loss function $\\Phi$:\n\n$$F(w) = \\frac{1}{m} \\sum_{i=1}^{m} \\Phi \\left(\\sum_{j=1}^{N}w_jh_j(x_i), y_i \\right)$$\n\nThis is the first term in the AdaNet objective. The second term applies L1\nregularization to the mixture weights:\n\n$$\\sum_{j=1}^{N} \\left(\\lambda r(h_j) + \\beta \\right) |w_j|$$\n\nWhen $\\lambda > 0$ this penalty serves to prevent the optimization from\nassigning too much weight to more complex subnetworks according to the\ncomplexity measure function $r$.\n\n## How AdaNet uses the objective\n\nThis objective function serves two purposes:\n\n1. To **learn to scale/transform the outputs of each subnetwork $h$** as part\n of the ensemble.\n2. To **select the best candidate subnetwork $h$** at each AdaNet iteration\n to include in the ensemble.\n\nEffectively, when learning mixture weights $w$, AdaNet solves a convex\ncombination of the outputs of the frozen subnetworks $h$. For $\\lambda >0$,\nAdaNet penalizes more complex subnetworks with greater L1 regularization on\ntheir mixture weight, and will be less likely to select more complex subnetworks\nto add to the ensemble at each iteration.\n\nIn this tutorial, in you will observe the benefits of using AdaNet to learn the\nensemble's mixture weights and to perform candidate selection.\n\n",
"_____no_output_____"
]
],
[
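[
"# Added toy sketch of the ensemble combination above: the ensemble output\n# is f(x) = sum_j w_j * h_j(x). Uniform averaging fixes w_j = 1/N, while\n# AdaNet can instead learn w (all values below are illustrative only).\nimport numpy as np\n\nsubnetwork_outputs = np.array([2.0, 2.5, 1.5])  # h_j(x) for N = 3 subnetworks\nuniform_w = np.full(3, 1.0 / 3)                 # uniform average weighting\nlearned_w = np.array([0.5, 0.4, 0.1])           # hypothetical learned weights\n\nprint(uniform_w @ subnetwork_outputs)  # 2.0\nprint(learned_w @ subnetwork_outputs)  # 2.15",
"_____no_output_____"
],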
[
"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport functools\n\nimport adanet\nimport tensorflow as tf\n\n# The random seed to use.\nRANDOM_SEED = 42",
"_____no_output_____"
]
],
[
[
"## Boston Housing dataset\n\nIn this example, we will solve a regression task known as the [Boston Housing dataset](https://www.cs.toronto.edu/~delve/data/boston/bostonDetail.html) to predict the price of suburban houses in Boston, MA in the 1970s. There are 13 numerical features, the labels are in thousands of dollars, and there are only 506 examples.\n",
"_____no_output_____"
],
[
"## Download the data\nConveniently, the data is available via Keras:",
"_____no_output_____"
]
],
[
[
"(x_train, y_train), (x_test, y_test) = (\n tf.keras.datasets.boston_housing.load_data())",
"Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/boston_housing.npz\n57344/57026 [==============================] - 0s 1us/step\n"
],
[
"print(x_test.shape)\nprint(x_test[0])\nprint(y_test.shape)\nprint(y_test[0])",
"(102, 13)\n[ 18.0846 0. 18.1 0. 0.679 6.434 100. 1.8347\n 24. 666. 20.2 27.25 29.05 ]\n(102,)\n7.2\n"
]
],
[
[
"## Supply the data in TensorFlow\n\nOur first task is to supply the data in TensorFlow. Using the\ntf.estimator.Estimator convention, we will define a function that returns an\ninput_fn which returns feature and label Tensors.\n\nWe will also use the tf.data.Dataset API to feed the data into our models.\n\nAlso, as a preprocessing step, we will apply `tf.log1p` to log-scale the\nfeatures and labels for improved numerical stability during training. To recover\nthe model's predictions in the correct scale, you can apply `tf.math.expm1` to the\nprediction.",
"_____no_output_____"
]
],
[
[
"FEATURES_KEY = \"x\"\n\n\ndef input_fn(partition, training, batch_size):\n \"\"\"Generate an input function for the Estimator.\"\"\"\n\n def _input_fn():\n\n if partition == \"train\":\n dataset = tf.data.Dataset.from_tensor_slices(({\n FEATURES_KEY: tf.log1p(x_train)\n }, tf.log1p(y_train)))\n else:\n dataset = tf.data.Dataset.from_tensor_slices(({\n FEATURES_KEY: tf.log1p(x_test)\n }, tf.log1p(y_test)))\n\n # We call repeat after shuffling, rather than before, to prevent separate\n # epochs from blending together.\n if training:\n dataset = dataset.shuffle(10 * batch_size, seed=RANDOM_SEED).repeat()\n\n dataset = dataset.batch(batch_size)\n iterator = dataset.make_one_shot_iterator()\n features, labels = iterator.get_next()\n return features, labels\n\n return _input_fn",
"_____no_output_____"
]
],
[
[
"## Define the subnetwork generator\n\nLet's define a subnetwork generator similar to the one in\n[[Cortes et al., ICML 2017](https://arxiv.org/abs/1607.01097)] and in\n`simple_dnn.py` which creates two candidate fully-connected neural networks at\neach iteration with the same width, but one an additional hidden layer. To make\nour generator *adaptive*, each subnetwork will have at least the same number\nof hidden layers as the most recently added subnetwork to the\n`previous_ensemble`.\n\nWe define the complexity measure function $r$ to be $r(h) = \\sqrt{d(h)}$, where\n$d$ is the number of hidden layers in the neural network $h$, to approximate the\nRademacher bounds from\n[[Golowich et. al, 2017](https://arxiv.org/abs/1712.06541)]. So subnetworks\nwith more hidden layers, and therefore more capacity, will have more heavily\nregularized mixture weights.",
"_____no_output_____"
]
],
[
[
"_NUM_LAYERS_KEY = \"num_layers\"\n\n\nclass _SimpleDNNBuilder(adanet.subnetwork.Builder):\n \"\"\"Builds a DNN subnetwork for AdaNet.\"\"\"\n\n def __init__(self, optimizer, layer_size, num_layers, learn_mixture_weights,\n seed):\n \"\"\"Initializes a `_DNNBuilder`.\n\n Args:\n optimizer: An `Optimizer` instance for training both the subnetwork and\n the mixture weights.\n layer_size: The number of nodes to output at each hidden layer.\n num_layers: The number of hidden layers.\n learn_mixture_weights: Whether to solve a learning problem to find the\n best mixture weights, or use their default value according to the\n mixture weight type. When `False`, the subnetworks will return a no_op\n for the mixture weight train op.\n seed: A random seed.\n\n Returns:\n An instance of `_SimpleDNNBuilder`.\n \"\"\"\n\n self._optimizer = optimizer\n self._layer_size = layer_size\n self._num_layers = num_layers\n self._learn_mixture_weights = learn_mixture_weights\n self._seed = seed\n\n def build_subnetwork(self,\n features,\n logits_dimension,\n training,\n iteration_step,\n summary,\n previous_ensemble=None):\n \"\"\"See `adanet.subnetwork.Builder`.\"\"\"\n\n input_layer = tf.to_float(features[FEATURES_KEY])\n kernel_initializer = tf.glorot_uniform_initializer(seed=self._seed)\n last_layer = input_layer\n for _ in range(self._num_layers):\n last_layer = tf.layers.dense(\n last_layer,\n units=self._layer_size,\n activation=tf.nn.relu,\n kernel_initializer=kernel_initializer)\n logits = tf.layers.dense(\n last_layer,\n units=logits_dimension,\n kernel_initializer=kernel_initializer)\n\n persisted_tensors = {_NUM_LAYERS_KEY: tf.constant(self._num_layers)}\n return adanet.Subnetwork(\n last_layer=last_layer,\n logits=logits,\n complexity=self._measure_complexity(),\n persisted_tensors=persisted_tensors)\n\n def _measure_complexity(self):\n \"\"\"Approximates Rademacher complexity as the square-root of the depth.\"\"\"\n return tf.sqrt(tf.to_float(self._num_layers))\n\n def build_subnetwork_train_op(self, subnetwork, loss, var_list, labels,\n iteration_step, summary, previous_ensemble):\n \"\"\"See `adanet.subnetwork.Builder`.\"\"\"\n return self._optimizer.minimize(loss=loss, var_list=var_list)\n\n def build_mixture_weights_train_op(self, loss, var_list, logits, labels,\n iteration_step, summary):\n \"\"\"See `adanet.subnetwork.Builder`.\"\"\"\n\n if not self._learn_mixture_weights:\n return tf.no_op()\n return self._optimizer.minimize(loss=loss, var_list=var_list)\n\n @property\n def name(self):\n \"\"\"See `adanet.subnetwork.Builder`.\"\"\"\n\n if self._num_layers == 0:\n # A DNN with no hidden layers is a linear model.\n return \"linear\"\n return \"{}_layer_dnn\".format(self._num_layers)\n\n\nclass SimpleDNNGenerator(adanet.subnetwork.Generator):\n \"\"\"Generates a two DNN subnetworks at each iteration.\n\n The first DNN has an identical shape to the most recently added subnetwork\n in `previous_ensemble`. The second has the same shape plus one more dense\n layer on top. This is similar to the adaptive network presented in Figure 2 of\n [Cortes et al. 
ICML 2017](https://arxiv.org/abs/1607.01097), without the\n connections to hidden layers of networks from previous iterations.\n \"\"\"\n\n def __init__(self,\n optimizer,\n layer_size=32,\n learn_mixture_weights=False,\n seed=None):\n \"\"\"Initializes a DNN `Generator`.\n\n Args:\n optimizer: An `Optimizer` instance for training both the subnetwork and\n the mixture weights.\n layer_size: Number of nodes in each hidden layer of the subnetwork\n candidates. Note that this parameter is ignored in a DNN with no hidden\n layers.\n learn_mixture_weights: Whether to solve a learning problem to find the\n best mixture weights, or use their default value according to the\n mixture weight type. When `False`, the subnetworks will return a no_op\n for the mixture weight train op.\n seed: A random seed.\n\n Returns:\n An instance of `Generator`.\n \"\"\"\n\n self._seed = seed\n self._dnn_builder_fn = functools.partial(\n _SimpleDNNBuilder,\n optimizer=optimizer,\n layer_size=layer_size,\n learn_mixture_weights=learn_mixture_weights)\n\n def generate_candidates(self, previous_ensemble, iteration_number,\n previous_ensemble_reports, all_reports):\n \"\"\"See `adanet.subnetwork.Generator`.\"\"\"\n\n num_layers = 0\n seed = self._seed\n if previous_ensemble:\n num_layers = tf.contrib.util.constant_value(\n previous_ensemble.weighted_subnetworks[\n -1].subnetwork.persisted_tensors[_NUM_LAYERS_KEY])\n if seed is not None:\n seed += iteration_number\n return [\n self._dnn_builder_fn(num_layers=num_layers, seed=seed),\n self._dnn_builder_fn(num_layers=num_layers + 1, seed=seed),\n ]",
"_____no_output_____"
]
],
[
[
"## Train and evaluate\n\nNext we create an `adanet.Estimator` using the `SimpleDNNGenerator` we just defined.\n\nIn this section we will show the effects of two hyperparamters: **learning mixture weights** and **complexity regularization**.\n\nOn the righthand side you will be able to play with the hyperparameters of this model. Until you reach the end of this section, we ask that you not change them. \n\nAt first we will not learn the mixture weights, using their default initial value. Here they will be scalars initialized to $1/N$ where $N$ is the number of subnetworks in the ensemble, effectively creating a **uniform average ensemble**.",
"_____no_output_____"
]
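,
[
"As a minimal illustration (not part of the tutorial's code): with uniform mixture weights, the ensemble logits are just the mean of the subnetwork logits, since each weight is $1/N$.\n\n```python\nimport numpy as np\n\n# hypothetical logits from N = 3 subnetworks for a single example\nsubnetwork_logits = np.array([[2.1], [1.8], [2.4]])\n\n# uniform average ensemble: every subnetwork gets weight 1/N\nensemble_logits = subnetwork_logits.mean(axis=0)\nprint(ensemble_logits)  # [2.1], the plain average\n```",
"_____no_output_____"
]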
],
[
[
"#@title AdaNet parameters\nLEARNING_RATE = 0.001 #@param {type:\"number\"}\nTRAIN_STEPS = 100000 #@param {type:\"integer\"}\nBATCH_SIZE = 32 #@param {type:\"integer\"}\n\nLEARN_MIXTURE_WEIGHTS = False #@param {type:\"boolean\"}\nADANET_LAMBDA = 0 #@param {type:\"number\"}\nBOOSTING_ITERATIONS = 5 #@param {type:\"integer\"}\n\n\ndef train_and_evaluate(learn_mixture_weights=LEARN_MIXTURE_WEIGHTS,\n adanet_lambda=ADANET_LAMBDA):\n \"\"\"Trains an `adanet.Estimator` to predict housing prices.\"\"\"\n\n estimator = adanet.Estimator(\n # Since we are predicting housing prices, we'll use a regression\n # head that optimizes for MSE.\n head=tf.contrib.estimator.regression_head(\n loss_reduction=tf.losses.Reduction.SUM_OVER_BATCH_SIZE),\n\n # Define the generator, which defines our search space of subnetworks\n # to train as candidates to add to the final AdaNet model.\n subnetwork_generator=SimpleDNNGenerator(\n optimizer=tf.train.RMSPropOptimizer(learning_rate=LEARNING_RATE),\n learn_mixture_weights=learn_mixture_weights,\n seed=RANDOM_SEED),\n\n # Lambda is a the strength of complexity regularization. A larger\n # value will penalize more complex subnetworks.\n adanet_lambda=adanet_lambda,\n\n # The number of train steps per iteration.\n max_iteration_steps=TRAIN_STEPS // BOOSTING_ITERATIONS,\n\n # The evaluator will evaluate the model on the full training set to\n # compute the overall AdaNet loss (train loss + complexity\n # regularization) to select the best candidate to include in the\n # final AdaNet model.\n evaluator=adanet.Evaluator(\n input_fn=input_fn(\"train\", training=False, batch_size=BATCH_SIZE)),\n\n # Configuration for Estimators.\n config=tf.estimator.RunConfig(\n save_checkpoints_steps=50000,\n save_summary_steps=50000,\n tf_random_seed=RANDOM_SEED))\n\n # Train and evaluate using using the tf.estimator tooling.\n train_spec = tf.estimator.TrainSpec(\n input_fn=input_fn(\"train\", training=True, batch_size=BATCH_SIZE),\n max_steps=TRAIN_STEPS)\n eval_spec = tf.estimator.EvalSpec(\n input_fn=input_fn(\"test\", training=False, batch_size=BATCH_SIZE),\n steps=None)\n return tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)\n\n\ndef ensemble_architecture(result):\n \"\"\"Extracts the ensemble architecture from evaluation results.\"\"\"\n\n architecture = result[\"architecture/adanet/ensembles\"]\n # The architecture is a serialized Summary proto for TensorBoard.\n summary_proto = tf.summary.Summary.FromString(architecture)\n return summary_proto.value[0].tensor.string_val[0]\n\n\nresults, _ = train_and_evaluate()\nprint(\"Loss:\", results[\"average_loss\"])\nprint(\"Architecture:\", ensemble_architecture(results))",
"WARNING:tensorflow:Using temporary folder as model directory: /tmp/tmplcezpthw\nINFO:tensorflow:Using config: {'_save_checkpoints_secs': None, '_experimental_distribute': None, '_service': None, '_task_id': 0, '_is_chief': True, '_master': '', '_evaluation_master': '', '_train_distribute': None, '_model_dir': '/tmp/tmplcezpthw', '_cluster_spec': <tensorflow.python.training.server_lib.ClusterSpec object at 0x7f8074e7df28>, '_keep_checkpoint_every_n_hours': 10000, '_global_id_in_cluster': 0, '_keep_checkpoint_max': 5, '_save_checkpoints_steps': 50000, '_tf_random_seed': 42, '_session_config': allow_soft_placement: true\ngraph_options {\n rewrite_options {\n meta_optimizer_iterations: ONE\n }\n}\n, '_protocol': None, '_device_fn': None, '_save_summary_steps': 50000, '_num_ps_replicas': 0, '_eval_distribute': None, '_num_worker_replicas': 1, '_log_step_count_steps': 100, '_task_type': 'worker'}\nINFO:tensorflow:Running training and evaluation locally (non-distributed).\nINFO:tensorflow:Start train and evaluate loop. The evaluate will happen after every checkpoint. Checkpoint frequency is determined based on RunConfig arguments: save_checkpoints_steps 50000 or save_checkpoints_secs None.\nINFO:tensorflow:Beginning training AdaNet iteration 0\nINFO:tensorflow:Calling model_fn.\nINFO:tensorflow:Done calling model_fn.\nINFO:tensorflow:Create CheckpointSaverHook.\nINFO:tensorflow:Graph was finalized.\nINFO:tensorflow:Running local_init_op.\nINFO:tensorflow:Done running local_init_op.\nINFO:tensorflow:Saving checkpoints for 0 into /tmp/tmplcezpthw/model.ckpt.\nINFO:tensorflow:loss = 21.773132, step = 1\nINFO:tensorflow:global_step/sec: 211.077\nINFO:tensorflow:loss = 0.647101, step = 101 (0.478 sec)\nINFO:tensorflow:global_step/sec: 429.504\nINFO:tensorflow:loss = 0.58654255, step = 201 (0.230 sec)\nINFO:tensorflow:global_step/sec: 423.494\nINFO:tensorflow:loss = 0.07683477, step = 301 (0.236 sec)\nINFO:tensorflow:global_step/sec: 432.097\nINFO:tensorflow:loss = 0.08281773, step = 401 (0.232 sec)\nINFO:tensorflow:global_step/sec: 413.752\nINFO:tensorflow:loss = 0.08148783, step = 501 (0.242 sec)\nINFO:tensorflow:global_step/sec: 459.975\nINFO:tensorflow:loss = 0.056522056, step = 601 (0.219 sec)\nINFO:tensorflow:global_step/sec: 458.298\nINFO:tensorflow:loss = 0.025881834, step = 701 (0.215 sec)\nINFO:tensorflow:global_step/sec: 419.078\nINFO:tensorflow:loss = 0.030095303, step = 801 (0.242 sec)\nINFO:tensorflow:global_step/sec: 455.713\nINFO:tensorflow:loss = 0.03755439, step = 901 (0.220 sec)\nINFO:tensorflow:global_step/sec: 444.218\nINFO:tensorflow:loss = 0.06690022, step = 1001 (0.225 sec)\nINFO:tensorflow:global_step/sec: 451.699\nINFO:tensorflow:loss = 0.03615122, step = 1101 (0.222 sec)\nINFO:tensorflow:global_step/sec: 457.472\nINFO:tensorflow:loss = 0.050185308, step = 1201 (0.218 sec)\nINFO:tensorflow:global_step/sec: 462.55\nINFO:tensorflow:loss = 0.099214725, step = 1301 (0.216 sec)\nINFO:tensorflow:global_step/sec: 436.246\nINFO:tensorflow:loss = 0.026417175, step = 1401 (0.227 sec)\nINFO:tensorflow:global_step/sec: 432.357\nINFO:tensorflow:loss = 0.02078271, step = 1501 (0.231 sec)\nINFO:tensorflow:global_step/sec: 407.845\nINFO:tensorflow:loss = 0.03165562, step = 1601 (0.245 sec)\nINFO:tensorflow:global_step/sec: 409.88\nINFO:tensorflow:loss = 0.041417733, step = 1701 (0.244 sec)\nINFO:tensorflow:global_step/sec: 447.158\nINFO:tensorflow:loss = 0.035114042, step = 1801 (0.226 sec)\nINFO:tensorflow:global_step/sec: 460.335\nINFO:tensorflow:loss = 0.044721745, step = 1901 (0.218 
sec)\nINFO:tensorflow:global_step/sec: 442.593\nINFO:tensorflow:loss = 0.029930545, step = 2001 (0.223 sec)\nINFO:tensorflow:global_step/sec: 459.624\nINFO:tensorflow:loss = 0.04725883, step = 2101 (0.218 sec)\nINFO:tensorflow:global_step/sec: 462.613\nINFO:tensorflow:loss = 0.024880452, step = 2201 (0.218 sec)\nINFO:tensorflow:global_step/sec: 462.628\nINFO:tensorflow:loss = 0.024809994, step = 2301 (0.216 sec)\nINFO:tensorflow:global_step/sec: 442.364\nINFO:tensorflow:loss = 0.022308666, step = 2401 (0.227 sec)\nINFO:tensorflow:global_step/sec: 434.897\nINFO:tensorflow:loss = 0.04762791, step = 2501 (0.227 sec)\nINFO:tensorflow:global_step/sec: 473.857\nINFO:tensorflow:loss = 0.03194421, step = 2601 (0.210 sec)\nINFO:tensorflow:global_step/sec: 461.601\nINFO:tensorflow:loss = 0.033454657, step = 2701 (0.219 sec)\nINFO:tensorflow:global_step/sec: 459.118\nINFO:tensorflow:loss = 0.014480978, step = 2801 (0.218 sec)\nINFO:tensorflow:global_step/sec: 445.164\nINFO:tensorflow:loss = 0.031083336, step = 2901 (0.222 sec)\nINFO:tensorflow:global_step/sec: 447.55\nINFO:tensorflow:loss = 0.026340332, step = 3001 (0.226 sec)\nINFO:tensorflow:global_step/sec: 463.595\nINFO:tensorflow:loss = 0.02651683, step = 3101 (0.213 sec)\nINFO:tensorflow:global_step/sec: 468.642\nINFO:tensorflow:loss = 0.027183883, step = 3201 (0.214 sec)\nINFO:tensorflow:global_step/sec: 463.448\nINFO:tensorflow:loss = 0.035816483, step = 3301 (0.218 sec)\nINFO:tensorflow:global_step/sec: 462.425\nINFO:tensorflow:loss = 0.02551706, step = 3401 (0.214 sec)\nINFO:tensorflow:global_step/sec: 457.828\nINFO:tensorflow:loss = 0.049349364, step = 3501 (0.219 sec)\nINFO:tensorflow:global_step/sec: 463.545\nINFO:tensorflow:loss = 0.024015253, step = 3601 (0.216 sec)\nINFO:tensorflow:global_step/sec: 463.739\nINFO:tensorflow:loss = 0.017241174, step = 3701 (0.216 sec)\nINFO:tensorflow:global_step/sec: 469.595\nINFO:tensorflow:loss = 0.020121489, step = 3801 (0.212 sec)\nINFO:tensorflow:global_step/sec: 468.923\nINFO:tensorflow:loss = 0.021484237, step = 3901 (0.214 sec)\nINFO:tensorflow:global_step/sec: 453.121\nINFO:tensorflow:loss = 0.037488014, step = 4001 (0.221 sec)\nINFO:tensorflow:global_step/sec: 438.323\nINFO:tensorflow:loss = 0.040071916, step = 4101 (0.228 sec)\nINFO:tensorflow:global_step/sec: 410.215\nINFO:tensorflow:loss = 0.021272995, step = 4201 (0.244 sec)\nINFO:tensorflow:global_step/sec: 457.032\nINFO:tensorflow:loss = 0.03338682, step = 4301 (0.219 sec)\nINFO:tensorflow:global_step/sec: 429.693\nINFO:tensorflow:loss = 0.036143243, step = 4401 (0.232 sec)\nINFO:tensorflow:global_step/sec: 432.626\nINFO:tensorflow:loss = 0.039583378, step = 4501 (0.234 sec)\nINFO:tensorflow:global_step/sec: 427.591\nINFO:tensorflow:loss = 0.036702216, step = 4601 (0.235 sec)\nINFO:tensorflow:global_step/sec: 427.303\nINFO:tensorflow:loss = 0.05008479, step = 4701 (0.231 sec)\nINFO:tensorflow:global_step/sec: 453.169\nINFO:tensorflow:loss = 0.0439879, step = 4801 (0.220 sec)\nINFO:tensorflow:global_step/sec: 462.178\nINFO:tensorflow:loss = 0.023454221, step = 4901 (0.217 sec)\nINFO:tensorflow:global_step/sec: 468.888\nINFO:tensorflow:loss = 0.014781383, step = 5001 (0.213 sec)\nINFO:tensorflow:global_step/sec: 463.829\nINFO:tensorflow:loss = 0.020877432, step = 5101 (0.217 sec)\nINFO:tensorflow:global_step/sec: 465.293\nINFO:tensorflow:loss = 0.028106665, step = 5201 (0.212 sec)\nINFO:tensorflow:global_step/sec: 447\nINFO:tensorflow:loss = 0.044017084, step = 5301 (0.227 sec)\nINFO:tensorflow:global_step/sec: 
442.253\nINFO:tensorflow:loss = 0.015634855, step = 5401 (0.223 sec)\nINFO:tensorflow:global_step/sec: 468.506\nINFO:tensorflow:loss = 0.017649759, step = 5501 (0.214 sec)\nINFO:tensorflow:global_step/sec: 425.122\nINFO:tensorflow:loss = 0.026881203, step = 5601 (0.235 sec)\nINFO:tensorflow:global_step/sec: 392.981\nINFO:tensorflow:loss = 0.02515915, step = 5701 (0.255 sec)\nINFO:tensorflow:global_step/sec: 422.847\nINFO:tensorflow:loss = 0.03226296, step = 5801 (0.236 sec)\nINFO:tensorflow:global_step/sec: 372.411\nINFO:tensorflow:loss = 0.014366373, step = 5901 (0.269 sec)\nINFO:tensorflow:global_step/sec: 333.939\nINFO:tensorflow:loss = 0.020684633, step = 6001 (0.303 sec)\nINFO:tensorflow:global_step/sec: 330.503\nINFO:tensorflow:loss = 0.035918076, step = 6101 (0.299 sec)\nINFO:tensorflow:global_step/sec: 357.144\nINFO:tensorflow:loss = 0.052825905, step = 6201 (0.279 sec)\nINFO:tensorflow:global_step/sec: 355.8\nINFO:tensorflow:loss = 0.026814178, step = 6301 (0.283 sec)\nINFO:tensorflow:global_step/sec: 412.615\nINFO:tensorflow:loss = 0.03537807, step = 6401 (0.243 sec)\nINFO:tensorflow:global_step/sec: 331.551\nINFO:tensorflow:loss = 0.041909292, step = 6501 (0.302 sec)\nINFO:tensorflow:global_step/sec: 321.808\nINFO:tensorflow:loss = 0.025281452, step = 6601 (0.311 sec)\n"
]
],
[
[
"These hyperparameters preduce a model that achieves **0.0348** MSE on the test\nset. Notice that the ensemble is composed of 5 subnetworks, each one a hidden\nlayer deeper than the previous. The most complex subnetwork is made of 5 hidden\nlayers.\n\nSince `SimpleDNNGenerator` produces subnetworks of varying complexity, and our\nmodel gives each one an equal weight, AdaNet selected the subnetwork that most\nlowered the ensemble's training loss at each iteration, likely the one with the\nmost hidden layers, since it has the most capacity, and we aren't penalizing\nmore complex subnetworks (yet).\n\nNext, instead of assigning equal weight to each subnetwork, let's learn the\nmixture weights as a convex optimization problem using SGD:",
"_____no_output_____"
]
],
[
[
"#@test {\"skip\": true}\nresults, _ = train_and_evaluate(learn_mixture_weights=True)\nprint(\"Loss:\", results[\"average_loss\"])\nprint(\"Uniform average loss:\", results[\"average_loss/adanet/uniform_average_ensemble\"])\nprint(\"Architecture:\", ensemble_architecture(results))",
"WARNING:tensorflow:Using temporary folder as model directory: /tmp/tmpsbdccn23\nINFO:tensorflow:Using config: {'_save_checkpoints_secs': None, '_experimental_distribute': None, '_service': None, '_task_id': 0, '_is_chief': True, '_master': '', '_evaluation_master': '', '_train_distribute': None, '_model_dir': '/tmp/tmpsbdccn23', '_cluster_spec': <tensorflow.python.training.server_lib.ClusterSpec object at 0x7f8029968a90>, '_keep_checkpoint_every_n_hours': 10000, '_global_id_in_cluster': 0, '_keep_checkpoint_max': 5, '_save_checkpoints_steps': 50000, '_tf_random_seed': 42, '_session_config': allow_soft_placement: true\ngraph_options {\n rewrite_options {\n meta_optimizer_iterations: ONE\n }\n}\n, '_protocol': None, '_device_fn': None, '_save_summary_steps': 50000, '_num_ps_replicas': 0, '_eval_distribute': None, '_num_worker_replicas': 1, '_log_step_count_steps': 100, '_task_type': 'worker'}\nINFO:tensorflow:Running training and evaluation locally (non-distributed).\nINFO:tensorflow:Start train and evaluate loop. The evaluate will happen after every checkpoint. Checkpoint frequency is determined based on RunConfig arguments: save_checkpoints_steps 50000 or save_checkpoints_secs None.\nINFO:tensorflow:Beginning training AdaNet iteration 0\nINFO:tensorflow:Calling model_fn.\nINFO:tensorflow:Done calling model_fn.\nINFO:tensorflow:Create CheckpointSaverHook.\nINFO:tensorflow:Graph was finalized.\nINFO:tensorflow:Running local_init_op.\nINFO:tensorflow:Done running local_init_op.\nINFO:tensorflow:Saving checkpoints for 0 into /tmp/tmpsbdccn23/model.ckpt.\nINFO:tensorflow:loss = 21.773132, step = 1\nINFO:tensorflow:global_step/sec: 151.659\nINFO:tensorflow:loss = 0.6285208, step = 101 (0.661 sec)\nINFO:tensorflow:global_step/sec: 377.914\nINFO:tensorflow:loss = 0.568697, step = 201 (0.264 sec)\nINFO:tensorflow:global_step/sec: 317.447\nINFO:tensorflow:loss = 0.07774219, step = 301 (0.318 sec)\nINFO:tensorflow:global_step/sec: 298.158\nINFO:tensorflow:loss = 0.08270247, step = 401 (0.332 sec)\nINFO:tensorflow:global_step/sec: 421.096\nINFO:tensorflow:loss = 0.08153409, step = 501 (0.237 sec)\nINFO:tensorflow:global_step/sec: 414.588\nINFO:tensorflow:loss = 0.05655239, step = 601 (0.241 sec)\nINFO:tensorflow:global_step/sec: 341.393\nINFO:tensorflow:loss = 0.025883064, step = 701 (0.293 sec)\nINFO:tensorflow:global_step/sec: 366.02\nINFO:tensorflow:loss = 0.030127691, step = 801 (0.275 sec)\nINFO:tensorflow:global_step/sec: 427.488\nINFO:tensorflow:loss = 0.03756215, step = 901 (0.232 sec)\nINFO:tensorflow:global_step/sec: 353.863\nINFO:tensorflow:loss = 0.06788642, step = 1001 (0.285 sec)\nINFO:tensorflow:global_step/sec: 322.318\nINFO:tensorflow:loss = 0.036306262, step = 1101 (0.310 sec)\nINFO:tensorflow:global_step/sec: 413.289\nINFO:tensorflow:loss = 0.05074877, step = 1201 (0.240 sec)\nINFO:tensorflow:global_step/sec: 321.58\nINFO:tensorflow:loss = 0.10058461, step = 1301 (0.311 sec)\nINFO:tensorflow:global_step/sec: 300.699\nINFO:tensorflow:loss = 0.026643617, step = 1401 (0.334 sec)\nINFO:tensorflow:global_step/sec: 318.013\nINFO:tensorflow:loss = 0.020885482, step = 1501 (0.313 sec)\nINFO:tensorflow:global_step/sec: 323.705\nINFO:tensorflow:loss = 0.03239681, step = 1601 (0.315 sec)\nINFO:tensorflow:global_step/sec: 328.631\nINFO:tensorflow:loss = 0.04160305, step = 1701 (0.298 sec)\nINFO:tensorflow:global_step/sec: 397.201\nINFO:tensorflow:loss = 0.0352926, step = 1801 (0.251 sec)\nINFO:tensorflow:global_step/sec: 342.005\nINFO:tensorflow:loss = 0.044745784, step = 1901 (0.296 
sec)\nINFO:tensorflow:global_step/sec: 425.216\nINFO:tensorflow:loss = 0.02993768, step = 2001 (0.233 sec)\nINFO:tensorflow:global_step/sec: 425.851\nINFO:tensorflow:loss = 0.047246575, step = 2101 (0.234 sec)\nINFO:tensorflow:global_step/sec: 290.003\nINFO:tensorflow:loss = 0.024866767, step = 2201 (0.346 sec)\nINFO:tensorflow:global_step/sec: 306.232\nINFO:tensorflow:loss = 0.025053538, step = 2301 (0.332 sec)\nINFO:tensorflow:global_step/sec: 319.194\nINFO:tensorflow:loss = 0.022536863, step = 2401 (0.315 sec)\nINFO:tensorflow:global_step/sec: 327.319\nINFO:tensorflow:loss = 0.04780043, step = 2501 (0.299 sec)\nINFO:tensorflow:global_step/sec: 330.195\nINFO:tensorflow:loss = 0.032027524, step = 2601 (0.302 sec)\nINFO:tensorflow:global_step/sec: 424.554\nINFO:tensorflow:loss = 0.033754565, step = 2701 (0.237 sec)\nINFO:tensorflow:global_step/sec: 415.456\nINFO:tensorflow:loss = 0.014495807, step = 2801 (0.243 sec)\nINFO:tensorflow:global_step/sec: 378.815\nINFO:tensorflow:loss = 0.031205792, step = 2901 (0.259 sec)\nINFO:tensorflow:global_step/sec: 435.675\nINFO:tensorflow:loss = 0.026793242, step = 3001 (0.233 sec)\nINFO:tensorflow:global_step/sec: 445.07\nINFO:tensorflow:loss = 0.02696861, step = 3101 (0.222 sec)\nINFO:tensorflow:global_step/sec: 411.002\nINFO:tensorflow:loss = 0.027100282, step = 3201 (0.243 sec)\nINFO:tensorflow:global_step/sec: 452.535\nINFO:tensorflow:loss = 0.03591666, step = 3301 (0.221 sec)\nINFO:tensorflow:global_step/sec: 390.136\nINFO:tensorflow:loss = 0.025515229, step = 3401 (0.257 sec)\nINFO:tensorflow:global_step/sec: 403.819\nINFO:tensorflow:loss = 0.049373504, step = 3501 (0.247 sec)\nINFO:tensorflow:global_step/sec: 441.761\nINFO:tensorflow:loss = 0.024171133, step = 3601 (0.230 sec)\nINFO:tensorflow:global_step/sec: 438.165\nINFO:tensorflow:loss = 0.017237274, step = 3701 (0.228 sec)\nINFO:tensorflow:global_step/sec: 442.471\nINFO:tensorflow:loss = 0.020128746, step = 3801 (0.224 sec)\nINFO:tensorflow:global_step/sec: 443.692\nINFO:tensorflow:loss = 0.021598278, step = 3901 (0.225 sec)\nINFO:tensorflow:global_step/sec: 433.398\nINFO:tensorflow:loss = 0.03772788, step = 4001 (0.230 sec)\nINFO:tensorflow:global_step/sec: 453.543\nINFO:tensorflow:loss = 0.040997066, step = 4101 (0.220 sec)\nINFO:tensorflow:global_step/sec: 447.837\nINFO:tensorflow:loss = 0.021314848, step = 4201 (0.223 sec)\nINFO:tensorflow:global_step/sec: 449.319\nINFO:tensorflow:loss = 0.03397343, step = 4301 (0.222 sec)\nINFO:tensorflow:global_step/sec: 291.817\nINFO:tensorflow:loss = 0.03742571, step = 4401 (0.343 sec)\nINFO:tensorflow:global_step/sec: 349.156\nINFO:tensorflow:loss = 0.04003142, step = 4501 (0.287 sec)\nINFO:tensorflow:global_step/sec: 444.919\nINFO:tensorflow:loss = 0.037306767, step = 4601 (0.224 sec)\nINFO:tensorflow:global_step/sec: 324.799\nINFO:tensorflow:loss = 0.050043724, step = 4701 (0.308 sec)\nINFO:tensorflow:global_step/sec: 399.035\nINFO:tensorflow:loss = 0.04509888, step = 4801 (0.250 sec)\nINFO:tensorflow:global_step/sec: 342.386\nINFO:tensorflow:loss = 0.023579072, step = 4901 (0.293 sec)\nINFO:tensorflow:global_step/sec: 435.009\nINFO:tensorflow:loss = 0.014783351, step = 5001 (0.230 sec)\nINFO:tensorflow:global_step/sec: 465.426\nINFO:tensorflow:loss = 0.021115372, step = 5101 (0.214 sec)\nINFO:tensorflow:global_step/sec: 379.114\nINFO:tensorflow:loss = 0.02869285, step = 5201 (0.263 sec)\nINFO:tensorflow:global_step/sec: 446.474\nINFO:tensorflow:loss = 0.044227358, step = 5301 (0.224 sec)\nINFO:tensorflow:global_step/sec: 
442.508\nINFO:tensorflow:loss = 0.015665509, step = 5401 (0.229 sec)\nINFO:tensorflow:global_step/sec: 439.36\nINFO:tensorflow:loss = 0.017735064, step = 5501 (0.225 sec)\nINFO:tensorflow:global_step/sec: 452.882\nINFO:tensorflow:loss = 0.026888551, step = 5601 (0.220 sec)\nINFO:tensorflow:global_step/sec: 450.627\nINFO:tensorflow:loss = 0.025225505, step = 5701 (0.224 sec)\nINFO:tensorflow:global_step/sec: 455.843\nINFO:tensorflow:loss = 0.032536294, step = 5801 (0.218 sec)\nINFO:tensorflow:global_step/sec: 453.967\nINFO:tensorflow:loss = 0.014429852, step = 5901 (0.220 sec)\nINFO:tensorflow:global_step/sec: 446.021\nINFO:tensorflow:loss = 0.020685814, step = 6001 (0.226 sec)\nINFO:tensorflow:global_step/sec: 447.631\nINFO:tensorflow:loss = 0.035909995, step = 6101 (0.221 sec)\nINFO:tensorflow:global_step/sec: 454.564\nINFO:tensorflow:loss = 0.053759962, step = 6201 (0.220 sec)\nINFO:tensorflow:global_step/sec: 385.801\nINFO:tensorflow:loss = 0.02680358, step = 6301 (0.263 sec)\nINFO:tensorflow:global_step/sec: 380.766\nINFO:tensorflow:loss = 0.035358958, step = 6401 (0.262 sec)\nINFO:tensorflow:global_step/sec: 384.454\nINFO:tensorflow:loss = 0.04194645, step = 6501 (0.262 sec)\nINFO:tensorflow:global_step/sec: 324.174\nINFO:tensorflow:loss = 0.025395717, step = 6601 (0.309 sec)\n"
]
],
[
[
"Learning the mixture weights produces a model with **0.0449** MSE, a bit worse\nthan the uniform average model, which the `adanet.Estimator` always compute as a\nbaseline. The mixture weights were learned without regularization, so they\nlikely overfit to the training set.\n\nObserve that AdaNet learned the same ensemble composition as the previous run.\nWithout complexity regularization, AdaNet will favor more complex subnetworks,\nwhich may have worse generalization despite improving the empirical error.\n\nFinally, let's apply some **complexity regularization** by using $\\lambda > 0$.\nSince this will penalize more complex subnetworks, AdaNet will select the\ncandidate subnetwork that most improves the objective for its marginal\ncomplexity:",
"_____no_output_____"
]
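,
[
"To make the selection criterion concrete, here is a rough sketch of the penalized objective (simplified from the AdaNet paper; the library's exact bookkeeping differs in details). The complexity term mirrors `_measure_complexity` above, which returns $\\sqrt{d}$ for a depth-$d$ subnetwork.\n\n```python\nimport math\n\ndef penalized_objective(train_loss, num_layers, adanet_lambda):\n    # candidate score = empirical loss + lambda * complexity (sqrt of depth)\n    complexity = math.sqrt(num_layers) if num_layers > 0 else 0.0\n    return train_loss + adanet_lambda * complexity\n\n# toy numbers: a deeper candidate must lower the loss enough to pay for its depth\nprint(penalized_objective(0.030, num_layers=5, adanet_lambda=0.015))  # ~0.0635\nprint(penalized_objective(0.033, num_layers=1, adanet_lambda=0.015))  # 0.048\n```",
"_____no_output_____"
]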
],
[
[
"#@test {\"skip\": true}\nresults, _ = train_and_evaluate(learn_mixture_weights=True, adanet_lambda=.015)\nprint(\"Loss:\", results[\"average_loss\"])\nprint(\"Uniform average loss:\", results[\"average_loss/adanet/uniform_average_ensemble\"])\nprint(\"Architecture:\", ensemble_architecture(results))",
"WARNING:tensorflow:Using temporary folder as model directory: /tmp/tmpyxwongpm\nINFO:tensorflow:Using config: {'_save_checkpoints_secs': None, '_experimental_distribute': None, '_service': None, '_task_id': 0, '_is_chief': True, '_master': '', '_evaluation_master': '', '_train_distribute': None, '_model_dir': '/tmp/tmpyxwongpm', '_cluster_spec': <tensorflow.python.training.server_lib.ClusterSpec object at 0x7f802a6f6668>, '_keep_checkpoint_every_n_hours': 10000, '_global_id_in_cluster': 0, '_keep_checkpoint_max': 5, '_save_checkpoints_steps': 50000, '_tf_random_seed': 42, '_session_config': allow_soft_placement: true\ngraph_options {\n rewrite_options {\n meta_optimizer_iterations: ONE\n }\n}\n, '_protocol': None, '_device_fn': None, '_save_summary_steps': 50000, '_num_ps_replicas': 0, '_eval_distribute': None, '_num_worker_replicas': 1, '_log_step_count_steps': 100, '_task_type': 'worker'}\nINFO:tensorflow:Running training and evaluation locally (non-distributed).\nINFO:tensorflow:Start train and evaluate loop. The evaluate will happen after every checkpoint. Checkpoint frequency is determined based on RunConfig arguments: save_checkpoints_steps 50000 or save_checkpoints_secs None.\nINFO:tensorflow:Beginning training AdaNet iteration 0\nINFO:tensorflow:Calling model_fn.\nINFO:tensorflow:Done calling model_fn.\nINFO:tensorflow:Create CheckpointSaverHook.\nINFO:tensorflow:Graph was finalized.\nINFO:tensorflow:Running local_init_op.\nINFO:tensorflow:Done running local_init_op.\nINFO:tensorflow:Saving checkpoints for 0 into /tmp/tmpyxwongpm/model.ckpt.\nINFO:tensorflow:loss = 21.773132, step = 1\nINFO:tensorflow:global_step/sec: 140.958\nINFO:tensorflow:loss = 0.62784123, step = 101 (0.711 sec)\nINFO:tensorflow:global_step/sec: 316.671\nINFO:tensorflow:loss = 0.56678665, step = 201 (0.315 sec)\nINFO:tensorflow:global_step/sec: 300.513\nINFO:tensorflow:loss = 0.078039765, step = 301 (0.333 sec)\nINFO:tensorflow:global_step/sec: 299.365\nINFO:tensorflow:loss = 0.086782694, step = 401 (0.334 sec)\nINFO:tensorflow:global_step/sec: 305.519\nINFO:tensorflow:loss = 0.08137445, step = 501 (0.327 sec)\nINFO:tensorflow:global_step/sec: 310.289\nINFO:tensorflow:loss = 0.056509923, step = 601 (0.325 sec)\nINFO:tensorflow:global_step/sec: 319.378\nINFO:tensorflow:loss = 0.025883604, step = 701 (0.313 sec)\nINFO:tensorflow:global_step/sec: 216.501\nINFO:tensorflow:loss = 0.030180356, step = 801 (0.462 sec)\nINFO:tensorflow:global_step/sec: 232.224\nINFO:tensorflow:loss = 0.037590638, step = 901 (0.429 sec)\nINFO:tensorflow:global_step/sec: 249.671\nINFO:tensorflow:loss = 0.06694432, step = 1001 (0.405 sec)\nINFO:tensorflow:global_step/sec: 237.714\nINFO:tensorflow:loss = 0.038478173, step = 1101 (0.416 sec)\nINFO:tensorflow:global_step/sec: 321.145\nINFO:tensorflow:loss = 0.04998316, step = 1201 (0.311 sec)\nINFO:tensorflow:global_step/sec: 242.151\nINFO:tensorflow:loss = 0.09006661, step = 1301 (0.417 sec)\nINFO:tensorflow:global_step/sec: 308.934\nINFO:tensorflow:loss = 0.026879994, step = 1401 (0.319 sec)\nINFO:tensorflow:global_step/sec: 255.401\nINFO:tensorflow:loss = 0.021093277, step = 1501 (0.393 sec)\nINFO:tensorflow:global_step/sec: 332.521\nINFO:tensorflow:loss = 0.03607753, step = 1601 (0.300 sec)\nINFO:tensorflow:global_step/sec: 312.926\nINFO:tensorflow:loss = 0.03416162, step = 1701 (0.322 sec)\nINFO:tensorflow:global_step/sec: 211.064\nINFO:tensorflow:loss = 0.04626117, step = 1801 (0.471 sec)\nINFO:tensorflow:global_step/sec: 281.592\nINFO:tensorflow:loss = 0.07378492, step = 1901 (0.356 
sec)\nINFO:tensorflow:global_step/sec: 282.328\nINFO:tensorflow:loss = 0.049188316, step = 2001 (0.354 sec)\nINFO:tensorflow:global_step/sec: 308.875\nINFO:tensorflow:loss = 0.078179166, step = 2101 (0.323 sec)\nINFO:tensorflow:global_step/sec: 334.139\nINFO:tensorflow:loss = 0.03029899, step = 2201 (0.299 sec)\nINFO:tensorflow:global_step/sec: 294.106\nINFO:tensorflow:loss = 0.024719719, step = 2301 (0.341 sec)\nINFO:tensorflow:global_step/sec: 332.18\nINFO:tensorflow:loss = 0.024992699, step = 2401 (0.301 sec)\nINFO:tensorflow:global_step/sec: 374.081\nINFO:tensorflow:loss = 0.04709203, step = 2501 (0.268 sec)\nINFO:tensorflow:global_step/sec: 368.409\nINFO:tensorflow:loss = 0.047214545, step = 2601 (0.270 sec)\nINFO:tensorflow:global_step/sec: 364.516\nINFO:tensorflow:loss = 0.038211394, step = 2701 (0.274 sec)\nINFO:tensorflow:global_step/sec: 345.828\nINFO:tensorflow:loss = 0.03274207, step = 2801 (0.294 sec)\nINFO:tensorflow:global_step/sec: 357.417\nINFO:tensorflow:loss = 0.04549656, step = 2901 (0.279 sec)\nINFO:tensorflow:global_step/sec: 352.133\nINFO:tensorflow:loss = 0.035480063, step = 3001 (0.285 sec)\nINFO:tensorflow:global_step/sec: 344.663\nINFO:tensorflow:loss = 0.024679933, step = 3101 (0.286 sec)\nINFO:tensorflow:global_step/sec: 382.242\nINFO:tensorflow:loss = 0.041259166, step = 3201 (0.261 sec)\nINFO:tensorflow:global_step/sec: 352.471\nINFO:tensorflow:loss = 0.04356738, step = 3301 (0.284 sec)\nINFO:tensorflow:global_step/sec: 384.285\nINFO:tensorflow:loss = 0.034602944, step = 3401 (0.259 sec)\nINFO:tensorflow:global_step/sec: 364.285\nINFO:tensorflow:loss = 0.069668576, step = 3501 (0.275 sec)\nINFO:tensorflow:global_step/sec: 371.728\nINFO:tensorflow:loss = 0.034798123, step = 3601 (0.273 sec)\nINFO:tensorflow:global_step/sec: 354.306\nINFO:tensorflow:loss = 0.021452527, step = 3701 (0.285 sec)\nINFO:tensorflow:global_step/sec: 350.869\nINFO:tensorflow:loss = 0.02612273, step = 3801 (0.283 sec)\nINFO:tensorflow:global_step/sec: 335.128\nINFO:tensorflow:loss = 0.031272262, step = 3901 (0.299 sec)\nINFO:tensorflow:global_step/sec: 342.451\nINFO:tensorflow:loss = 0.05301467, step = 4001 (0.286 sec)\nINFO:tensorflow:global_step/sec: 341.576\nINFO:tensorflow:loss = 0.02896322, step = 4101 (0.293 sec)\nINFO:tensorflow:global_step/sec: 366.845\nINFO:tensorflow:loss = 0.022142775, step = 4201 (0.277 sec)\nINFO:tensorflow:global_step/sec: 342.606\nINFO:tensorflow:loss = 0.02221645, step = 4301 (0.291 sec)\nINFO:tensorflow:global_step/sec: 306.676\nINFO:tensorflow:loss = 0.027055696, step = 4401 (0.323 sec)\nINFO:tensorflow:global_step/sec: 291.316\nINFO:tensorflow:loss = 0.050597515, step = 4501 (0.347 sec)\nINFO:tensorflow:global_step/sec: 353.302\nINFO:tensorflow:loss = 0.02597157, step = 4601 (0.283 sec)\nINFO:tensorflow:global_step/sec: 326.918\nINFO:tensorflow:loss = 0.079174936, step = 4701 (0.303 sec)\nINFO:tensorflow:global_step/sec: 356.635\nINFO:tensorflow:loss = 0.034027025, step = 4801 (0.280 sec)\nINFO:tensorflow:global_step/sec: 353.448\nINFO:tensorflow:loss = 0.033307478, step = 4901 (0.283 sec)\nINFO:tensorflow:global_step/sec: 384.233\nINFO:tensorflow:loss = 0.02684283, step = 5001 (0.261 sec)\nINFO:tensorflow:global_step/sec: 343.57\nINFO:tensorflow:loss = 0.039310887, step = 5101 (0.295 sec)\nINFO:tensorflow:global_step/sec: 358.382\nINFO:tensorflow:loss = 0.030656522, step = 5201 (0.277 sec)\nINFO:tensorflow:global_step/sec: 346.319\nINFO:tensorflow:loss = 0.078128755, step = 5301 (0.286 sec)\nINFO:tensorflow:global_step/sec: 
321.706\nINFO:tensorflow:loss = 0.021291938, step = 5401 (0.315 sec)\nINFO:tensorflow:global_step/sec: 319.996\nINFO:tensorflow:loss = 0.032513306, step = 5501 (0.308 sec)\nINFO:tensorflow:global_step/sec: 342.397\nINFO:tensorflow:loss = 0.028400544, step = 5601 (0.293 sec)\nINFO:tensorflow:global_step/sec: 317.27\nINFO:tensorflow:loss = 0.034857225, step = 5701 (0.321 sec)\nINFO:tensorflow:global_step/sec: 316.78\nINFO:tensorflow:loss = 0.037171274, step = 5801 (0.314 sec)\nINFO:tensorflow:global_step/sec: 338.394\nINFO:tensorflow:loss = 0.017138816, step = 5901 (0.290 sec)\nINFO:tensorflow:global_step/sec: 329.102\nINFO:tensorflow:loss = 0.030491471, step = 6001 (0.312 sec)\nINFO:tensorflow:global_step/sec: 349.063\nINFO:tensorflow:loss = 0.048120163, step = 6101 (0.279 sec)\nINFO:tensorflow:global_step/sec: 339.279\nINFO:tensorflow:loss = 0.044583093, step = 6201 (0.295 sec)\nINFO:tensorflow:global_step/sec: 339.525\nINFO:tensorflow:loss = 0.04749337, step = 6301 (0.295 sec)\nINFO:tensorflow:global_step/sec: 334.616\nINFO:tensorflow:loss = 0.07128422, step = 6401 (0.304 sec)\nINFO:tensorflow:global_step/sec: 331.25\nINFO:tensorflow:loss = 0.05821591, step = 6501 (0.296 sec)\nINFO:tensorflow:global_step/sec: 335.526\nINFO:tensorflow:loss = 0.019353827, step = 6601 (0.298 sec)\n"
]
],
[
[
"Learning the mixture weights with $\\lambda > 0$ produces a model with **0.0320**\nMSE. Notice that this is even better than the uniform average ensemble produced\nfrom the chosen subnetworks with **0.0345** MSE.\n\nInspecting the ensemble architecture demonstrates the effects of complexity\nregularization on candidate selection. The selected subnetworks are relatively\nless complex: unlike in previous runs, the simplest subnetwork is a linear model\nand the deepest subnetwork has only 3 hidden layers.\n\nIn general, learning to combine subnetwork ouputs with optimal hyperparameters\nshould be at least as good assigning uniform average weights.",
"_____no_output_____"
],
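[
"A quick programmatic check of this claim for any run: compare the learned-mixture loss with the uniform-average baseline that the estimator reports (both keys already appear in the evaluation results above).\n\n```python\ndef mixture_weights_helped(results):\n    learned = results[\"average_loss\"]\n    uniform = results[\"average_loss/adanet/uniform_average_ensemble\"]\n    return learned <= uniform, learned, uniform\n```",
"_____no_output_____"
],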
[
"## Conclusion\n\nIn this tutorial, you were able to explore training an AdaNet model's mixture\nweights with $\\lambda \\ge 0$. You were also able to compare against building an\nensemble formed by always choosing the best candidate subnetwork at each\niteration based on it's ability to improve the ensemble's loss on the training\nset, and averaging their results.\n\nUniform average ensembles work unreasonably well in practice, yet learning the\nmixture weights with the correct values of $\\lambda$ and $\\beta$ should always\nproduce a better model when candidates have varying complexity. However, this\ndoes require some additional hyperparameter tuning, so practically you can train\nan AdaNet with the default mixture weights and $\\lambda=0$ first, and once you\nhave confirmed that the subnetworks are training correctly, you can tune the\nmixture weight hyperparameters.\n\nWhile this example explored a regression task, these observations apply to using\nAdaNet on other tasks like binary-classification and multi-class classification.",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
]
] |
e7fc62ad1656604644187e2d1edff78d0323ed9e | 5,614 | ipynb | Jupyter Notebook | CNN/Heatmap_demo.ipynb | ucl-exoplanets/DI-Project | a05eeb66b14187bb18618f8cde17dc0f2c435ff8 | [
"CC-BY-4.0"
] | 3 | 2019-12-05T16:44:40.000Z | 2022-03-07T22:35:31.000Z | CNN/Heatmap_demo.ipynb | ucl-exoplanets/DI-Project | a05eeb66b14187bb18618f8cde17dc0f2c435ff8 | [
"CC-BY-4.0"
] | 2 | 2021-05-28T19:11:05.000Z | 2021-05-31T13:22:54.000Z | CNN/Heatmap_demo.ipynb | ucl-exoplanets/DI-Project | a05eeb66b14187bb18618f8cde17dc0f2c435ff8 | [
"CC-BY-4.0"
] | 2 | 2020-07-15T17:31:17.000Z | 2020-10-21T19:24:42.000Z | 22.821138 | 128 | 0.519238 | [
[
[
"import keras.backend as K\nimport numpy as np\nfrom keras.models import Model,load_model\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.axes_grid1 import ImageGrid\n",
"_____no_output_____"
]
],
[
[
"## load test data and loc_map",
"_____no_output_____"
]
],
[
[
"test = np.load(\"checkpt/test_data.npy\")\nloc_map = np.load(\"checkpt/test_loc_map.npy\")\ntest_label = np.loadtxt(\"checkpt/test_label.txt\")",
"_____no_output_____"
],
[
"test_label.shape",
"_____no_output_____"
]
],
[
[
"## read checkpoint",
"_____no_output_____"
]
],
[
[
"model = load_model(\"checkpt/ckt/checkpt_0.h5\")",
"_____no_output_____"
],
[
"model.summary()",
"_____no_output_____"
],
[
"pred = model.predict(test)",
"_____no_output_____"
]
],
[
[
"## get True Positive ",
"_____no_output_____"
]
],
[
[
"## .argmax(axis =1 ) will return the biggest value of the two as 1, and the other as 0. i.e. [0.6 ,0.9] will give [0,1]\n## this is a good format as our test_label is organised in [0,1] or [1,0] format.",
"_____no_output_____"
],
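[
"# quick sanity check of the argmax trick on toy values (illustrative, not model output)\ntoy_pred = np.array([[0.6, 0.9], [0.8, 0.2]])\ntoy_label = np.array([[0, 1], [1, 0]])\nprint(toy_pred.argmax(axis=1))   # [1 0] -> predicted class indices\nprint(toy_label.argmax(axis=1))  # [1 0] -> true class indices\nprint(toy_pred.argmax(axis=1) == toy_label.argmax(axis=1))  # [ True  True]",
"_____no_output_____"
],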
[
"TP = np.where(pred.argmax(axis=1) == test_label.argmax(axis=1))",
"_____no_output_____"
],
[
"## I will suggest to access the confidence of the predication. Usually we want 0.9 at least",
"_____no_output_____"
],
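[
"# one possible refinement, as suggested above: keep only confident true positives\n# (the 0.9 threshold is an assumption; tune as needed)\nconfidence = pred.max(axis=1)\nconfident_TP = np.where((pred.argmax(axis=1) == test_label.argmax(axis=1)) & (confidence >= 0.9))\nprint(len(confident_TP[0]), 'confident true positives')",
"_____no_output_____"
],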
[
"def return_heatmap(model, org_img, normalise=True):\n ## CAM code implementation\n ## we need to extract the last conv layer, and that depends on your architecture.\n test_img = model.output[:, 1]\n last_conv_layer = model.get_layer('conv2d_6')\n grads = K.gradients(test_img, last_conv_layer.output)[0]\n\n pooled_grads = K.mean(grads, axis=(0, 1, 2))\n message = K.print_tensor(pooled_grads, message='pool_grad = ')\n iterate = K.function([model.input, K.learning_phase()],\n [message, last_conv_layer.output[0]])\n pooled_grads_value, conv_layer_output_value = iterate([org_img.reshape(-1, 64, 64, 1), 0])\n for i in range(conv_layer_output_value.shape[2]):\n conv_layer_output_value[:, :, i] *= pooled_grads_value[i]\n heatmap = np.mean(conv_layer_output_value, axis=-1)\n if normalise:\n heatmap = np.maximum(heatmap, 0)\n heatmap /= np.max(heatmap)\n return heatmap",
"_____no_output_____"
],
[
"def plot_heatmap(heatmap, loc_map):\n\n fig = plt.figure(figsize=(16, 8))\n\n grid = ImageGrid(fig, 111, # as in plt.subplot(111)\n nrows_ncols=(1, 2),\n axes_pad=0.15,\n share_all=True,\n )\n\n # Add data to image grid\n im = grid[0].imshow(heatmap)\n im = grid[1].imshow(loc_map)\n plt.show()\n",
"_____no_output_____"
]
],
[
[
"## Calculate and plot heatmap",
"_____no_output_____"
]
],
[
[
"num = -1",
"_____no_output_____"
],
[
"heatmap = return_heatmap(model, test[num])",
"_____no_output_____"
],
[
"plot_heatmap(heatmap, loc_map[num])",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
]
] |
e7fc77289c0909639a1412adf0774f67380a3fe5 | 14,505 | ipynb | Jupyter Notebook | DecisionTree/MyDecisionTree.ipynb | QYHcrossover/ML-numpy | 863cc651ac38bc421e3b6e99f36a51267f0de0f9 | [
"MIT"
] | 12 | 2020-07-01T02:35:12.000Z | 2022-03-29T13:19:44.000Z | DecisionTree/MyDecisionTree.ipynb | QYHcrossover/ML-numpy | 863cc651ac38bc421e3b6e99f36a51267f0de0f9 | [
"MIT"
] | null | null | null | DecisionTree/MyDecisionTree.ipynb | QYHcrossover/ML-numpy | 863cc651ac38bc421e3b6e99f36a51267f0de0f9 | [
"MIT"
] | 2 | 2021-11-18T08:02:38.000Z | 2021-12-08T02:53:38.000Z | 26.712707 | 114 | 0.366563 | [
[
[
"import pandas as pd\nfrom sklearn.model_selection import train_test_split",
"_____no_output_____"
]
],
[
[
"## 构造数据集",
"_____no_output_____"
]
],
[
[
"def create_data():\n datasets = [['青年', '否', '否', '一般', '否'],\n ['青年', '否', '否', '好', '否'],\n ['青年', '是', '否', '好', '是'],\n ['青年', '是', '是', '一般', '是'],\n ['青年', '否', '否', '一般', '否'],\n ['中年', '否', '否', '一般', '否'],\n ['中年', '否', '否', '好', '否'],\n ['中年', '是', '是', '好', '是'],\n ['中年', '否', '是', '非常好', '是'],\n ['中年', '否', '是', '非常好', '是'],\n ['老年', '否', '是', '非常好', '是'],\n ['老年', '否', '是', '好', '是'],\n ['老年', '是', '否', '好', '是'],\n ['老年', '是', '否', '非常好', '是'],\n ['老年', '否', '否', '一般', '否'],\n ]\n labels = [u'年龄', u'有工作', u'有自己的房子', u'信贷情况', u'类别']\n # 返回数据集和每个维度的名称\n return datasets, labels",
"_____no_output_____"
],
[
"dataset,columns = create_data()\nX,y = np.array(dataset)[:,:-1],np.array(dataset)[:,-1]\nX_train, X_test, y_train, y_test = train_test_split(X,y,test_size=1)",
"_____no_output_____"
],
[
"pd.DataFrame(datasets, columns=labels)",
"_____no_output_____"
]
],
[
[
"### 计算信息熵",
"_____no_output_____"
]
],
[
[
"def entropy(y):\n precs = np.array(list(Counter(y).values()))/len(y)\n ent = np.sum(-1 * precs * np.log(precs))\n return ent",
"_____no_output_____"
],
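[
"# sanity check: a 50/50 label split should give entropy ln(2) ≈ 0.6931 (natural log is used here)\nentropy(np.array(['是', '是', '否', '否']))",
"_____no_output_____"
],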
[
"entropy(y_train)",
"_____no_output_____"
]
],
[
[
"### 决定使用哪个特征分割",
"_____no_output_____"
]
],
[
[
"def decide_feature(X,y,feature_order):\n n_features = X.shape[-1]\n ents = (feature_order != -1).astype(np.float64)\n for i in range(n_features):\n if feature_order[i] >= 0:\n continue\n for feature,size in Counter(X[:,i]).items():\n index = (X[:,i] == feature)\n splity = y[index]\n ent = entropy(splity)\n ents[i] += ent*size/len(X)\n fi = np.argmin(ents)\n return fi,ents[fi]",
"_____no_output_____"
],
[
"decide_feature(X_train,y_train,np.array([-1,-1,-1,-1]))",
"_____no_output_____"
]
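,
[
"# the information gain of the chosen feature = parent entropy - weighted child entropy\nfi, ent = decide_feature(X_train, y_train, np.array([-1, -1, -1, -1]))\nprint('best feature:', fi, 'information gain:', entropy(y_train) - ent)",
"_____no_output_____"
]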
],
[
[
"### 构建决策树",
"_____no_output_____"
]
],
[
[
"def build_tree(X,y,feature_order):\n curent = entropy(y)\n counts = dict(Counter(y))\n if len(counts) == 1 or min(feature_order) == 0:\n result = max(counts,key=counts.get)\n return {\"counts\":counts,\"result\":result}\n fi,ent = decide_feature(X,y,feature_order)\n feature_order[fi] = max(feature_order)+1 \n result = None\n next_ = {}\n for value,_ in Counter(X[:,fi]).items():\n next_[value] = build_tree(X[X[:,fi]==value],y[X[:,fi]==value],feature_order)\n return {\"feature\":fi,\"entgain\":curent-ent,\"counts\":counts,\"result\":result,\"next\":next_}",
"_____no_output_____"
],
[
"tree = build_tree(X_train,y_train,np.array([-1,-1,-1,-1]))\ntree",
"是 6\n否 8\n是 2\n否 6\n"
]
],
[
[
"### predict",
"_____no_output_____"
]
],
[
[
"x_test = X_test[0]\nprint(x_test)\nwhile tree[\"result\"] == None:\n feature = tree[\"feature\"]\n nexttree = tree[\"next\"][x_test[feature]]\n tree = nexttree\nprint(tree[\"result\"])",
"['老年' '是' '否' '非常好']\n是\n"
],
[
"class ID3DecisionTree:\n @staticmethod\n def entropy(y):\n precs = np.array(list(Counter(y).values()))/len(y)\n ent = np.sum(-1 * precs * np.log(precs))\n return ent\n \n def decide_feature(self,X,y,feature_order):\n n_features = X.shape[-1]\n ents = (feature_order != -1).astype(np.float64)\n for i in range(n_features):\n if feature_order[i] >= 0:\n continue\n for feature,size in Counter(X[:,i]).items():\n index = (X[:,i] == feature)\n splity = y[index]\n ent = ID3DecisionTree.entropy(splity)\n ents[i] += ent*size/len(X)\n fi = np.argmin(ents)\n return fi,ents[fi]\n \n def build_tree(self,X,y,feature_order):\n curent = ID3DecisionTree.entropy(y)\n counts = dict(Counter(y))\n if len(counts) == 1 or min(feature_order) == 0:\n result = max(counts,key=counts.get)\n return {\"counts\":counts,\"result\":result}\n fi,ent = self.decide_feature(X,y,feature_order)\n feature_order[fi] = max(feature_order)+1 \n result = None\n next_ = {}\n for value,_ in Counter(X[:,fi]).items():\n next_[value] = self.build_tree(X[X[:,fi]==value],y[X[:,fi]==value],feature_order)\n return {\"feature\":fi,\"entgain\":curent-ent,\"counts\":counts,\"result\":result,\"next\":next_}\n \n def fit(self,X,y):\n feature_order = -1 * np.ones(X.shape[-1])\n self.tree = self.build_tree(X,y,feature_order)\n \n def predict(self,X):\n y = []\n for i in range(len(X)):\n x_test = X[i]\n tree = self.tree\n while tree[\"result\"] == None:\n feature = tree[\"feature\"]\n nexttree = tree[\"next\"][x_test[feature]]\n tree = nexttree\n y.append(tree[\"result\"])\n return y",
"_____no_output_____"
],
[
"dt = ID3DecisionTree()\ndt.fit(X_train,y_train)",
"_____no_output_____"
],
[
"dt.predict(X_test)",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
]
] |
e7fc77440c1c968eb342fafe83b27e13330c8300 | 39,893 | ipynb | Jupyter Notebook | ActiveDebrisRemoval.ipynb | jbrneto/active-debris-removal | 361b18731cf9a0e55f4dacef8c1f3b3d16b74abd | [
"MIT"
] | null | null | null | ActiveDebrisRemoval.ipynb | jbrneto/active-debris-removal | 361b18731cf9a0e55f4dacef8c1f3b3d16b74abd | [
"MIT"
] | null | null | null | ActiveDebrisRemoval.ipynb | jbrneto/active-debris-removal | 361b18731cf9a0e55f4dacef8c1f3b3d16b74abd | [
"MIT"
] | null | null | null | 44.325556 | 181 | 0.504926 | [
[
[
"!pip install pykep\n!pip install -U TLE-tools\n!pip install astropy",
"_____no_output_____"
],
[
"import random\nimport bisect\nimport numpy\nimport scipy\nimport copy\nfrom datetime import datetime\nfrom datetime import timedelta\n# -- for debris\nimport math\nimport csv\nfrom google.colab import files\nfrom google.colab import drive\nfrom tletools import TLE\nfrom astropy import constants\nimport pykep\n# -- for ploting\nimport datetime\nimport json\nimport time\nimport networkx as nx\nfrom matplotlib import pyplot as plt",
"_____no_output_____"
]
],
[
[
"# Genetic algorithm\nThe implementation uses the inver-over genetic operator to optimize the static sequence of debris based on the transference cost of the arcs.\n\nAlso, the implementation uses **index_frozen** to model the already deorbited debris.",
"_____no_output_____"
]
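,
[
"A minimal sketch of the core inver-over move (illustrative; it mirrors the non-wrap-around branch of `inver_over` below): pick a gene `c1`, take `c2` as the successor of `c1` in another individual, and invert the segment so that `c2` becomes adjacent to `c1`.\n\n```python\ndef invert_segment(tour, c1, c2):\n    # reverse the slice after c1 up to and including c2 (one inver-over step)\n    i, j = tour.index(c1), tour.index(c2)\n    if i < j:\n        return tour[:i + 1] + list(reversed(tour[i + 1:j + 1])) + tour[j + 1:]\n    return tour  # the wrap-around case is handled separately in the class\n\nprint(invert_segment([0, 1, 2, 3, 4, 5], 1, 4))  # [0, 1, 4, 3, 2, 5]\n```",
"_____no_output_____"
]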
],
[
[
"class GA:\n def __init__(self, population, fn_fitness, subpath_fn_fitness=None):\n self.population = population\n self.index_frozen = -1\n self.fitnesses = [] # fitness for each individual in population\n self.fn_fitness = fn_fitness # fitness function for the whole path\n self.subpath_fn_fitness = subpath_fn_fitness # fitness function for a subpath\n\n # freezes a debris in all individuals\n def freeze_first(self, frozen):\n self.index_frozen += 1\n for i in range(len(self.population)):\n del self.population[i][self.population[i].index(frozen)]\n self.population[i].insert(self.index_frozen, frozen)\n\n # decay a debris in all individuals\n def decay(self, decayed_debris):\n for i in range(len(self.population)):\n for x in decayed_debris:\n if x in self.population[i]:\n del self.population[i][self.population[i].index(x)]\n\n # force a first debris for all individuals\n def startBy(self, debris):\n for i in range(len(self.population)):\n pos = self.population[i].index(debris)\n self.population[i] = self.population[i][pos:] + self.population[i][:pos]\n\n # returns the best individual\n def getBest(self):\n self.fit_population()\n best = min(self.fitnesses)\n return self.population[self.fitnesses.index(best)]\n\n # run the inverover to optimize the static case\n \"\"\"\n tinv : int : number of iterations\n feach : int : milestone to run kopt on the population\n runkopt : int : iterations of kopt\n forn : int : how many of the best individuals goes to kopt\n \"\"\"\n def run_inverover(self, tinv=1000, feach=1000, runkopt=100, forn=None):\n self.fit_population()\n self.inver_over(tinv, feach, runkopt, forn)\n self.fit_population()\n best = min(self.fitnesses)\n return self.population[self.fitnesses.index(best)]\n\n # select a random element of the population\n def selectUniform(self):\n return self.population[random.randrange(0, len(self.population))]\n\n # calculate the fitness for all individuals\n def fit_population(self):\n if self.index_frozen >= 0:\n self.fitnesses = list(map(lambda x: self.subpath_fn_fitness(x[self.index_frozen:]), self.population))\n else:\n self.fitnesses = list(map(lambda x: self.fn_fitness(x), self.population))\n\n # run the stochastic kopt for the population\n \"\"\"\n permuts : int : number of iterations\n elite : int : how many of the best shoud be processed\n \"\"\"\n def koptStochastic(self, permuts=100, elite=None):\n indexes = range(len(self.population))\n if elite is not None:\n indexes = numpy.array(self.fitnesses).argsort()[:elite]\n\n for x in indexes:\n indv = self.population[x]\n useds = {}\n changed = False\n\n for _ in range(0, permuts):\n valid = False\n while not valid:\n i = random.randrange(self.index_frozen+1, len(indv))\n j = i\n\n while j == i: j = random.randrange(self.index_frozen+1, len(indv))\n\n if (i, j) not in useds:\n valid = True\n\n useds[(i, j)] = True\n\n if j < i:\n temp = i\n i = j\n j = temp\n\n if self.subpath_fn_fitness(list(reversed(indv[i:j+1]))) < self.subpath_fn_fitness(indv[i:j+1]):\n changed = True\n indv = indv[0:i] + list(reversed(indv[i:j+1])) + indv[j+1:]\n\n if changed:\n self.population[x] = indv\n self.fitnesses[x] = self.subpath_fn_fitness(indv[self.index_frozen+1:])\n\n # run the ranged kopt for one individual\n \"\"\"\n indv : array : the individual\n maxrange : int : the range of analysis around the individual\n \"\"\"\n def ranged2opt(self, indv, maxrange=10):\n ranger = indv[len(indv)-maxrange:] + indv[self.index_frozen+1: self.index_frozen+maxrange+2]\n if len(set(ranger)) != len(ranger):\n return indv\n\n 
fit = self.subpath_fn_fitness(ranger)\n changed = True\n\n while changed:\n changed = False\n for i in range(len(ranger)):\n for j in range(len(ranger)):\n new_ranger = ranger[0:i] + list(reversed(ranger[i:j+1])) + ranger[j+1:]\n new_fit = self.subpath_fn_fitness(new_ranger)\n if new_fit < fit:\n fit = new_fit\n ranger = new_ranger\n changed = True\n break\n if changed:\n break\n\n indv[len(indv)-maxrange:] = ranger[:maxrange]\n indv[self.index_frozen+1: self.index_frozen+maxrange+2] = ranger[maxrange:]\n\n return indv\n\n # run the inverover for the population\n \"\"\"\n tinv : int : number of iterations\n feach : int : milestone to run kopt on the population\n runkopt : int : iterations of kopt\n forn : int : how many of the best individuals goes to kopt\n \"\"\"\n def inver_over(self, tinv, feach, runkopt, forn):\n for w in range(tinv):\n\n if w % feach == 0:\n self.koptStochastic(runkopt, forn)\n\n for i in range(len(self.population)):\n tmp = self.population[i]\n c1 = tmp[random.randrange(0, len(tmp))]\n\n changed = False\n\n while True:\n sl = self.population[i]\n c2 = c1\n\n while sl == self.population[i]: sl = self.selectUniform()\n c2 = sl[(sl.index(c1) + 1) % len(sl)]\n\n pos_c1 = tmp.index(c1)\n pos_c2 = tmp.index(c2)\n\n # if the genes are adjacent\n if c2 in [ tmp[pos_c1-1], tmp[(pos_c1 + 1) % len(tmp)] ]:\n break\n # elif and else reverse a subset of chromosome\n elif pos_c2 > pos_c1:\n changed = True\n c1 = tmp[(pos_c2 + 1) % len(tmp)]\n tmp = tmp[:pos_c1+1] + list(reversed(tmp[pos_c1+1:pos_c2+1])) + tmp[pos_c2+1:]\n else:\n changed = True\n c1 = tmp[pos_c2-1]\n inverted = list(reversed(tmp[pos_c1:] + tmp[:pos_c2]))\n div_pos = len(tmp)-pos_c1\n tmp = inverted[div_pos:] + tmp[pos_c2:pos_c1] + inverted[:div_pos]\n\n if changed:\n fit_tmp = self.fn_fitness(tmp)\n if fit_tmp < self.fitnesses[i]:\n self.population[i] = tmp\n self.fitnesses[i] = fit_tmp",
"_____no_output_____"
]
],
[
[
"# Problem instance\nThe active debris removal problem is going to be modeled as a complex variant of Traveling Salesman Problem (TSP), the time-dependent TSP (TDTSP).\n\nThe debris are the nodes and the dynamic transference trajectories are the edges.\n\nAlso, the Max Open Walk is used to find for the optimized subpath.",
"_____no_output_____"
]
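,
[
"To make the Max Open Walk idea concrete, here is a toy sketch over a fixed sequence of arc costs (budget-limited extension, as in `maxOpenWalk` below; rewards are assumed uniform here, so the best walk is simply the longest affordable run of consecutive arcs).\n\n```python\ndef toy_max_open_walk(costs, cost_limit):\n    # costs[i] is the transfer cost between debris i and i+1 in the sequence\n    best = (0, 0)  # (number of arcs, start index)\n    for i in range(len(costs)):\n        total, j = 0, i\n        while j < len(costs) and total + costs[j] <= cost_limit:\n            total += costs[j]\n            j += 1\n        best = max(best, (j - i, i))\n    return best\n\nprint(toy_max_open_walk([300, 900, 200, 250, 400], cost_limit=1000))  # (3, 2): arcs 2,3,4\n```",
"_____no_output_____"
]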
],
[
[
"class StaticDebrisTSP:\n mu = 398600800000000 # gravitational parameter of earth\n re = 6378100 # radius of earth\n\n def __init__(self, debris=[], weight_matrix=[], reward_matrix=[], path_size=0, population_size=100, epoch=None, hohmanncost=False):\n self.index_frozen = -1\n self.debris = debris # the debris cloud\n self.reward_matrix = reward_matrix # the removal reward per debris\n self.kepler_elements = [] # kepler elements of the debris\n self.decayed_debris = [] # decayed debris\n self.hohmanncost=hohmanncost # if the cost is calculated with hohmann\n\n if epoch is not None:\n self.epoch = epoch\n else:\n epoch = pykep.epoch_from_string(\"2021-06-11 00:06:09\")\n\n is_matrix = len(weight_matrix) != 0\n\n # size of a indivual\n self.size = path_size if path_size != 0 else (len(weight_matrix) if is_matrix else len(debris))\n\n # random population that will be used just as an input for the GA\n self.population = []\n for i in range(0, population_size):\n self.population.append(random.sample(range(0, self.size), self.size))\n\n # eighter receive the weight matrix or calculate it\n if is_matrix:\n self.fitness_matrix = weight_matrix\n\n else:\n # remove decayed debris\n i = 0\n count = 0\n qtd_decayed = 0\n while count < self.size:\n if i >= len(debris):\n break\n\n try:\n self.kepler_elements.append(debris[i].osculating_elements(self.epoch))\n count += 1\n except:\n self.decayed_debris.append(i)\n qtd_decayed += 1\n i += 1\n\n print('Decayed debris ', qtd_decayed, 'Total ', len(self.kepler_elements))\n if len(self.kepler_elements) < self.size:\n raise BaseException('Insuficient size')\n\n # fitness matrix\n self.fitness_matrix = numpy.zeros((self.size, self.size))\n for i in range(0, self.size):\n for j in range(0, self.size):\n if self.hohmanncost:\n self.fitness_matrix[i][j] = StaticDebrisTSP.MYhohmann_impulse_aprox(self.kepler_elements[i], self.kepler_elements[j], self.epoch)\n else:\n try:\n self.fitness_matrix[i][j] = pykep.phasing.three_impulses_approx(debris[i], debris[j], self.epoch, self.epoch)\n except:\n d1 = self.kepler_elements[i]\n d2 = self.kepler_elements[j]\n self.fitness_matrix[i][j] = StaticDebrisTSP.MYthree_impulse_aprox(d1[0],d1[1],d1[2],d1[3],d2[0],d2[1],d2[2],d2[3],StaticDebrisTSP.mu)\n \n\n # freezes the first element\n def freeze_first(self):\n self.index_frozen += 1\n\n # returns if all debris were removed\n def all_frozen(self):\n return self.index_frozen >= (self.size-1-len(self.decayed_debris))\n\n # transform the debris kepler elements to certain epoch\n \"\"\"\n dt_epoch : datetime : the target epoch\n indexes : array : the debris that should be transformed\n \"\"\"\n def to_epoch(self, dt_epoch, indexes):\n new_epoch = pykep.epoch_from_string(dt_epoch.strftime(FMT))\n ranger = [x for x in range(0, self.size) if x in indexes]\n \n self.kepler_elements = list(numpy.zeros(self.size))\n for j in ranger:\n try:\n self.kepler_elements[j] = debris[j].osculating_elements(new_epoch)\n except:\n self.decayed_debris.append(j)\n\n for x in self.decayed_debris:\n if x in ranger:\n del ranger[ranger.index(x)]\n\n for i in ranger:\n for j in ranger:\n if self.hohmanncost:\n self.fitness_matrix[i][j] = StaticDebrisTSP.MYhohmann_impulse_aprox(self.kepler_elements[i], self.kepler_elements[j], new_epoch)\n else:\n try:\n self.fitness_matrix[i][j] = pykep.phasing.three_impulses_approx(debris[i], debris[j], new_epoch, new_epoch)\n except:\n d1 = self.kepler_elements[i]\n d2 = self.kepler_elements[j]\n self.fitness_matrix[i][j] = 
StaticDebrisTSP.MYthree_impulse_aprox(d1[0],d1[1],d1[2],d1[3],d2[0],d2[1],d2[2],d2[3],StaticDebrisTSP.mu)\n\n for x in self.decayed_debris:\n if x in indexes:\n del indexes[indexes.index(x)]\n\n return indexes\n\n # fitness is the sum cost to travel between each I and I+1 plus the last to initial\n def fitness(self, solution):\n fit = 0\n for i in range(0, self.size-1):\n fit += self.fitness_matrix[solution[i]][solution[i+1]]\n\n fit += self.fitness_matrix[solution[self.size-1]][solution[0]]\n return fit\n\n # partial fitness is the sum cost to travel between each I and I+1\n def partialFitness(self, part):\n fit = 0\n for i in range(0, len(part)-1):\n fit += self.fitness_matrix[part[i]][part[i+1]]\n\n return fit\n\n # reward is the sum reward of the debris in the solution\n def reward(self, solution):\n reward = 0\n for i in range(0, len(solution)):\n reward += self.reward_matrix[solution[i]]\n\n return reward\n\n # estimate the duration of a solution\n def duration(self, solution):\n duration = 0\n for i in range(0, len(solution)-1):\n duration += self.transferDuration(solution[i], solution[i+1], StaticDebrisTSP.mu)\n\n return duration\n\n # fitness TD is the fitness function for a timedependent solution\n def fitnessTD(self, solution):\n if len(solution) < 2:\n return 0\n \n fit = 0\n for i in range(0, len(solution)-1):\n epoch = pykep.epoch_from_string((solution[i+1][0]).strftime(FMT))\n if self.hohmanncost:\n d1 = debris[solution[i][1]].osculating_elements(epoch)\n d2 = debris[solution[i+1][1]].osculating_elements(epoch)\n fit += StaticDebrisTSP.MYhohmann_impulse_aprox(d1, d2, epoch)\n else:\n try:\n fit += pykep.phasing.three_impulses_approx(debris[solution[i][1]], debris[solution[i+1][1]], epoch, epoch)\n except:\n d1 = debris[solution[i][1]].osculating_elements(epoch)\n d2 = debris[solution[i+1][1]].osculating_elements(epoch)\n fit += StaticDebrisTSP.MYthree_impulse_aprox(d1[0],d1[1],d1[2],d1[3],d2[0],d2[1],d2[2],d2[3],StaticDebrisTSP.mu)\n\n return fit\n\n # duration TD is the duration estimate for a timedependent solution\n def durationTD(self, solution):\n duration = 0\n for i in range(0, len(solution)-1):\n duration += (solution[i+1][0] - solution[i][0]).total_seconds() # seconds waiting for right epoch\n\n epoch = pykep.epoch_from_string(solution[i+1][0].strftime(FMT))\n\n duration += self.transferDurationTD(solution[i][1], solution[i+1][1], epoch, epoch, StaticDebrisTSP.mu)\n\n return duration\n\n # reward TD is the reward function for a timedependent solution\n def rewardTD(self, solution):\n reward = 0\n for i in range(0, len(solution)):\n reward += self.reward_matrix[solution[i][1]]\n\n return reward\n \n # estimate the duration of a transfer (Hohmann) in seconds\n def transferDuration(self, d1, d2, u):\n d1_semi_major_axis = self.kepler_elements[d1][0]\n d2_semi_major_axis = self.kepler_elements[d2][0]\n transfer_semi_major_axis = (d1_semi_major_axis + d2_semi_major_axis) / 2\n time_of_transfer = math.pi * math.sqrt((transfer_semi_major_axis**3) / u)\n return time_of_transfer\n\n # estimate the duration of a transfer (Hohmann) in seconds in a certain epoch\n def transferDurationTD(self, d1, d2, epoch1, epoch2, u):\n kepler1 = debris[d1].osculating_elements(epoch1)\n kepler2 = debris[d2].osculating_elements(epoch2)\n d1_semi_major_axis = kepler1[0]\n d2_semi_major_axis = kepler2[0]\n transfer_semi_major_axis = (d1_semi_major_axis + d2_semi_major_axis) / 2\n time_of_transfer = math.pi * math.sqrt((transfer_semi_major_axis**3) / u)\n return time_of_transfer\n\n # find the 
constrained embedded maximal rewardable path in a solution\n def maxOpenWalk(self, solution, cost_limit=1000, time_limit=31536000):\n # calculate transferences\n transfers = []\n durations = []\n for i in range(0, len(solution)-1):\n sol_i = solution[i]\n sol_j = solution[i+1]\n transfers.append(self.fitness_matrix[sol_i][sol_j])\n durations.append(self.transferDuration(sol_i, sol_j, StaticDebrisTSP.mu))\n\n # calculate the maximal open walks starting at each arc\n maxWalks = []\n for i in range(0, len(transfers)):\n cost = transfers[i]\n duration = durations[i]\n walk = [i]\n\n for j in range(i+1, len(transfers)):\n if (cost + transfers[j]) > cost_limit or (duration + durations[j]) > time_limit:\n break;\n else:\n cost += transfers[j]\n duration += durations[j]\n walk.append(j)\n\n nodes = []\n reward = 0\n for a in range(0, len(walk)):\n arc = walk[a]\n if solution[arc] not in nodes:\n nodes.append(solution[arc])\n reward += self.reward_matrix[solution[arc]]\n nodes.append(solution[arc+1])\n reward += self.reward_matrix[solution[arc+1]]\n\n maxWalks.append({'walk': nodes, 'cost': cost, 'duration': duration, 'reward': reward})\n\n # find the biggest open walk\n w = 0\n for i in range(1, len(maxWalks)):\n if maxWalks[i]['reward'] > maxWalks[w]['reward']:\n w = i\n\n return maxWalks[w]\n\n # find the constrained embedded maximal rewardable path in a timedependent solution\n def maxOpenWalkTD(self, solution, cost_limit=1000, time_limit=31536000):\n # calculate transferences\n transfers = []\n durations = []\n for i in range(0, len(solution)-1):\n epoch = pykep.epoch_from_string((solution[i+1][0]).strftime(FMT))\n sol_i = solution[i][1]\n sol_j = solution[i+1][1]\n\n duration = (solution[i+1][0] - solution[i][0]).total_seconds() # seconds waiting for right epoch\n duration += self.transferDurationTD(sol_i, sol_j, epoch, epoch, StaticDebrisTSP.mu)\n durations.append(duration)\n\n if self.hohmanncost:\n d1 = debris[sol_i].osculating_elements(epoch)\n d2 = debris[sol_j].osculating_elements(epoch)\n transfers.append(StaticDebrisTSP.MYhohmann_impulse_aprox(d1, d2, epoch))\n else:\n try:\n transfers.append(pykep.phasing.three_impulses_approx(debris[sol_i], debris[sol_j], epoch, epoch))\n except:\n d1 = debris[sol_i].osculating_elements(epoch)\n d2 = debris[sol_j].osculating_elements(epoch)\n transfers.append(StaticDebrisTSP.MYthree_impulse_aprox(d1[0],d1[1],d1[2],d1[3],d2[0],d2[1],d2[2],d2[3],StaticDebrisTSP.mu))\n\n # calculate the maximal open walks starting at each arc\n maxWalks = []\n for i in range(0, len(transfers)):\n cost = transfers[i]\n duration = durations[i]\n walk = [i]\n\n for j in range(i+1, len(transfers)):\n if (cost + transfers[j]) > cost_limit or (duration + durations[j]) > time_limit:\n break;\n else:\n cost += transfers[j]\n duration += durations[j]\n walk.append(j)\n\n nodes = []\n reward = 0\n for a in range(0, len(walk)):\n arc = walk[a]\n if solution[arc] not in nodes:\n nodes.append(solution[arc])\n reward += self.reward_matrix[solution[arc][1]]\n nodes.append(solution[arc+1])\n reward += self.reward_matrix[solution[arc+1][1]]\n\n maxWalks.append({'walk': nodes, 'cost': cost, 'duration': duration, 'reward': reward})\n\n # find the biggest open walk\n w = 0\n for i in range(1, len(maxWalks)):\n if maxWalks[i]['reward'] > maxWalks[w]['reward']:\n w = i\n\n return maxWalks[w]\n\n # estimate the hohmann cost for a transfer between two debris\n # kepler elements order: a,e,i,W,w,M\n def MYhohmann_impulse_aprox(kepler1, kepler2):\n if kepler1 == kepler2:\n return 0\n \n d1 = 
math.sqrt(StaticDebrisTSP.mu/kepler1[0]) * (math.sqrt((2*kepler2[0]) / (kepler1[0]+kepler2[0])) - 1)\n        d2 = math.sqrt(StaticDebrisTSP.mu/kepler2[0]) * (- math.sqrt((2*kepler1[0]) / (kepler1[0]+kepler2[0])) + 1)\n        dv = abs(d1 + d2)\n\n        re = - StaticDebrisTSP.mu / (2 * (StaticDebrisTSP.re + kepler2[0]))\n        rvi = math.sqrt(2 * ( (StaticDebrisTSP.mu / (StaticDebrisTSP.re + kepler2[0])) + re))\n        romega = abs(kepler2[2] - kepler1[2]) # relative inclination kept in radians, as math.sin expects\n        rdv = 2 * rvi * math.sin(romega/2)\n\n        return abs(dv + rdv)\n\n    # estimate the Edelbaum cost for a transfer between two debris\n    # this implementation replaces the pykep implementation, since pykep throws an exception for decayed debris\n    def MYthree_impulse_aprox(a1, e1, i1, W1, a2, e2, i2, W2, mu):\n        # radius of apocenter/pericenter of the starting and target orbits (m)\n        ra1 = a1 * (1 + e1)\n        ra2 = a2 * (1 + e2)\n        rp1 = a1 * (1 - e1)\n        rp2 = a2 * (1 - e2)\n\n        # relative inclination between orbits\n        cosiREL = math.cos(i1) * math.cos(i2) + math.sin(i1) * math.sin(i2) * math.cos(W1 - W2)\n\n        # Strategy is Apocenter-Pericenter\n        if ra1 > ra2:\n            Vi = math.sqrt(mu * (2.0 / ra1 - 1.0 / a1))\n            Vf = math.sqrt(mu * (2.0 / ra1 - 2.0 / (rp2 + ra1)))\n\n            # Change Inclination + pericenter change\n            DV1 = math.sqrt(Vi * Vi + Vf * Vf - 2.0 * Vi * Vf * cosiREL)\n            # Apocenter Change\n            DV2 = math.sqrt(mu) * abs(math.sqrt(2.0 / rp2 - 2.0 / (rp2 + ra1)) - math.sqrt(2.0 / rp2 - 1.0 / a2))\n\n            return (DV1 + DV2)\n\n        # Strategy is Pericenter-Apocenter\n        else:\n            Vi = math.sqrt(mu * ((2 / ra2) - (2 / (rp1 + ra2))))\n            Vf = math.sqrt(mu * ((2 / ra2) - (1 / a2)))\n\n            # Apocenter Raise\n            DV1 = math.sqrt(mu) * abs(math.sqrt((2 / rp1) - (2 / (rp1 + ra1))) - math.sqrt((2 / rp1) - (2 / (rp1 + ra2))))\n            # Change Inclination + apocenter change\n            DV2 = math.sqrt(abs((Vi * Vi) + (Vf * Vf) - (2 * Vi * Vf * cosiREL)))\n\n            return (DV1 + DV2)",
"_____no_output_____"
]
],
[
[
"# Instance loading\nThe instances can be downloaded at SATCAT site.\n\nIt is necessary to use a TXT file (TLE file) to get the debris names, codes and kepler elements, and a CSV file for the debris RCS (reward).",
"_____no_output_____"
]
],
[
[
"deb_file = 'fengyun-1c-debris'\ndebris = pykep.util.read_tle(tle_file=deb_file+'.txt', with_name=True)\nwith open(deb_file+'.txt') as f:\n tle_string = ''.join(f.readlines())\n\ntle_lines = tle_string.strip().splitlines()\ntle_elements = [tle_lines[i:i + 3] for i in range(0, len(tle_lines), 3)] #split in array of debris\ndebris_tle = [TLE.from_lines(*tle_elements[i]) for i in range(0, len(tle_elements))]\n\nwith open(deb_file+'.csv', newline='') as csvfile:\n satcat = list(csv.reader(csvfile))",
"_____no_output_____"
],
[
"# extract the reward for each debris\nareaDebris = []\nnorad_index = satcat[0].index('NORAD_CAT_ID')\nrcs_index = satcat[0].index('RCS')\nfor i in range(0, len(debris)):\n rcs = 0\n for j in range(1, len(satcat)):\n if (debris_tle[i].norad == satcat[j][norad_index]):\n if (satcat[j][rcs_index]):\n rcs = float(satcat[j][rcs_index])\n break\n areaDebris.append(rcs)",
"_____no_output_____"
]
],
[
[
"# Solution\nHere the actual solution is generated.\n\nAn interpolated tree search is performed to enhance the static to a time dependent solution.",
"_____no_output_____"
]
],
[
[
"start_epoch = \"2021-06-11 00:06:09\"\nFMT = '%Y-%m-%d %H:%M:%S'\nsteps = int((24 * 60) / 10) * 7 # in days\nstep_size = timedelta(minutes=10)\nremoval_time = timedelta(days=1) # time taken to deorbit a debris\nwinsize = 10 # range for the kopt\n\nfor _ in range(10):\n t0 = datetime.datetime.now() # to track time elapsed\n epoch = datetime.datetime.strptime(start_epoch, FMT)\n\n # generate the ga and problem instance\n problem = StaticDebrisTSP(epoch=pykep.epoch_from_string(start_epoch), hohmanncost=False, debris=debris, reward_matrix=areaDebris, path_size=size, population_size=100)\n ga = GA(population=problem.population, fn_fitness=problem.fitness, subpath_fn_fitness=problem.partialFitness)\n\n # generate the static solution\n curr_solution = ga.run_inverover(tinv=20000, feach=1000, runkopt=100, forn=5)\n curr_fit = problem.partialFitness(curr_solution)\n print('initial fit: '+str(curr_fit))\n\n # find the static max open walk\n path = problem.maxOpenWalk(curr_solution, 1000, 60*60*24*365) # 1km/s and 1 year\n \n # make the population start by best starting debris, and get the best then\n ga.startBy(path['walk'][0])\n curr_solution = ga.getBest()\n curr_fit = problem.partialFitness(curr_solution)\n print('secondal fit: '+str(curr_fit))\n\n # use the first debris for the time dependent solution\n solution = [(epoch, curr_solution[0])]\n problem.freeze_first()\n ga.freeze_first(curr_solution[0])\n\n while not problem.all_frozen():\n i = problem.index_frozen\n \n # run ranged kopt to optimize the current part of solution\n if i > 0 and (i < len(curr_solution)-1):\n curr_solution[i:i+winsize+1] = problem.to_epoch(epoch, curr_solution[i:i+winsize+1])\n curr_solution[-(winsize+1):] = problem.to_epoch(epoch, curr_solution[-(winsize+1):])\n ga.decay(problem.decayed_debris)\n curr_solution = ga.ranged2opt(curr_solution, winsize)\n \n # get the next transference to be performed\n transition = curr_solution[i:i+2]\n \n # validates if the debris in this transference are going to decay in during the interpolation\n transition = problem.to_epoch(epoch + (step_size * steps), transition)\n if len(transition) < 2:\n curr_solution[i:i+2] = transition\n ga.decay(problem.decayed_debris)\n continue\n\n # calculate the costs of the transference for the interpolation range\n epoch_aux = epoch\n x = []\n y = []\n for j in range(0, steps):\n problem.to_epoch(epoch, transition)\n x.append(j)\n y.append(problem.partialFitness(transition))\n epoch += step_size\n\n # get the minimal cost point in the interpolated function\n interpolator = scipy.interpolate.interp1d(x, y, kind='cubic')\n xnew = numpy.linspace(0, steps-1, num=steps*3, endpoint=True) # num = precision\n least = numpy.argmin(interpolator(xnew))\n\n # get the epoch of the minimal cost transference\n epoch = epoch_aux + (step_size * xnew[least])\n\n # append to the time dependent solution\n solution.append((epoch, curr_solution[i+1]))\n\n # pushes the current epoch to after the deorbitation process\n pykep_epoch = pykep.epoch_from_string(epoch.strftime(FMT))\n transfer_duration = timedelta(seconds=problem.transferDurationTD(curr_solution[i], curr_solution[i+1], pykep_epoch, pykep_epoch, StaticDebrisTSP.mu))\n epoch += removal_time + transfer_duration\n\n # freezes the deorbited debris\n problem.freeze_first()\n ga.freeze_first(curr_solution[i+1])\n\n t1 = datetime.datetime.now()",
"_____no_output_____"
],
[
"# instance results\nprint(solution)\nprint('elapsed time: '+ str(t1 - t0))\nprint('fit: ' + str(problem.fitnessTD(solution)))\nprint('dur: ' + str(problem.durationTD(solution)/60/60/24) + ' days')\nprint('rew: ' + str(problem.rewardTD(solution)))\n\n# constrained (best mission) results\npath = problem.maxOpenWalkTD(solution, 1000, 60*60*24*365) # 1km/s and 1 year\nprint(path)\nprint('walk ' + str(len(path['walk'])))\nprint('w_cost ' + str(path['cost']))\nprint('w_rew ' + str(path['reward']))\nprint('w_dur ' + str(path['duration']/60/60/24) + ' days')",
"_____no_output_____"
]
],
[
[
"# Bibliography",
"_____no_output_____"
],
[
"**Instances**\n\nTLE Derbis: https://celestrak.com/NORAD/elements/\n\nRCS: https://celestrak.com/satcat/search.php - LEGACY text\n\nFormat: https://celestrak.com/satcat/satcat-format.php\n\n**Used Libs**\n\nhttps://esa.github.io/pykep/documentation/phasing.html#pykep.phasing.three_impulses_approx\n\n**Reference source codes**\n\nhttps://github.com/esa/pagmo/blob/master/src/problem/base_tsp.cpp\nhttps://github.com/esa/pagmo/blob/master/src/algorithm/inverover.cpp\n\nhttps://stackoverflow.com/questions/47982604/hamiltonian-path-using-python/47985349\nhttps://github.com/esa/pagmo/blob/80281d549c8f1b470e1489a5d37c8f06b2e429c0/src/util/neighbourhood.cpp\n\nhttps://github.com/esa/pagmo/blob/80281d549c8f1b470e1489a5d37c8f06b2e429c0/PyGMO/problem/_tsp.py\nhttps://github.com/esa/pagmo/blob/80281d549c8f1b470e1489a5d37c8f06b2e429c0/src/problem/base_tsp.cpp\nhttps://github.com/esa/pagmo/blob/80281d549c8f1b470e1489a5d37c8f06b2e429c0/src/problem/tsp.cpp\nhttps://github.com/esa/pagmo/blob/80281d549c8f1b470e1489a5d37c8f06b2e429c0/src/problem/tsp_cs.cpp\nhttps://github.com/esa/pagmo/blob/80281d549c8f1b470e1489a5d37c8f06b2e429c0/src/problem/tsp_ds.cpp\nhttps://github.com/esa/pykep/blob/2e1c97bea138d2c125d6695e7662991e6da30203/include/keplerian_toolbox/core_functions/three_impulses_approximation.hpp\n\n**Reference physics**\n\nhttps://en.wikipedia.org/wiki/Hohmann_transfer_orbit\n\nhttps://en.wikipedia.org/wiki/Kepler%27s_laws_of_planetary_motion#Third_law\n\nhttps://en.wikipedia.org/wiki/Orbital_period\n\nhttps://space.stackexchange.com/questions/35166/how-to-find-t₀-and-other-parameters-from-a-tle-to-calculate-an-approximate-mean/35190#35190\n\nhttps://space.stackexchange.com/questions/18289/how-to-get-semi-major-axis-from-tle\n\nhttps://ai-solutions.com/_freeflyeruniversityguide/hohmann_transfer.htm",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
]
] |
e7fc7bd5d2b2ee68dee5e173ef6068b8d2f39f3c | 90,693 | ipynb | Jupyter Notebook | 2. Sparse SSHIBA.ipynb | alexjorguer/SSHIBA | 785cbffb569745ab58921749bc90420494e4223b | [
"MIT"
] | 2 | 2021-05-20T10:01:54.000Z | 2021-11-17T12:02:13.000Z | 2. Sparse SSHIBA.ipynb | sevisal/SSHIBA | 785cbffb569745ab58921749bc90420494e4223b | [
"MIT"
] | null | null | null | 2. Sparse SSHIBA.ipynb | sevisal/SSHIBA | 785cbffb569745ab58921749bc90420494e4223b | [
"MIT"
] | 1 | 2021-11-17T12:02:54.000Z | 2021-11-17T12:02:54.000Z | 283.415625 | 35,116 | 0.922199 | [
[
[
"# 2. Feature SelectionModel\nAuthor: _Carlos Sevilla Salcedo (Updated: 18/07/2019)_\n\nIn this notebook we are going to present the extension to include a double sparsity in the model. The idea behind this modification is that besides imposing sparsity in the latent features, we could also force to have sparsity in the input features. This way, the model is capable of not only inferring any data desired, but also carry out a feature selection with a measure of the importance of each feature.\n\nThe main advantage of this characteristic is two-fold: 1) It allows us to work with lower dimension matrices which, in return, improves the speed of the results. 2) It provides a selection of the most important features which in certain scenarios can considerably improve the interpretability of the results.\n\n## Synthetic data generation\n\nWe can now generate data in a similar manner to the regression model to compare the performance of both apporaches. In this case we are going to include some random features to try to suppress them with the sparse model.",
"_____no_output_____"
]
],
[
[
"%matplotlib inline\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport math\nnp.random.seed(0)\n\nN = 1000 # number of samples\nD0 = 55 # input features\nD1 = 3 # output features\n\nmyKc = 20\n\nK = 2 # common latent variables\nK0 = 3 # first view's latent variables\nK1 = 3 # second view's latent variables\nKc=K+K0+K1 # latent variables\n\n# Generation of matrix W\nA0 = np.random.normal(0.0, 1, D0 * K).reshape(D0, K)\nA1 = np.random.normal(0.0, 1, D1 * K).reshape(D1, K)\n\nB0 = np.random.normal(0.0, 1, D0 * K0).reshape(D0, K0)\nB1 = np.random.normal(0.0, 1, D1 * K1).reshape(D1, K1)\n\nW0 = np.hstack((np.hstack((A0,B0)),np.zeros((D0,K1))))\nW1 = np.hstack((np.hstack((A1,np.zeros((D1,K0)))),B1))\nW_tot = np.vstack((W0,W1))\n\n# Generation of matrix Z\nZ = np.random.normal(0.0, 1, Kc * N).reshape(N, Kc)\n\n# Generation of matrix X\nX0 = np.dot(Z,W0.T) + np.random.normal(0.0, 0.1, D0 * N).reshape(N, D0)\nX1 = np.dot(Z,W1.T) + np.random.normal(0.0, 0.1, D1 * N).reshape(N, D1)\n\n#Random features generation\nDN0 = 300\nX0 = np.hstack((X0,np.random.normal(0.0, 0.1, DN0 * N).reshape(N,DN0)))",
"_____no_output_____"
]
],
[
[
"Once the data is generated we divide it into train and test in order to be able to test the performance of the model. After that, we can normalize the data.",
"_____no_output_____"
]
],
[
[
"from sklearn.model_selection import train_test_split\nX_tr, X_tst, Y_tr, Y_tst = train_test_split(X0, X1, test_size=0.3, random_state = 31)\n\nfrom sklearn.preprocessing import StandardScaler\nscaler = StandardScaler()\nX_tr = scaler.fit_transform(X_tr)\nX_tst = scaler.transform(X_tst)",
"_____no_output_____"
]
],
[
[
"## Training the model\n\nOnce the data is prepared we just have to feed it to the model. As the model has so many possibilities we have decided to pass the data to the model following a particular structure so that we can now, for each view, if the data corresponds to real, multilabel or categorical as well as knowing if we want to calculate the model with sparsity in the features.\n\nIn this case we are indicating we want to prune the latent features, so that if a latent feature has a sufficiently low value for all the features in matrix $W$, this latent feature will be eliminated.",
"_____no_output_____"
]
],
[
[
"import os\nos.sys.path.append('lib')\nimport sshiba",
"_____no_output_____"
],
[
"myKc = 20 # number of latent features\nmax_it = int(5*1e4) # maximum number of iterations\ntol = 1e-6 # tolerance of the stopping condition (abs(1 - L[-2]/L[-1]) < tol)\nprune = 1 # whether to prune the irrelevant latent features\n\nmyModel = sshiba.SSHIBA(myKc, prune)\nX0_tr = myModel.struct_data(X_tr, 0, 1)\nX1_tr = myModel.struct_data(Y_tr, 0, 0)\nX0_tst = myModel.struct_data(X_tst, 0, 0)\nX1_tst = myModel.struct_data(Y_tst, 0, 0)\nmyModel.fit(X0_tr, X1_tr, max_iter = max_it, tol = tol, Y_tst = X1_tst, X_tst = X0_tst, mse = 1)\nprint('Final MSE %.3f' %(myModel.mse[-1]))",
"Iteration 1620 Lower Bound 473636.1 K 9\nModel correctly trained. Convergence achieved\nFinal L(Q): 473636.1\nFinal MSE 9.493\n"
]
],
[
[
"## Visualization of the results\n\n### Lower Bound and MSE\n\nNow the model is trained we can plot the evolution of the lower bound through out the iterations. This lower bound is calculated using the values of the variables the model is calculating and is the value we are maximizing. As we want to maximize this value it has to be always increasing with each iteration.\n\nAt the same time, we are plotting now the evolution of the Minimum Square Error (MSE) with each update of the model. As we are not minimizing this curve, this doesn't necessarily have to be always decreasing and might need more iterations to reach a minimum. ",
"_____no_output_____"
]
],
[
[
"def plot_mse(mse):\n fig, ax = plt.subplots(figsize=(10, 4))\n ax.plot(mse, linewidth=2, marker='s',markersize=5, label='SSHIBA', markerfacecolor='red')\n ax.grid()\n ax.set_xlabel('Iteration')\n ax.set_ylabel('MSE')\n plt.legend()\n\ndef plot_L(L):\n fig, ax = plt.subplots(figsize=(10, 4))\n ax.plot(L, linewidth=2, marker='s',markersize=5, markerfacecolor='red')\n ax.grid()\n ax.set_xlabel('Iteration')\n ax.set_ylabel('L(Q)')\n \ndef plot_W(W):\n plt.figure()\n plt.imshow((np.abs(W)), aspect=W.shape[1]/W.shape[0])\n plt.colorbar()\n plt.title('W')\n plt.ylabel('features')\n plt.xlabel('K')",
"_____no_output_____"
],
[
"plot_L(myModel.L)\nplt.title('Lower Bound')\n\nplot_mse(myModel.mse)\nplt.title('mse test')\nplt.show()",
"_____no_output_____"
]
],
[
[
"### Sparsity in matrix W\n\nFor the sake of this example the model has not been automatically erasing a feature whenever it is considered as irrelevant and, instead of deleting it the model has just learned that these features are less important.\n\nAs we now have the different weights given to each feature based on their relevance, we can now determine whether the algorithm has been capable of determining which features are relevant or not. To do so, we are going to start by showing the values of the learnt variable $\\gamma$ which tole is to force sparsity in the feature space.",
"_____no_output_____"
]
],
[
[
"q = myModel.q_dist\ngamma = q.gamma_mean(0)\n\nax1 = plt.subplot(2, 1, 1)\nplt.title('Feature selection analysis')\nplt.hist(gamma,100)\nax2 = plt.subplot(2, 1, 2)\nplt.plot(gamma,'.')\nplt.ylabel('gamma')\nplt.xlabel('feature')\nplt.show()",
"_____no_output_____"
]
],
[
[
"As we can see, the values that we have randomly added to the original data are recognisible and can be therefore easily selected as relevant. We can also see the efect of the sparsity by looking at matrix $W$ where the features that are found to be irrelevant have lower values than the ones which are relevant.",
"_____no_output_____"
]
],
[
[
"pos_ord_var=np.argsort(gamma)[::-1]\nplot_W(q.W[0]['mean'][pos_ord_var,:])",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
e7fc815feec2734a42b857c80a0e362064c15d1f | 61,930 | ipynb | Jupyter Notebook | examples/notebooks/15Matching_sections.ipynb | fprice111/python-dts-calibration | bc972070ab1c9fe43e9ecc85ace30e2877b8cd00 | [
"BSD-3-Clause"
] | 20 | 2019-10-07T15:54:07.000Z | 2022-03-18T07:18:22.000Z | examples/notebooks/15Matching_sections.ipynb | fprice111/python-dts-calibration | bc972070ab1c9fe43e9ecc85ace30e2877b8cd00 | [
"BSD-3-Clause"
] | 90 | 2019-01-25T09:41:37.000Z | 2022-03-21T12:45:30.000Z | examples/notebooks/15Matching_sections.ipynb | fprice111/python-dts-calibration | bc972070ab1c9fe43e9ecc85ace30e2877b8cd00 | [
"BSD-3-Clause"
] | 9 | 2019-10-16T12:37:59.000Z | 2022-02-18T21:24:29.000Z | 210.646259 | 30,844 | 0.907121 | [
[
[
"# 15. Calibration using matching sections",
"_____no_output_____"
],
[
"In notebook 14 we showed how you can take splices or connectors within your calibration into account. To then calibrate the cable we used reference sections on both sides of the splice. If these are not available, or in other cases where you have a lack of reference sections, matching sections can be used to improve the calibration.\n\nFor matching sections you need two sections of fiber than you know will be the exact same temperature. This can be, for example, in duplex cables or twisted pairs of cable.",
"_____no_output_____"
],
[
"### Demonstration\nTo demonstrate matching sections, we'll load the same dataset that was used in previous notebooks, and modify the data to simulate a lossy splice, just as in notebook 14.",
"_____no_output_____"
]
],
[
[
"import os\n\nfrom dtscalibration import read_silixa_files\nimport matplotlib.pyplot as plt\n%matplotlib inline",
"_____no_output_____"
],
[
"filepath = os.path.join('..', '..', 'tests', 'data', 'double_ended2')\n\nds_ = read_silixa_files(\n directory=filepath,\n timezone_netcdf='UTC',\n file_ext='*.xml')\n\nds = ds_.sel(x=slice(0, 110)) # only calibrate parts of the fiber\n\n\nsections = {\n 'probe1Temperature': [slice(7.5, 17.)], # cold bath\n 'probe2Temperature': [slice(24., 34.)], # warm bath\n }\nds.sections = sections",
"6 files were found, each representing a single timestep\n6 recorded vars were found: LAF, ST, AST, REV-ST, REV-AST, TMP\nRecorded at 1693 points along the cable\nThe measurement is double ended\nReading the data from disk\n"
]
],
[
[
"Again, we introduce a step loss in the signal strength at x = 50 m. For the forward channel, this means all data beyond 50 meters is reduced with a 'random' factor. For the backward channel, this means all data up to 50 meters is reduced with a 'random' factor.",
"_____no_output_____"
]
],
[
[
"ds['st'] = ds.st.where(ds.x < 50, ds.st*.8)\nds['ast'] = ds.ast.where(ds.x < 50, ds.ast*.82)\n\nds['rst'] = ds.rst.where(ds.x > 50, ds.rst*.85)\nds['rast'] = ds.rast.where(ds.x > 50, ds.rast*.81)",
"_____no_output_____"
]
],
[
[
"We will first run a calibration without adding the transient attenuation location or matching sections. A big jump in the calibrated temperature is visible at x = 50. \n\nAs all calibration sections are before 50 meters, the first 50 m will be calibrated correctly.",
"_____no_output_____"
]
],
[
[
"ds_a = ds.copy(deep=True)\n\nst_var, resid = ds_a.variance_stokes(st_label='st')\nast_var, _ = ds_a.variance_stokes(st_label='ast')\nrst_var, _ = ds_a.variance_stokes(st_label='rst')\nrast_var, _ = ds_a.variance_stokes(st_label='rast')\n\nds_a.calibration_double_ended(\n st_var=st_var,\n ast_var=ast_var,\n rst_var=rst_var,\n rast_var=rast_var,\n store_tmpw='tmpw',\n method='wls',\n solver='sparse')\n\nds_a.isel(time=0).tmpw.plot(label='calibrated')",
"_____no_output_____"
]
],
[
[
"Now we run a calibration, adding the keyword argument '**trans_att**', and provide a list of floats containing the locations of the splices. In this case we only add a single one at x = 50 m.\n\nWe will also define the matching sections of cable. The matching sections have to be provided as a list of tuples. A tuple per matching section. Each tuple has three items, the first two items are the slices of the sections that are matching. The third item is a bool and is True if the two sections have a reverse direction (as in the \"J-configuration\").\n\nIn this example we match the two cold baths to each other.\n\nAfter running the calibration you will see that by adding the transient attenuation and matching sections the calibration returns the correct temperature, without the big jump.\n\n*In single-ended calibration the keyword is called '**trans_att**'.*",
"_____no_output_____"
]
],
[
[
"matching_sections = [\n (slice(7.5, 17.6), slice(69, 79.1), False)\n]\n\nst_var, resid = ds.variance_stokes(st_label='st')\nast_var, _ = ds.variance_stokes(st_label='ast')\nrst_var, _ = ds.variance_stokes(st_label='rst')\nrast_var, _ = ds.variance_stokes(st_label='rast')\n\nds.calibration_double_ended(\n st_var=st_var,\n ast_var=ast_var,\n rst_var=rst_var,\n rast_var=rast_var,\n trans_att=[50.],\n matching_sections=matching_sections,\n store_tmpw='tmpw',\n method='wls',\n solver='sparse')\n\nds_a.isel(time=0).tmpw.plot(label='normal calibration')\nds.isel(time=0).tmpw.plot(label='matching sections')\nplt.legend()",
"/home/bart/git/python-dts-calibration/.tox/docs/lib/python3.7/site-packages/scipy/sparse/_index.py:116: SparseEfficiencyWarning: Changing the sparsity structure of a csr_matrix is expensive. lil_matrix is more efficient.\n self._set_arrayXarray_sparse(i, j, x)\n"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
e7fc8838dee2b490c19691366f98441c26bcb604 | 295,396 | ipynb | Jupyter Notebook | nbs/julia_sets.ipynb | adiamaan92/brotground | 25263438b69fa46c2c3fc0667a42bd6524b76d9e | [
"MIT"
] | 3 | 2021-11-24T03:12:35.000Z | 2022-02-07T02:15:45.000Z | nbs/julia_sets.ipynb | adiamaan92/brotground | 25263438b69fa46c2c3fc0667a42bd6524b76d9e | [
"MIT"
] | null | null | null | nbs/julia_sets.ipynb | adiamaan92/brotground | 25263438b69fa46c2c3fc0667a42bd6524b76d9e | [
"MIT"
] | null | null | null | 2,204.447761 | 164,334 | 0.964624 | [
[
[
"!pip install brotground==0.1.3",
"_____no_output_____"
],
[
"from brotground import JuliaBrot\nfrom brotground.resources import quadratic_julia_set\nfrom brotground.renderers import StaticRenderer",
"_____no_output_____"
],
[
"matplot_renderer = StaticRenderer()\n\nquadratic_julia_set",
"_____no_output_____"
],
[
"julia = JuliaBrot(julia_name=\"frost_fractal\")\njulia.iterate_diverge(max_iterations=100)\n\nmatplot_renderer.plot(julia, cmap=\"inferno\")",
"_____no_output_____"
],
[
"julia = JuliaBrot(julia_name=\"galaxiex_fractal\")\njulia.iterate_diverge(max_iterations=100)\n\nmatplot_renderer.plot(julia, cmap=\"coolwarm\")",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code"
]
] |
e7fc93e8df725b87b3c4317e389965bb87643cfc | 51,658 | ipynb | Jupyter Notebook | Module4/IntroToRegularization.ipynb | AlephEleven/Deep-Learning-Explained | e08138d5dcd98dad30e0c6950553b94720c0d9ad | [
"Unlicense"
] | 1 | 2022-02-26T22:59:36.000Z | 2022-02-26T22:59:36.000Z | Module4/IntroToRegularization.ipynb | AlephEleven/Deep-Learning-Explained | e08138d5dcd98dad30e0c6950553b94720c0d9ad | [
"Unlicense"
] | null | null | null | Module4/IntroToRegularization.ipynb | AlephEleven/Deep-Learning-Explained | e08138d5dcd98dad30e0c6950553b94720c0d9ad | [
"Unlicense"
] | null | null | null | 40.263445 | 704 | 0.623311 | [
[
[
"# Deep Learning Explained\n\n# Module 4 - Lab - Introduction to Regularization for Deep Neural Nets \n\n\n\nThis lesson will introduce you to the principles of regularization required to successfully train deep neural networks. In this lesson you will:\n\n1. Understand the need for regularization of complex machine learning models, particularly deep NNs. \n2. Know how to apply constraint-based regularization using the L1 and L2 norms.\n3. Understand and apply the concept of data augmentation. \n4. Know how to apply dropout regularization. \n5. Understand and apply early stopping. \n6. Understand the advantages of various regularization methods and know when how to apply them in combination. \n\n\n",
"_____no_output_____"
],
[
"## 1.0 Why do we need regularization for deep learning?\n\nDeep learning models have a great many parameters (weights) which must be fit. This situation arises from the wide and deep architectures that are required to achieve significant **model capacity** for representing complex functions. The core issue is that over-fit models will simply learn the training data and **over-fit models do not generalize**. Therefore, regularization methods are required in order to prevent over-fitting.\n\nIn particular, we can point to three interrelated problems with training deep neural networks:\n\n1. Neural network models have large numbers of parameters (weights). With any finite size data set, there is likely to be a low ratio of cases per parameter or low ratio of cases to features. \n2. As a result of the large numbers of parameters, neural networks are susceptible to noise in the training data. Neural networks are generally considered less robust to noise than shallow machine learning methods. \n3. Presumably as a result of the model complexity, neural networks often return unexpected predictions for data cases outside the training data domain. This property has been referred to as **brittleness**. Brittleness has proven to be a serious problem in some production systems. \n\nThe regularization methods presented here will limit these effects. However, there is no 'silver bullet'! Neural networks are hard to train under the best of circumstances. ",
"_____no_output_____"
],
[
"### 1.1 Bias-variance trade-off\n\nTo better understand this trade-off let's decompose mean square error for a model as follows:\n\n$$\\Delta y = E \\big[ Y - \\hat{f}(X) \\big]$$\n\nWhere, \n$Y = $ the label vector. \n$X = $ the feature matrix. \n$\\hat{f}(x) = $ the trained model. \n\nExpanding this relation gives us:\n\n$$\\Delta y = \\big( E[ \\hat{f}(X)] - \\hat{f}(X) \\big)^2 + E \\big[ ( \\hat{f}(X) - E[ \\hat{f}(X)])^2 \\big] + \\sigma^2\\\\\n\\Delta y = Bias^2 + Variance + Irreducible\\ Error$$\n\n\nRegularization will reduce variance, but increase bias. Regularization parameters must be chosen to minimize $\\Delta x$. In many cases, this will prove challenging. \n\nNotice that the **irreducible error** is the limit of model accuracy. Even if we had a perfect model with no bias or variance, the irreducible error is inherent in the data and problem. ",
"_____no_output_____"
],
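[
"To make the decomposition concrete, here is a small simulation sketch (the true function, noise level, and estimator below are illustrative choices, not part of this lab's data). It fits many degree-1 polynomials to fresh noisy samples and estimates the bias and variance terms at a single test point; adding the irreducible error $\\sigma^2$ recovers the expected squared error.\n\n```python\nimport numpy as np\n\nnp.random.seed(123)\nf = lambda x: np.sin(3.0 * x)  # true function (illustrative, deliberately nonlinear)\nsigma = 1.0                    # standard deviation of the label noise\nx_tr = np.linspace(0, 1, 20)   # fixed design points\nx0 = 0.5                       # test point\n\npreds = []\nfor _ in range(2000):          # many resampled training sets\n    y_tr = f(x_tr) + np.random.normal(0, sigma, x_tr.shape)\n    coef = np.polyfit(x_tr, y_tr, deg=1)\n    preds.append(np.polyval(coef, x0))\npreds = np.array(preds)\n\nbias2 = (preds.mean() - f(x0)) ** 2   # squared bias at x0\nvariance = preds.var()                # variance of the estimator at x0\nprint(bias2, variance, sigma ** 2)    # bias^2 + variance + irreducible error\n```",
"_____no_output_____"
],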
[
"### 1.2 Demonstration of over-parameterization\n\nLet's try a simple example. We will construct a regression models with different numbers of parameters and therefore different model capacities. \n\nAs a first step, we will create a simple single regression model of some synthetic data. The code in the cell below creates data computed from as a straight line, but with considerable Normally distributed random noise. A plot is then created of the result. Execute this code and examine the resulting plot. ",
"_____no_output_____"
]
],
[
[
"%matplotlib inline\nimport numpy as np\nimport numpy.random as nr\nimport matplotlib.pyplot as plt\nfrom numpy.random import normal, seed\nimport sklearn.linear_model as slm\nfrom sklearn.preprocessing import scale\nimport sklearn.model_selection as ms\nfrom math import sqrt\nimport keras\nimport keras.models as models\nimport keras.layers as layers\nfrom keras.layers import Dropout, LeakyReLU\nfrom keras import regularizers\nfrom keras.layers.normalization import BatchNormalization\nfrom tensorflow import set_random_seed\n\nseed(34567)\nx = np.arange(start = 0.0, stop = 10.0, step = 0.25) \ny = np.add(x, normal(scale = 2.0, size = x.shape[0]))\n\nplt.scatter(x,y)",
"_____no_output_____"
]
],
[
[
"Notice that these data points fall approximately on a straight line, but with significant deviations. \n\nNext, you will compute a simple single regression model. This model has an intercept term and a single slope parameter. The code in the cell below splits the data into randomly selected training and testing subsets. Execute this code.",
"_____no_output_____"
]
],
[
[
"indx = range(len(x))\nseed(9988)\nindx = ms.train_test_split(indx, test_size = 20)\nx_train = np.ravel(x[indx[0]])\ny_train = np.ravel(y[indx[0]])\nx_test = np.ravel(x[indx[1]])\ny_test = np.ravel(y[indx[1]])",
"_____no_output_____"
]
],
[
[
"Next, we will use the linear model in `sklearn.linear_model` package to create a single regression model for these data. The code in the cell below does just this, prints the single model coefficient, and plots the result. Execute this code. \n",
"_____no_output_____"
]
],
[
[
"def plot_reg(x, y_score, y):\n ax = plt.figure(figsize=(6, 6)).gca() # define axis\n \n ## Get the data in plot order\n xy = sorted(zip(x,y_score))\n x = [x for x, _ in xy]\n y_score = [y for _, y in xy]\n\n ## Plot the result\n plt.plot(x, y_score, c = 'red')\n plt.scatter(x, y)\n plt.title('Predicted line with test data')\n\ndef reg_model(x, y):\n mod = slm.LinearRegression()\n x_scale = scale(x) # .reshape(-1, 1)\n mod.fit(x_scale, y)\n print(mod.coef_)\n return mod, x_scale, mod.predict(x_scale)\n\nmod, x_scale, y_hat = reg_model(x_train.reshape(-1, 1), y_train)\n\nplot_reg(x_scale, y_hat, y_train)",
"_____no_output_____"
]
],
[
[
"Examine these results. Notice that the single coefficient (slope) seems reasonable, given the standardization of the training data. Visually, the fit to the training data also looks reasonable. \n\nWe should also test the fit to some test data. The code in the cell does just this and returns the RMS error. execute this code.",
"_____no_output_____"
]
],
[
[
"from math import sqrt\ndef test_mod(x,y, mod):\n x_scale = scale(x)\n y_score = mod.predict(x_scale)\n plot_reg(x_scale, y_score, y)\n return np.std(y_score - y)\n\ntest_mod(x_test.reshape(-1, 1), y_test, mod)",
"_____no_output_____"
]
],
[
[
"Again, these results look reasonable. The RMSE is relatively small given the significant dispersion in these data. \n\nNow, try a model with significantly higher capacity. In this case we compute new features for a 9th order polynomial model. Using this new set of features a regression model is trained and a summary displayed. ",
"_____no_output_____"
]
],
[
[
"seed(2233)\nx_power = np.power(x_train.reshape(-1, 1), range(1,10))\nx_scale = scale(x_power)\n\nmod_power = slm.LinearRegression()\nmod_power.fit(x_scale, y_train)\ny_hat_power = mod_power.predict(x_scale)\n\nplot_reg(x_scale[:,0], y_hat_power, y_train)\n\nprint(mod_power.coef_)\nprint(np.std(y_hat_power - y_train))",
"_____no_output_____"
]
],
[
[
"Notice the following, indicating the model is quite over-fit. \n- There is a wide range of coefficient values across 7 orders of magnitude. This situation is in contrast to the coefficient of the single regression model which had a reasonable single digit value.\n- The graph of the fitted model shows highly complex behavior. In reality, this behavior indicates the model is 'learning the data'. \n\nNow, we will try to test the model with the held-back test data. The code in the cell below creates the same features and applies the `predict` method to the model using these test features. ",
"_____no_output_____"
]
],
[
[
"x_test_scale = scale(x_test.reshape(-1, 1)) # Prescale to prevent numerical overflow. \nx_test_power = np.power(x_test_scale, range(1,10))\nx_scale_test = scale(x_test_power)\n\ny_hat_power = mod_power.predict(x_scale_test)\n\nplot_reg(x_scale_test[:,0], y_hat_power, y_test)\n\nprint(np.std(y_hat_power - y_test))",
"_____no_output_____"
]
],
[
[
"This is clearly a terrible fit! The RMSE is enormous and the curve of predicted values bears little resemblance to the test values. Indeed, this is a common problem with over-fit models that the errors grow in very rapidly toward the edges of the training data domain. We can definitely state that this model **does not generalize**. ",
"_____no_output_____"
],
[
"## 2.0 l2 regularization\n\nWe will now explore one of the mostly widely used regularization methods, often referred to as l2 regularization. \n\nThe same method goes by some other names, as it has been 'invented' several times. In particular, this method is known as, **Tikhonov regularization**, **l2 norm regularization**, **pre-whitening** in engineering, and for linear models **ridge regression**. In all likelihood the method was first developed by the Russian mathematician Andrey Tikhonov in the late 1940's. His work was not widely known in the West since his short book on the subject, [Solution of Ill-Posed Problems](https://www.researchgate.net/publication/44438630_Solutions_of_ill-posed_problems_Andrey_N_Tikhonov_and_Vasiliy_Y_Arsenin), was only published in English in 1977, about 30 years after it had appeared in Russian.\n\n![](img/Tikhonov_board.jpg)\n<center> **Figure 2.1 \nCommemorative plaque for Andrey Nikolayevich Tikhonov at Moscow State University**\n\n\nSo, what is the basic idea? l2 regularization applies a **penalty** proportional to the **l2** or **Euclidean norm** of the model weights to the loss function. The total loss function then becomes: \n\n$$J(W) = J_{MLE}(W) + \\lambda ||W||^2$$\n\nWhere,\n\n$$||W||^2 = \\big( w_1^2 + w_2^2 + \\ldots + w_n^2 \\big)^{\\frac{1}{2}} = \\Big( \\sum_{i=1}^n w_i^2 \\Big)^{\\frac{1}{2}}$$\n\nWe call $||W||^2$ the l2 norm of the weights since we square the power of the weights, sum, and then take the square root, or $\\frac{1}{2}$ power. \n\nYou can think of this penalty as constraining the 12 or Euclidean norm of the model weight vector. The value of the hyperparameter $\\lambda$ determines how much the norm of the coefficient vector constrains the solution. You can see a view of this geometric interpretation in Figure 2.2 below. \n\n![](img/L2.jpg)\n<center> **Figure 2.2. Geometric view of l2 regularization**\n\nNotice that for a constant value of l2, the values of the model parameters $B1$ and $B2$ are related. For example, if $B1$ is maximized then $B2 \\sim 0$, or vice versa. It is important to note that l2 regularization is a **soft constraint**. Coefficients are driven close to, but likely not exactly to, zero. \n",
"_____no_output_____"
],
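[
"As a small numeric illustration of the penalty term (the weight vector, $\\lambda$, and data-loss value below are arbitrary numbers chosen for the example), the penalty is just $\\lambda$ times the sum of the squared weights, added to the data loss:\n\n```python\nimport numpy as np\n\nw = np.array([0.5, -1.2, 3.0])   # illustrative weight vector\nlam = 0.1                        # regularization strength (lambda)\ndata_loss = 2.7                  # placeholder for J_MLE(W)\n\nl2_penalty = lam * np.sum(w ** 2)   # lambda * ||W||^2\ntotal_loss = data_loss + l2_penalty\nprint(l2_penalty, total_loss)\n```\n\nLarger weights are penalized quadratically, which is why the optimizer is pushed toward many small, but typically nonzero, weights.",
"_____no_output_____"
],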
[
"### 2.1 Regularization for regression \n\nLet's go back to the regression example. Recall that the 9th order polynomial regression model was massively over-fit. Can l2 regularization help this situation? We can create a model applying regularization and find out. \n\nThe code in the cell below uses the `Ridge` model from `sklearn.linear_model`. The `Ridge` model has an argument `alpha` which corresponds to the regularization parameter, in the notation we have been using. Execute the code and examine the result. ",
"_____no_output_____"
]
],
[
[
"mod_L2 = slm.Ridge(alpha = 100.0)\nmod_L2.fit(x_scale, y_train)\ny_hat_L2 = mod_L2.predict(x_scale)\n\nprint(np.std(y_hat_L2 - y_train))\nprint(mod_L2.coef_)\n\nplot_reg(x_train, y_hat_L2, y_train)",
"_____no_output_____"
]
],
[
[
"This model is quite different from the un-regularized one we trained previously. \n- The coefficients all have small values. Some of the coefficients are significantly less than 1. These small coefficients are a direct result of the l2 penalty.\n- The fitted curve looks rather reasonable given the noisy data.\n\nNow test the model on the test data. Execute the code in the cell below and examine the results. ",
"_____no_output_____"
]
],
[
[
"y_hat_L2 = mod_L2.predict(x_scale_test)\n\nplot_reg(x_scale_test[:,0], y_hat_L2, y_test)\n\nprint(np.std(y_hat_L2 - y_test))",
"_____no_output_____"
]
],
[
[
"This result looks a lot more reasonable. The RMSE is nearly the same as for the single feature regression example. Also, the predicted curve looks reasonable.\n\nIn summary, we have seen that l2 regularization significantly improves the result for the 9th order polynomial regression. The coefficients are kept within a reasonable range and the predictions are much more reasonable than the unconstrained model. ",
"_____no_output_____"
],
[
"### 2.2 l2 regularization for deep learning models \n\nSo, you may well wonder, how well l2 regularization applies to neural networks? Let's give it a try using the 9th order polynomial data. \n\nThe code in the cell below defines and fits the regression model with a single hidden layer with 128 units. No regularization is applied in this first model. ",
"_____no_output_____"
]
],
[
[
"nr.seed(345)\nset_random_seed(4455)\nnn = models.Sequential()\nnn.add(layers.Dense(128, activation = 'relu', input_shape = (9, )))\nnn.add(layers.Dense(1))\nnn.compile(optimizer = 'rmsprop', loss = 'mse', metrics = ['mae'])\nhistory = nn.fit(x_scale, y_train, \n epochs = 30, batch_size = 1,\n validation_data = (x_scale_test, y_test),\n verbose = 0)",
"_____no_output_____"
]
],
[
[
"With the model fit, let's have a look at the loss function vs. training epoch. Execute the code in the cell below and examine the result. ",
"_____no_output_____"
]
],
[
[
"def plot_loss(history):\n train_loss = history.history['loss']\n test_loss = history.history['val_loss']\n x = list(range(1, len(test_loss) + 1))\n plt.plot(x, test_loss, color = 'red', label = 'Test loss')\n plt.plot(x, train_loss, label = 'Train loss')\n plt.legend()\n plt.xlabel('Epoch')\n plt.ylabel('Loss')\n plt.title('Loss vs. Epoch')\n \nplot_loss(history) ",
"_____no_output_____"
]
],
[
[
"It looks like this model becomes overfit after 3 or 4 training epochs. \n\nExecute the code in the cell below to compute and plot predictions for the unconstrained model. ",
"_____no_output_____"
]
],
[
[
"history = nn.fit(x_scale, y_train, \n epochs = 4, batch_size = 1,\n validation_data = (x_scale_test, y_test),\n verbose = 0)\npredicted = nn.predict(x_scale_test)\nplot_reg(x_scale_test[:,0], predicted, y_test)\nprint(np.std(predicted - y_test))",
"_____no_output_____"
]
],
[
[
"Both the high RMSE and the odd behavior of the predicted curve indicates that this model does not generalize well at all. Notice in particular, how the predicted curve moves away from the test data values on the right. \n\nNow, we will try to improve this result by applying l2 norm regularization to the neural network. The code in cell below adds l2 regularization to the model. Execute the code and examine the results.",
"_____no_output_____"
]
],
[
[
"nr.seed(45678)\nset_random_seed(45546)\nnn = models.Sequential()\nnn.add(layers.Dense(128, activation = 'relu', input_shape = (9, ),\n kernel_regularizer=regularizers.l2(2.0)))\nnn.add(layers.Dense(1))\nnn.compile(optimizer = 'rmsprop', loss = 'mse', metrics = ['mae'])\nhistory = nn.fit(x_scale, y_train, \n epochs = 30, batch_size = 1,\n validation_data = (x_scale_test, y_test),\n verbose = 0)\nplot_loss(history)",
"_____no_output_____"
]
],
[
[
"This loss function is quite a bit different than for the unconstrained model. It is clear that regularization allows many more training epochs before over-fitting. \n\nBut are the predictions any better? Execute the code in the cell below and find out. ",
"_____no_output_____"
]
],
[
[
"history = nn.fit(x_scale, y_train, \n epochs = 30, batch_size = 1,\n validation_data = (x_scale_test, y_test),\n verbose = 0)\npredicted = nn.predict(x_scale_test)\nplot_reg(x_scale_test[:,0], predicted, y_test)\nprint(np.std(predicted - y_test))",
"_____no_output_____"
]
],
[
[
"The l2 regularization has reduced the RMSE. Just as significantly, the pathological behavior of the predicted values on the right is reduced, but clearly not eliminated. The bias effect is also visible. Notice that the left part of the fitted curve is now shifted upwards. ",
"_____no_output_____"
],
[
"****************\n**Exercise 1:** You have now tried l2 regularization with one choice of regularization hyperparameter, namely the regularization parameter. Finding a good choice for the regularization parameter can require some trial and error. The objective is to find a value that produces a minimum test error.\n\nIn the code cells below, create models as follows: \n1. A regularization parameter of 20.0, using a `numpy.random.seed` of 9456 and `set_random_seed` for the TensorFlow backend of 55566\n2. A regularization parameter of 200.0, using a `numpy.random.seed` of 9566 and `set_random_seed` for the TensorFlow backend of 44223. \n\nPlot the loss history for both models. Make sure you give you models different names. ",
"_____no_output_____"
]
],
[
[
"nr.seed(9456)\nset_random_seed(55566)\n",
"_____no_output_____"
],
[
"nr.seed(9566)\nset_random_seed(44223)\n",
"_____no_output_____"
]
],
[
[
"Next, in the cells below you will create code to compute and plot the predicted values from your model for the test data, along with the error metric. Include the test data values on your plot. ",
"_____no_output_____"
]
],
[
[
"history20 = nn20.fit(x_scale, y_train, \n epochs = 30, batch_size = 1,\n validation_data = (x_scale_test, y_test),\n verbose = 0)\npredicted20 = nn20.predict(x_scale_test)\nplot_reg(x_scale_test[:,0], predicted20, y_test)\nprint(np.std(predicted20 - y_test))",
"_____no_output_____"
],
[
"history200 = nn200.fit(x_scale, y_train, \n epochs = 30, batch_size = 1,\n validation_data = (x_scale_test, y_test),\n verbose = 0)\npredicted200 = nn200.predict(x_scale_test)\nplot_reg(x_scale_test[:,0], predicted200, y_test)\nprint(np.std(predicted200 - y_test))",
"_____no_output_____"
]
],
[
[
"Finally, compare the results for the three models with regularization hyperparameter values of 2.0, 20.0, and 200.0. Notice how the RMSE improves as the hyperparameter increases. Notice also, that the test loss for the highest hyperparameter value decreases most uniformly, indicating less over-fitting of the model. ",
"_____no_output_____"
],
[
"## 3.0 l1 regularization\n\nWe can also do regularization using other norms. The **l1 regularization** or **Lasso** method limits the sum of the absolute values of the model coefficients. The l1 norm is sometime know as the **Manhattan norm**, since distance are measured as if you were traveling on a rectangular grid of streets. This is in contrast to the l2 norm that measures distance 'as the crow flies'. \n\nWe can compute the l1 norm of the weights as follows:\n\n$$||W||^1 = \\big( |w_1| + |w_2| + \\ldots + |w_n| \\big) = \\Big( \\sum_{i=1}^n |w_i| \\Big)^1$$\n\nwhere $|x|$ is the absolute value of $x$. \n\nNotice that to compute the l1 norm, we raise the sum of the absolute values to the first power.\n\nAs with l2 regularization, in l1 regularization we use a penalty term of the l1 norm of the weights. A penalty multiplier, $\\alpha$, determines how much the norm of the coefficient vector constrains values of the weights. The complete loss function then becomes: \n\n$$J(W) = J_{MLE}(W) + \\alpha ||W||^1$$\n\nYou can see a view of this geometric interpretation in Figure 3.1 below. \n\n![](img/L1.jpg)\n<center> **Figure 3.1. Geometric view of L1 regularization**\n\nNotice that in Figure 3.1 if $B1 = 0$ then $B2$ has a value at the limit, or vice versa. In other words, using a l1 norm constraint forces some weight values to zero to allow other coefficients to take correct values. In this way, the l1 norm constraint **knocks out** some weights from the model altogether. In contrast to l2 regularization, l1 regularization will drive some coefficients to exactly zero. ",
"_____no_output_____"
],
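[
"The contrast with the l2 norm is easy to see numerically (the weight vector below is an arbitrary example). The l1 penalty grows linearly in each weight, which is what drives small weights to exactly zero under optimization, while the l2 norm penalizes large weights much more heavily than small ones:\n\n```python\nimport numpy as np\n\nw = np.array([0.5, -1.2, 3.0, 0.0])   # illustrative weights\nl1 = np.sum(np.abs(w))                # l1 norm: sum of absolute values\nl2 = np.sqrt(np.sum(w ** 2))          # l2 norm: root of the sum of squares\nprint(l1, l2)\n```",
"_____no_output_____"
],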
[
"### 3.1 l1 regularization\n\nWith these ideas in mind, let's apply l1 norm regularization to the 9th order polynomial regression problem. The code in cell below applies l1 regularized or Lasso regularization to the linear regression problem. Execute this code and examine the results. ",
"_____no_output_____"
]
],
[
[
"mod_L1 = slm.Lasso(alpha = 2.0, max_iter=100000)\nmod_L1.fit(x_scale, y_train)\ny_hat_L1 = mod_L1.predict(x_scale)\n\nprint(np.std(y_hat_L1 - y_test))\nprint(mod_L1.coef_)\n\nplot_reg(x_train, y_hat_L1, y_train)",
"_____no_output_____"
]
],
[
[
"Notice the following about the results of this l1 regularized regression:\n- Many of the coefficients are 0, as expected.\n- The fitted curve looks reasonable. \n\nNow, execute the code in the cell below and examine the prediction results. ",
"_____no_output_____"
]
],
[
[
"y_hat_L1 = mod_L1.predict(x_scale_test)\n\nplot_reg(x_scale_test[:,0], y_hat_L1, y_test)\n\nprint(np.std(y_hat_L1 - y_test))",
"_____no_output_____"
]
],
[
[
"The RMSE has been reduced considerably, and is less than for l2 regularization regression. The plot of predicted values looks similar to the single regression model, but with some bias. \n",
"_____no_output_____"
],
[
"### 3.2 Neural network with l1 regularization",
"_____no_output_____"
],
[
"Now, we will try l1 regularization with a neural network. The code in the cell below defines, fits and plots a single layer neural network using l1 regularization. Execute this code and examine the results.",
"_____no_output_____"
]
],
[
[
"nn = models.Sequential()\nnn.add(layers.Dense(128, activation = 'relu', input_shape = (9, ),\n kernel_regularizer=regularizers.l1(10.0)))\nnn.add(layers.Dense(1))\nnn.compile(optimizer = 'rmsprop', loss = 'mse', metrics = ['mae'])\nhistory = nn.fit(x_scale, y_train, \n epochs = 100, batch_size = 1,\n validation_data = (x_scale_test, y_test),\n verbose = 0)\nplot_loss(history)",
"_____no_output_____"
]
],
[
[
"As a result of the l1 regularization the training loss does not exhibit signs of over-fitting for quite a few epochs. \n\nNext, excute the code in the cell below to compute and display predicted values from the trained network. ",
"_____no_output_____"
]
],
[
[
"history = nn.fit(x_scale, y_train, \n epochs = 40, batch_size = 1,\n validation_data = (x_scale_test, y_test),\n verbose = 0)\npredicted = nn.predict(x_scale_test)\nplot_reg(x_scale_test[:,0], predicted, y_test)\nprint(np.std(predicted - y_test))",
"_____no_output_____"
]
],
[
[
"These results are a definite improvement. The RMSE is similar to that produced by the l2 regularization neural network. Further, the fitting curve shows similar behavior and bias. This bias is the result of the regularization. ",
"_____no_output_____"
],
[
"## 4.0 Early stopping\n\nEarly stopping is conceptually simple. Early stopping terminates the training of the neural network model at an epoch before it becomes terribly over-fit. That's it! That is the idea of early stopping.\n\nIn fact, we have already been using early stopping as we create and test the foregoing regularized models. The question here is, how do we automate this process? ",
"_____no_output_____"
],
[
"### 4.1 Early stopping algorithm\n\nThe early stopping algorithm simple. This pseudo code shows the basic loop for early stopping on first epoch with a lower performance metric, which is executed after the first training epoch of the model. \n\n`Do while TRUE: \n store current model parameters \n update model for epoch \n if(performance_for_epoch < stored_performance_metric) \n return stored_model \n else \n stored_performance_metric = performance_for_epoch \n store_model = model \n` \n\n",
"_____no_output_____"
],
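[
"The pseudo code translates almost directly into Python. The sketch below is framework-agnostic: `train_one_epoch` and `evaluate` are stand-in functions you would supply yourself, and the loop assumes a metric where higher is better.\n\n```python\nimport copy\n\ndef early_stopping_fit(model, train_one_epoch, evaluate, max_epochs=100):\n    # Train until the validation metric stops improving (higher = better).\n    stored_model = copy.deepcopy(model)\n    stored_metric = evaluate(model)\n    for _ in range(max_epochs):\n        train_one_epoch(model)\n        metric = evaluate(model)\n        if metric < stored_metric:\n            return stored_model   # performance dropped: return the last good model\n        stored_metric = metric\n        stored_model = copy.deepcopy(model)\n    return stored_model\n```",
"_____no_output_____"
],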
[
"### 4.2 How does early stopping work?\n\nEarly stopping terminates model learning before over-fitting occurs. But how can we interpret this action in terms of the loss function $J(W)_{MLE}$? Figure 4.1 below provides some insight. \n\n![](img/EarlyStopping.JPG) \n<center>**Figure 4.1 Effect of early stopping on $J(W)_{MLE}$**</center>\n\nOn the left side of the diagram you can see contours of the weight norm. On the right are contours Early stopping terminates training at some model weight norm $||W||^2$. Ideally this is at the point where the training of $J(W)_{MLE}$ starts to over-fit. Thus, we can think of early stopping as analogous to l2 norm regularization where we write the loss function as:\n\n$$argmin_W J(W) = J(W)_{MLE} + \\alpha ||W||^2$$\n\nwhere,\n\n$\\alpha = $ a regularization parameter controlling the stopping point. ",
"_____no_output_____"
],
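[
"A rough way to quantify this analogy, following the argument in Goodfellow et al., *Deep Learning*, section 7.8 (for a quadratic approximation of the loss and a small, fixed learning rate): stopping after $\\tau$ gradient steps of size $\\epsilon$ behaves approximately like l2 regularization with\n\n$$\\alpha \\approx \\frac{1}{\\tau \\epsilon}$$\n\nso training longer, or with a larger learning rate, corresponds to a weaker effective l2 penalty.",
"_____no_output_____"
],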
[
"### 4.3 Early stopping example\n\nManually applying early stopping is both computationally inefficient and rather tedious. Fortunately, Keras has a build in capability that allows automation. \n\nTo implement this early stopping we need to define 2 Keras **callbacks**. Two such callbacks are required:\n1. The first callback, **EarlyStopping**, is for the early stopping method.\n2. The second call back **checkpoints** or saves the current model. \n\nThese callbacks are defined in the form of a **callbacks list**. \n\nNotice that the model defined includes l2 regularization. Thus, this model should replicate the performance observed with manual early stopping. To see how this works, examine and then execute the code in the following cell.",
"_____no_output_____"
]
],
[
[
"## First define and compile a model. \nnn = models.Sequential()\nnn.add(layers.Dense(128, activation = 'relu', input_shape = (9, ),\n kernel_regularizer=regularizers.l2(1.0)))\nnn.add(layers.Dense(1))\n\nnn.compile(optimizer = 'RMSprop', loss = 'mse', metrics = ['mae'])\n\n## Define the callback list\nfilepath = 'my_model_file.hdf5' # define where the model is saved\ncallbacks_list = [\n keras.callbacks.EarlyStopping(\n monitor = 'val_loss', # Use loss to monitor the model\n patience = 1 # Stop after one step with lower accuracy\n ),\n keras.callbacks.ModelCheckpoint(\n filepath = filepath, # file where the checkpoint is saved\n monitor = 'val_loss', # Don't overwrite the saved model unless val_loss is worse\n save_best_only = True # Only save model if it is the best\n )\n]\n\n## Now fit the model\nnr.seed(5566)\nset_random_seed(6767)\nhistory = nn.fit(x_scale, y_train, \n epochs = 40, batch_size = 1,\n validation_data = (x_scale_test, y_test),\n callbacks = callbacks_list, # Call backs argument here\n verbose = 0)\n\n## Visualize the outcome\nplot_loss(history)",
"_____no_output_____"
]
],
[
[
"You can see the behavior of the loss with training epoch is behaving as with l2 regularization alone. Notice that the training has been automatically terminated at the point the loss function is at its optimum. \n\nLet's also have a look at the accuracy vs. epoch. Execute the code in the cell below and examine the result. ",
"_____no_output_____"
]
],
[
[
"def plot_accuracy(history):\n train_acc = history.history['mean_absolute_error']\n test_acc = history.history['val_mean_absolute_error']\n x = list(range(1, len(test_acc) + 1))\n plt.plot(x, test_acc, color = 'red', label = 'Test error rate')\n plt.plot(x, train_acc, label = 'Train error rate')\n plt.legend()\n plt.xlabel('Epoch')\n plt.ylabel('Error rate')\n plt.title('Error Rate vs. Epoch') \n \nplot_accuracy(history) ",
"_____no_output_____"
]
],
[
[
"The curve of test accuracy is consistent with the test loss.\n\nThe code in the cell below retrieves the best model (by our stopping criteria) from storage, computes predictions and displays the result. Execute this code and examine the results. ",
"_____no_output_____"
]
],
[
[
"best_model = keras.models.load_model(filepath)\npredictions = best_model.predict(x_scale_test)\nplot_reg(x_scale_test[:,0], predictions, y_test)\nprint(np.std(predictions - y_test))",
"_____no_output_____"
]
],
[
[
"As expected, these results are similar, but a bit worse, than those obtained while manually stopping the training of the l2 regularized neural network. ",
"_____no_output_____"
],
[
"## 5.0 Dropout regularization\n\nAll of the regularization methods we have discussed so far, originated long before the current deep neural network era. We will now look at the **dropout regularization** method. Of all widely used regularization methods, dropout is one of the few specifically developed for neural networks. The seminal paper, [Dropout: A Simple Way to Prevent Neural Networks from\nOverfitting by Srivastava et. al](http://www.cs.toronto.edu/~rsalakhu/papers/srivastava14a.pdf), 2014, is quite readable and provides a lot more detail than is presented here.\n\nWe have already seen how l1 norm regularization knocks out some model weights. The dropout method regularizes neural networks by creating an **ensemble** of networks with some fraction $p \\lt 1.0$ of the hidden units removed. Ensemble methods are know to be strong regularizers and produce superior results by combining the learning of multiple **weak learners**. \n\nThe dropout method is somewhat different from other ensemble methods, such as bagging. This reweighting scheme has several advantages:\n- The model weights for the resulting networks are reweighted by the probabilities that they are sampled in the ensemble. \n- The memory required to train the model is simply $O(n)$, where n is the number of weights. A bagged model requires $O(M*n)$, where $M$ is the number of models in the ensemble.\n- When making predictions in production only one model is used. Whereas, the predictions for each model in the bag must be computed for bagging. \n\nTo understand this method, let's recall the basic model for the output of a lth layer in a fully connected network:\n\n$$z^{(l+1)}_i = w^{(l+1)}_i \\cdot h^{(l)} + b^{(l+1)}_i\\\\\nh^{(l+1)}_i = \\sigma(z^{(l+1)}_i)$$\n\nwhere:\n\n$\\sigma = $ the activation function. \n\nNow, we need to sample the hidden units with probability $p$, in which case we can write:\n\n$$r^{(l)}_i \\sim Bernoulli(p)\\\\\n\\tilde{h}^{(l)}_i = r^{(l)}_i * y^{(l)}\\\\\nz^{(l+1)}_i = w^{(l+1)}_i \\cdot \\tilde{h}^{(l)}_i + b^{(l+1)}_i\\\\\nh^{(l+1)}_i = \\sigma(z^{(l+1)}_i)$$\n\nwhere:\n\n$r^{(l)}_i =$dropout vector with values $\\{0,1\\}$.\n\nTo get a feel for what this means in practice examine Figure 5.1. This figure shows a fully connected network with 4 hidden units and a dropout probability $p = 0.5$. \n\n![](img/DropoutExample.JPG)\n![](img/DropoutExample2.JPG)\n\n<center>**Figure 5.1 \nPossible dropouts for a simple fully connected network with p = 0.5**</center>\n\nExamine Figure 5.1 and notice the following:\n\n- There are 6 ways to achieve dropout with exactly 1/2 the units as shown. \n- No units might dropout with probability $p^4$. \n- A single unit might drop out with probability $p^3 (1-p)$. \n- All units might drop out with probability $(1-p)^4$. This case is not admissible so should not be sampled. \n\nIn fact there are $n^2$ possible dropout patterns for a hidden layer with n units. This scaling quickly leads to a problem. For any realistic size network, it is not possible to fully sample all of the possibilities. Instead, we need to use some kind of approximation with a reasonable number of samples. \n\nIdeally, we want a model that gives us the posterior probability of $y$, the output, given $x$ the input which we can write $p(y\\ |\\ x)$. If we had infinite computing resources we could Monte Carlo sample this distribution for our neural network. This ideal reference neural network is known as **Bayesian network**. 
Clearly, for large scale networks it is not possible to compute this result. \n\nWe have to settle for a sampled result. We reweight by the probability that a sample is created. Continuing with the notation we used before we can write:\n\n$$p(y\\ |\\ x) \\sim \\sum_r p(r) p(y\\ |\\ x, r)$$\n\nwhere,\n\n$r = $ the Bernoulli sampled mask vector. \n\nGiven enough samples the approximation above will converge to the desired probability distribution. However, in practice it has been found that the **geometric mean** of the ensemble converges faster. ",
"_____no_output_____"
],
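[
"The Bernoulli sampling above is easy to write out in numpy. The sketch below uses the common 'inverted dropout' variant, where the kept activations are rescaled by $1/p$ at training time so that no reweighting is needed at test time (the activation values here are illustrative):\n\n```python\nimport numpy as np\n\nnp.random.seed(42)\np = 0.5                                     # probability of keeping a unit\nh = np.array([0.3, 1.2, 0.7, 2.0])          # illustrative hidden-layer outputs\n\nr = np.random.binomial(1, p, size=h.shape)  # Bernoulli(p) mask with values in {0, 1}\nh_train = (r * h) / p                       # inverted dropout: rescale the kept units\nh_test = h                                  # at test time all units are used unchanged\nprint(r, h_train)\n```",
"_____no_output_____"
],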
[
"### 5.1 Computing a neural network with dropout regularization\n\nWith a bit of theory in mind, we will now apply dropout regularization to training a neural network. The code in the cell below defines a neural network with a dropout layer with $p =0.5$. The rest of this network is identical to other networks we have been working with. Execute this code and examine the result. ",
"_____no_output_____"
]
],
[
[
"## First define and compile a model with a dropout layer. \nnn = models.Sequential()\nnn.add(layers.Dense(128, activation = 'relu', input_shape = (9, )))\nnn.add(Dropout(rate = 0.5)) # Use 50% dropout on this model\nnn.add(layers.Dense(1))\nnn.compile(optimizer = 'rmsprop', loss = 'mse', metrics = ['mae'])\n\n## Now fit the model\nnr.seed(1144)\nset_random_seed(6723)\nhistory = nn.fit(x_scale, y_train, \n epochs = 40, batch_size = 1,\n validation_data = (x_scale_test, y_test),\n callbacks = callbacks_list, # Call backs argument here\n verbose = 0)\n\n## Visualize the outcome\nplot_loss(history)",
"_____no_output_____"
]
],
[
[
"The familiar loss plot looks a bit different here. Notice the kinks in the training loss curve. This is likely a result of the dropout sampling. \n\nExecute the code in the cell below, and examine the accuracy vs. epoch curves. ",
"_____no_output_____"
]
],
[
[
"plot_accuracy(history)",
"_____no_output_____"
]
],
[
[
"The behavior of the training accuracy curve has a similar appearance to the loss curve in terms of the jagged appearance. \n\nExecute the code in the cell below examine the prediction results for this model. ",
"_____no_output_____"
]
],
[
[
"best_model = keras.models.load_model(filepath)\npredictions = best_model.predict(x_scale_test)\nplot_reg(x_scale_test[:,0], predictions, y_test)\nprint(np.std(predictions - y_test))",
"_____no_output_____"
]
],
[
[
"These results appear similar to those obtained with other regularization methods for neural networks on this problem, particularly, early stopping. While the dropout method is an effective regularizer it is no 'silver bullet'. ",
"_____no_output_____"
],
[
"## 6.0 Batch Normalization\n\nIt is often the case that the distribution of output values of some hidden layers changes . The result is that propagated gradients can become near zero, significantly slowing convergence in many cases. We will discuss this **vanishing gradient problem** in another lesson. \n\nIn 2015, [Sergey and Szegedy](https://arxiv.org/pdf/1502.03167.pdf) introduced a solution to this problem with their paper **Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift**. The basic idea is simple. A batch normalization layer maintains an exponential moving average estimate of the mean and variance of the outputs of layer. These values are used to normalize the output values of that layer. In other words, the batch normalization layer ensures the distribution of the output values are constant. \n\nLet's try an example. The simple neural network model defined in the code cell below includes a batch normalization layer. Also notice that to improve convergence the early stopping has been modified to have a patience of 3. Execute this code. ",
"_____no_output_____"
]
],
[
[
"## Use patience of 3\ncallbacks_list = [\n keras.callbacks.EarlyStopping(\n monitor = 'val_loss', # Use loss to monitor the model\n patience = 3 # Stop after three steps with lower accuracy\n ),\n keras.callbacks.ModelCheckpoint(\n filepath = filepath, # file where the checkpoint is saved\n monitor = 'val_loss', # Don't overwrite the saved model unless val_loss is worse\n save_best_only = True # Only save model if it is the best\n )\n]\n\n\n## Now, define an NN model using batch normalization. \n## First define and compile a model with a batch normalization layer. \nnn = models.Sequential()\nnn.add(layers.Dense(128, input_shape = (9, ), activation = 'relu'))\nnn.add(BatchNormalization(momentum = 0.99))\nnn.add(layers.Dense(1))\n## Define the optimizer and compile\noptm = keras.optimizers.rmsprop(lr=1.0)\nnn.compile(optimizer = optm, loss = 'mse', metrics = ['mae'])\n\n## Now fit the model\nnr.seed(345)\nset_random_seed(4532)\nhistory = nn.fit(x_scale, y_train, \n epochs = 100, batch_size = 1,\n validation_data = (x_scale_test, y_test),\n callbacks = callbacks_list, # Call backs argument here\n verbose = 0)\n\n## Visualize the outcome\nplot_loss(history)",
"_____no_output_____"
]
],
[
[
"The loss decreases rapidly and then remains in a narrow range thereafter. It appears that convergence is quite rapid.\n\nHow does the accuracy evolve with the training episodes? Execute the code in the cell below to display the result. ",
"_____no_output_____"
]
],
[
[
"plot_accuracy(history)",
"_____no_output_____"
]
],
[
[
"This accuracy curve is rather unusual. It seems to reflect the simple regularization being used. \n\nFinally, execute the code in the cell below to evaluate the predictions made with this model. ",
"_____no_output_____"
]
],
[
[
"best_model = keras.models.load_model(filepath)\npredictions = best_model.predict(x_scale_test)\nplot_reg(x_scale_test[:,0], predictions, y_test)\nprint(np.std(predictions - y_test))",
"_____no_output_____"
]
],
[
[
"The fit to the test data look fairly good. ",
"_____no_output_____"
],
[
"## 7.0 Using multiple regularization methods\n\n**Exercise 2:** In many cases more than one regularization method is applied. We have already applied early stopping with other regularization methods. In this exercise you will create a neural network work using four regularization methods at once:\n- l2 regularization\n- l1 regularization\n- Dropout\n- Early stopping \n\nIn the cell below create code for a neural network using the above regularization methods. Your code should include the following:\n\n1. Set a `numpy.random` seed of 242244 and a `set_random_seed` for the TensorFlow backend of 4356.\n2. Define a call back list with `EarlyStopping` with monitor set to `val_loss` and patience set to 4, and the `ModelCheckpoint` with monitor set to `val_loss`\n3. A fully connected layer with 128 units and ReLU activation. Include l1 and l2 regulaization using the `l1_l2` function with the regularization parameter set to 50.0.\n4. A dropout layer with regularization parameter set to 0.5.\n5. Fit your model for 120 epochs, with batch size of 10, using the already defined callback list\n\nFit you neural network model, saving the results to a history object. ",
"_____no_output_____"
]
],
[
[
"nr.seed(242244)\nset_random_seed(4346)\n\n",
"_____no_output_____"
]
],
[
[
"In the cell below create and execute the code to plot the loss history for both training and test. ",
"_____no_output_____"
]
],
[
[
"## Visualize the outcome\nplot_loss(history)",
"_____no_output_____"
]
],
[
[
"In the cell below create and execute the code to plot the accuracy history for both training and test. ",
"_____no_output_____"
]
],
[
[
"plot_accuracy(history)",
"_____no_output_____"
]
],
[
[
"Next, in the cells below you will create code to compute and plot the predicted values from your model for the test data, along with the error metric. Include the test data values on your plot. ",
"_____no_output_____"
]
],
[
[
"best_model = keras.models.load_model(filepath)\npredictions = best_model.predict(x_scale_test)\nplot_reg(x_scale_test[:,0], predictions, y_test)\nprint(np.std(predictions - y_test))",
"_____no_output_____"
]
],
[
[
"How do these results compare to using single regularization methods. Has there been any improvement in accuracy? What about the bias in the fitted curve which is quite noticeable when some of the single regularization methods are used? ",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
e7fca7be0c9548cf83d8b3bb03a0918a52449732 | 167,393 | ipynb | Jupyter Notebook | convolutional-neural-networks/mnist-mlp/mnist_mlp_exercise.ipynb | mwizasimbeye11/udacity-pytorch-scholar-challenge | 5d76f66b6d3185a01fb37dc17302a13eb6299da4 | [
"MIT"
] | null | null | null | convolutional-neural-networks/mnist-mlp/mnist_mlp_exercise.ipynb | mwizasimbeye11/udacity-pytorch-scholar-challenge | 5d76f66b6d3185a01fb37dc17302a13eb6299da4 | [
"MIT"
] | null | null | null | convolutional-neural-networks/mnist-mlp/mnist_mlp_exercise.ipynb | mwizasimbeye11/udacity-pytorch-scholar-challenge | 5d76f66b6d3185a01fb37dc17302a13eb6299da4 | [
"MIT"
] | null | null | null | 317.032197 | 99,196 | 0.919776 | [
[
[
"# Multi-Layer Perceptron, MNIST\n---\nIn this notebook, we will train an MLP to classify images from the [MNIST database](http://yann.lecun.com/exdb/mnist/) hand-written digit database.\n\nThe process will be broken down into the following steps:\n>1. Load and visualize the data\n2. Define a neural network\n3. Train the model\n4. Evaluate the performance of our trained model on a test dataset!\n\nBefore we begin, we have to import the necessary libraries for working with data and PyTorch.",
"_____no_output_____"
]
],
[
[
"# import libraries\nimport torch\nimport numpy as np",
"_____no_output_____"
]
],
[
[
"---\n## Load and Visualize the [Data](http://pytorch.org/docs/stable/torchvision/datasets.html)\n\nDownloading may take a few moments, and you should see your progress as the data is loading. You may also choose to change the `batch_size` if you want to load more data at a time.\n\nThis cell will create DataLoaders for each of our datasets.",
"_____no_output_____"
]
],
[
[
"from torchvision import datasets\nimport torchvision.transforms as transforms\n\n# number of subprocesses to use for data loading\nnum_workers = 0\n# how many samples per batch to load\nbatch_size = 20\n\n# convert data to torch.FloatTensor\ntransform = transforms.ToTensor()\n\n# choose the training and test datasets\ntrain_data = datasets.MNIST(root='data', train=True,\n download=True, transform=transform)\ntest_data = datasets.MNIST(root='data', train=False,\n download=True, transform=transform)\n\n# prepare data loaders\ntrain_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size,\n num_workers=num_workers)\ntest_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size, \n num_workers=num_workers)",
"Downloading http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz\nDownloading http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz\nDownloading http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz\nDownloading http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz\nProcessing...\nDone!\n"
]
],
[
[
"### Visualize a Batch of Training Data\n\nThe first step in a classification task is to take a look at the data, make sure it is loaded in correctly, then make any initial observations about patterns in that data.",
"_____no_output_____"
]
],
[
[
"import matplotlib.pyplot as plt\n%matplotlib inline\n \n# obtain one batch of training images\ndataiter = iter(train_loader)\nimages, labels = dataiter.next()\nimages = images.numpy()\n\n# plot the images in the batch, along with the corresponding labels\nfig = plt.figure(figsize=(25, 4))\nfor idx in np.arange(20):\n ax = fig.add_subplot(2, 20/2, idx+1, xticks=[], yticks=[])\n ax.imshow(np.squeeze(images[idx]), cmap='gray')\n # print out the correct label for each image\n # .item() gets the value contained in a Tensor\n ax.set_title(str(labels[idx].item()))",
"_____no_output_____"
]
],
[
[
"### View an Image in More Detail",
"_____no_output_____"
]
],
[
[
"img = np.squeeze(images[1])\n\nfig = plt.figure(figsize = (12,12)) \nax = fig.add_subplot(111)\nax.imshow(img, cmap='gray')\nwidth, height = img.shape\nthresh = img.max()/2.5\nfor x in range(width):\n for y in range(height):\n val = round(img[x][y],2) if img[x][y] !=0 else 0\n ax.annotate(str(val), xy=(y,x),\n horizontalalignment='center',\n verticalalignment='center',\n color='white' if img[x][y]<thresh else 'black')",
"_____no_output_____"
]
],
[
[
"---\n## Define the Network [Architecture](http://pytorch.org/docs/stable/nn.html)\n\nThe architecture will be responsible for seeing as input a 784-dim Tensor of pixel values for each image, and producing a Tensor of length 10 (our number of classes) that indicates the class scores for an input image. This particular example uses two hidden layers and dropout to avoid overfitting.",
"_____no_output_____"
]
],
[
[
"import torch.nn as nn\nimport torch.nn.functional as F\n\n## TODO: Define the NN architecture\nclass Net(nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n # linear layer (784 -> 1 hidden node)\n self.fc1 = nn.Linear(28 * 28, 512)\n self.fc2 = nn.Linear(512, 256)\n self.fc3 = nn.Linear(256, 10)\n \n self.dropout = nn.Dropout(p=0.2)\n\n def forward(self, x):\n # flatten image input\n x = x.view(-1, 28 * 28)\n # add hidden layer, with relu activation function\n x = F.relu(self.fc1(x))\n x = F.relu(self.fc2(x))\n x = self.fc3(x)\n \n return x\n\n# initialize the NN\nmodel = Net()\nprint(model)",
"Net(\n (fc1): Linear(in_features=784, out_features=512, bias=True)\n (fc2): Linear(in_features=512, out_features=256, bias=True)\n (fc3): Linear(in_features=256, out_features=10, bias=True)\n (dropout): Dropout(p=0.2)\n)\n"
]
],
[
[
"### Specify [Loss Function](http://pytorch.org/docs/stable/nn.html#loss-functions) and [Optimizer](http://pytorch.org/docs/stable/optim.html)\n\nIt's recommended that you use cross-entropy loss for classification. If you look at the documentation (linked above), you can see that PyTorch's cross entropy function applies a softmax funtion to the output layer *and* then calculates the log loss.",
"_____no_output_____"
]
],
[
[
"## TODO: Specify loss and optimization functions\n\n# specify loss function\ncriterion = nn.CrossEntropyLoss()\n\n# specify optimizer\noptimizer = torch.optim.SGD(model.parameters(), lr=0.02)",
"_____no_output_____"
]
],
[
[
"---\n## Train the Network\n\nThe steps for training/learning from a batch of data are described in the comments below:\n1. Clear the gradients of all optimized variables\n2. Forward pass: compute predicted outputs by passing inputs to the model\n3. Calculate the loss\n4. Backward pass: compute gradient of the loss with respect to model parameters\n5. Perform a single optimization step (parameter update)\n6. Update average training loss\n\nThe following loop trains for 30 epochs; feel free to change this number. For now, we suggest somewhere between 20-50 epochs. As you train, take a look at how the values for the training loss decrease over time. We want it to decrease while also avoiding overfitting the training data. ",
"_____no_output_____"
]
],
[
[
"# number of epochs to train the model\nn_epochs = 30 # suggest training between 20-50 epochs\n\nmodel.train() # prep model for training\n\nfor epoch in range(n_epochs):\n # monitor training loss\n train_loss = 0.0\n \n ###################\n # train the model #\n ###################\n for data, target in train_loader:\n # clear the gradients of all optimized variables\n optimizer.zero_grad()\n # forward pass: compute predicted outputs by passing inputs to the model\n output = model(data)\n # calculate the loss\n loss = criterion(output, target)\n # backward pass: compute gradient of the loss with respect to model parameters\n loss.backward()\n # perform a single optimization step (parameter update)\n optimizer.step()\n # update running training loss\n train_loss += loss.item()*data.size(0)\n \n # print training statistics \n # calculate average loss over an epoch\n train_loss = train_loss/len(train_loader.dataset)\n\n print('Epoch: {} \\tTraining Loss: {:.6f}'.format(\n epoch+1, \n train_loss\n ))",
"Epoch: 1 \tTraining Loss: 0.556918\nEpoch: 2 \tTraining Loss: 0.222661\nEpoch: 3 \tTraining Loss: 0.156637\nEpoch: 4 \tTraining Loss: 0.119404\nEpoch: 5 \tTraining Loss: 0.095555\nEpoch: 6 \tTraining Loss: 0.078523\nEpoch: 7 \tTraining Loss: 0.065621\nEpoch: 8 \tTraining Loss: 0.055467\nEpoch: 9 \tTraining Loss: 0.047212\nEpoch: 10 \tTraining Loss: 0.040336\nEpoch: 11 \tTraining Loss: 0.034449\nEpoch: 12 \tTraining Loss: 0.029373\nEpoch: 13 \tTraining Loss: 0.025010\nEpoch: 14 \tTraining Loss: 0.021287\nEpoch: 15 \tTraining Loss: 0.018142\nEpoch: 16 \tTraining Loss: 0.015500\nEpoch: 17 \tTraining Loss: 0.013296\nEpoch: 18 \tTraining Loss: 0.011448\nEpoch: 19 \tTraining Loss: 0.009876\nEpoch: 20 \tTraining Loss: 0.008596\nEpoch: 21 \tTraining Loss: 0.007462\nEpoch: 22 \tTraining Loss: 0.006518\nEpoch: 23 \tTraining Loss: 0.005701\nEpoch: 24 \tTraining Loss: 0.005015\nEpoch: 25 \tTraining Loss: 0.004452\nEpoch: 26 \tTraining Loss: 0.003980\nEpoch: 27 \tTraining Loss: 0.003578\nEpoch: 28 \tTraining Loss: 0.003236\nEpoch: 29 \tTraining Loss: 0.002941\nEpoch: 30 \tTraining Loss: 0.002686\n"
]
],
[
[
"---\n## Test the Trained Network\n\nFinally, we test our best model on previously unseen **test data** and evaluate it's performance. Testing on unseen data is a good way to check that our model generalizes well. It may also be useful to be granular in this analysis and take a look at how this model performs on each class as well as looking at its overall loss and accuracy.\n\n#### `model.eval()`\n\n`model.eval(`) will set all the layers in your model to evaluation mode. This affects layers like dropout layers that turn \"off\" nodes during training with some probability, but should allow every node to be \"on\" for evaluation!",
"_____no_output_____"
]
],
[
[
"# initialize lists to monitor test loss and accuracy\ntest_loss = 0.0\nclass_correct = list(0. for i in range(10))\nclass_total = list(0. for i in range(10))\n\nmodel.eval() # prep model for *evaluation*\n\nfor data, target in test_loader:\n # forward pass: compute predicted outputs by passing inputs to the model\n output = model(data)\n # calculate the loss\n loss = criterion(output, target)\n # update test loss \n test_loss += loss.item()*data.size(0)\n # convert output probabilities to predicted class\n _, pred = torch.max(output, 1)\n # compare predictions to true label\n correct = np.squeeze(pred.eq(target.data.view_as(pred)))\n # calculate test accuracy for each object class\n for i in range(batch_size):\n label = target.data[i]\n class_correct[label] += correct[i].item()\n class_total[label] += 1\n\n# calculate and print avg test loss\ntest_loss = test_loss/len(test_loader.dataset)\nprint('Test Loss: {:.6f}\\n'.format(test_loss))\n\nfor i in range(10):\n if class_total[i] > 0:\n print('Test Accuracy of %5s: %2d%% (%2d/%2d)' % (\n str(i), 100 * class_correct[i] / class_total[i],\n np.sum(class_correct[i]), np.sum(class_total[i])))\n else:\n print('Test Accuracy of %5s: N/A (no training examples)' % (classes[i]))\n\nprint('\\nTest Accuracy (Overall): %2d%% (%2d/%2d)' % (\n 100. * np.sum(class_correct) / np.sum(class_total),\n np.sum(class_correct), np.sum(class_total)))",
"Test Loss: 0.070234\n\nTest Accuracy of 0: 98% (970/980)\nTest Accuracy of 1: 99% (1127/1135)\nTest Accuracy of 2: 97% (1011/1032)\nTest Accuracy of 3: 97% (986/1010)\nTest Accuracy of 4: 98% (968/982)\nTest Accuracy of 5: 98% (875/892)\nTest Accuracy of 6: 98% (941/958)\nTest Accuracy of 7: 97% (1003/1028)\nTest Accuracy of 8: 96% (943/974)\nTest Accuracy of 9: 97% (987/1009)\n\nTest Accuracy (Overall): 98% (9811/10000)\n"
]
],
[
[
"### Visualize Sample Test Results\n\nThis cell displays test images and their labels in this format: `predicted (ground-truth)`. The text will be green for accurately classified examples and red for incorrect predictions.",
"_____no_output_____"
]
],
[
[
"# obtain one batch of test images\ndataiter = iter(test_loader)\nimages, labels = dataiter.next()\n\n# get sample outputs\noutput = model(images)\n# convert output probabilities to predicted class\n_, preds = torch.max(output, 1)\n# prep images for display\nimages = images.numpy()\n\n# plot the images in the batch, along with predicted and true labels\nfig = plt.figure(figsize=(25, 4))\nfor idx in np.arange(20):\n ax = fig.add_subplot(2, 20/2, idx+1, xticks=[], yticks=[])\n ax.imshow(np.squeeze(images[idx]), cmap='gray')\n ax.set_title(\"{} ({})\".format(str(preds[idx].item()), str(labels[idx].item())),\n color=(\"green\" if preds[idx]==labels[idx] else \"red\"))",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
e7fcab98b23c0c50af333d238aace461d12cd684 | 27,216 | ipynb | Jupyter Notebook | scraping/SinhalaSongBook/SinhalaSongBook/.ipynb_checkpoints/index_id_add-checkpoint.ipynb | harith96/Sinhala-Songs-Search-Engine | 010b6d4cf5ad2a3621b1e71f01614d396e5e13a4 | [
"MIT"
] | null | null | null | scraping/SinhalaSongBook/SinhalaSongBook/.ipynb_checkpoints/index_id_add-checkpoint.ipynb | harith96/Sinhala-Songs-Search-Engine | 010b6d4cf5ad2a3621b1e71f01614d396e5e13a4 | [
"MIT"
] | null | null | null | scraping/SinhalaSongBook/SinhalaSongBook/.ipynb_checkpoints/index_id_add-checkpoint.ipynb | harith96/Sinhala-Songs-Search-Engine | 010b6d4cf5ad2a3621b1e71f01614d396e5e13a4 | [
"MIT"
] | null | null | null | 40.926316 | 84 | 0.340204 | [
[
[
"import pandas as pd",
"_____no_output_____"
],
[
"df = pd.read_json(\"lyrics.json\")",
"_____no_output_____"
],
[
"global count\ncount = 0\n\ndef insert_index_id_columns(row):\n global count\n count = 1\n return pd.Series([count, \"songs\"], index=['song_id', '_index'])\n\ndf = pd.concat([df, df.apply(insert_index_id_columns, axis=1)], axis = 1)",
"_____no_output_____"
],
[
"df.sample(100)",
"_____no_output_____"
],
[
"df.to_json(\"lyrics.json\", orient='records', index=True)",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code"
]
] |
e7fcadd0a5bc034ee7b2037243e3dcc061d0ed4b | 33,880 | ipynb | Jupyter Notebook | Week05/WS04/Workshop04.ipynb | ds-connectors/Physics-88-Fa21 | 147ea6ea06798fc6e7d7eac9f06076365c291fc9 | [
"BSD-3-Clause"
] | 1 | 2021-08-30T17:52:58.000Z | 2021-08-30T17:52:58.000Z | Week05/WS04/Workshop04.ipynb | ds-connectors/Physics-88-Fa21 | 147ea6ea06798fc6e7d7eac9f06076365c291fc9 | [
"BSD-3-Clause"
] | null | null | null | Week05/WS04/Workshop04.ipynb | ds-connectors/Physics-88-Fa21 | 147ea6ea06798fc6e7d7eac9f06076365c291fc9 | [
"BSD-3-Clause"
] | null | null | null | 36.080937 | 838 | 0.599351 | [
[
[
"## Workshop 4\n### File Input and Output (I/O)\n\n**Submit this notebook to bCourses (ipynb and pdf) to receive a grade for this Workshop.**\n\nPlease complete workshop activities in code cells in this iPython notebook. The activities titled **Practice** are purely for you to explore Python. Some of them may have some code written, and you should try to modify it in different ways to understand how it works. Although no particular output is expected at submission time, it is _highly_ recommended that you read and work through the practice activities before or alongside the exercises. However, the activities titled **Exercise** have specific tasks and specific outputs expected. Include comments in your code when necessary. The workshop should be submitted on bCourses under the Assignments tab.\n\n**The homework should be submitted on bCourses under the Assignments tab (both the .ipynb and .pdf files). Please label it by your student ID number (SIS ID)**\n\n[Exercises start here](#exercises)",
"_____no_output_____"
],
[
"In this notebook, we're going to explore some ways that we can store data in files, and extract data from files. Let's just get all of the importing out of the way:",
"_____no_output_____"
]
],
[
[
"%matplotlib inline\nimport numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt",
"_____no_output_____"
]
],
[
[
"## Practice: Basic Writing and Reading ASCII Files",
"_____no_output_____"
],
[
"Think of ASCII files as text files. You can open them using a text editor (like vim or emacs in Unix, or Notepad in Windows) and read the information they contain directly. There are a few ways to produce these files, and to read them once they've been produced. In Python, the simplest way is to use file objects. \n\nLet's give it a try. We create an abstract file object by calling the function `open( filename, access_mode )` and assigning its return value to a variable (usually `f`). The argument `filename` just specifices the name of the file we're interested in, and `access_mode` tells Python what we plan to do with that file: \n\n 'r': read the file \n 'w': write to the file (creates a new file, or clears an existing file)\n 'a': append the file \n \nNote that both arguments should be strings.\nFor full syntax and special arguments, see documentation at https://docs.python.org/2/library/functions.html#open",
"_____no_output_____"
]
],
[
[
"f = open( 'welcome.txt', 'w' )",
"_____no_output_____"
]
],
[
[
"**A note of caution**: as soon as you call `open()`, Python creates a new file with the name you pass to it if you open it in write mode (`'w'`). Python will overwrite existing files if you open a file of the same name in write ('`w`') mode.",
"_____no_output_____"
],
[
"Now we can write to the file using `f.write( thing_to_write )`. We can write anything we want, but it must be formatted as a string.",
"_____no_output_____"
]
],
[
[
"topics = ['Data types', 'Loops', 'Functions', 'Arrays', 'Plotting', 'Statistics']",
"_____no_output_____"
],
[
"f.write( 'Welcome to Physics 77, Fall 2021\\n' ) # the newline command \\n tells Python to start a new line\nf.write( 'Topics we will learn about include:\\n' )\nfor top in topics:\n f.write( top + '\\n')\nf.close() # don't forget this part!",
"_____no_output_____"
]
],
[
[
"**Practice 1:** Use the syntax you have just learned to create an ASCII file titled \"`sine.txt`\" with two columns containing 20 x and 20 y values. The x values should range from $0$ to $2\\pi$ - you can use `np.linspace()` to generate these values (as many as you want). The y values should be $y = sin(x)$ (you can use `np.sin()`) for this. Then, use a `for` loop as above to write a new line for each pair of x and y values. To make sure that each x,y pair is on a new line, remember to add `\\n` to the end of each line like above. To separate the values by a tab so that the columns are nicely aligned, you can use the \"character\" `\\t`. So `\\n` inserts a new line and `\\t` inserts a tab. You may wish to use some kind of string formatting to decimals from running too far. Here is an example with just one data point:\n\n x = 0.5 * np.pi\n y = np.sin(x)\n print(\"%.5f \\t %.5f\" % (x,y))\n\nPay close attention to the fact that when you use the `write` function, the argument that you pass to it needs to be a string.",
"_____no_output_____"
]
],
[
[
"# Code for Practice 1",
"_____no_output_____"
]
],
[
[
"Now we will show how to *read* the values from `welcome.txt` back out:",
"_____no_output_____"
]
],
[
[
"f = open( 'welcome.txt', 'r' )\nfor line in f:\n print(line.strip())\nf.close()",
"_____no_output_____"
]
],
[
[
"**Practice 2:** In the cell immediately above, you see that we print `line.strip()` instead of just printing `line`. Remove the `.strip()` part and see what happens. ",
"_____no_output_____"
],
[
"Suppose we wanted to skip the first two lines of `welcome.txt` and print only the list of topics `('Data types', 'Loops', 'Functions', 'Arrays', 'Plotting', 'Statistics')`. We can use `readline()` to \"read\" the first two lines but not store their value, thereby ignoring them.",
"_____no_output_____"
]
],
[
[
"f = open( 'welcome.txt', 'r' )\nf.readline()\nf.readline() # skip the first two lines\ntopicList = []\nfor line in f:\n topicList.append(line.strip())\nf.close()\nprint(topicList)",
"_____no_output_____"
]
],
[
[
"Python reads in spacing commands from files as well as strings. The `.strip()` just tells Python to ignore those spacing commands. What happens if you remove it from the code above?",
"_____no_output_____"
],
[
"**Practice 3:** Use the syntax you have just learned to read back each line of x and y values from the `sine.txt` file that you just wrote in Practice 1. Don't worry about breaking up the lines into individual values quite yet.",
"_____no_output_____"
]
],
[
[
"# Code for Practice 3",
"_____no_output_____"
]
],
[
[
"### Practice Reading in Numerical Data as Floats",
"_____no_output_____"
],
[
"Numerical data can be somewhat trickier to read in than strings. In the practices above, you read in `sine.txt` but each line was a `string` not a pair of `float` values. Let's read in a file I produced in another program, that contains results from a BaBar experiment, where we searched for a \"dark photon\" produced in e+e- collisions. The data are presented in two columns: \n\n mass charge\n\nEvery time we read in a new line, it is going to start out being a `string`. To convert a line like\n\n 1.57079 1.00000\n \ninto a pair of values we need to do two things. The first is we need to split that string into two pieces. Fortunately, there is a function to do that for us. Suppose that we read in a `line` and we want to split it. We can do it as follows:\n\n line.split()\n\nFor the line above, calling `.split()` would return the following `list`:\n\n ['1.57079','1.00000']\n \nFrom there, we need to convert each value in the list into a `float` and store those values somewhere. This can be done using the `float()` function:\n\n x_values = []\n y_values = []\n split_values = ['1.57079','1.00000']\n x_values.append(float(split_values[0]))\n y_values.append(float(split_values[1]))\n\nNow `x_values` is a `list` containing 1 element which is the `float` value `1.57079` and `y_values` is a `list` containing 1 element which is the `float` value `1.00000`.",
"_____no_output_____"
]
],
[
[
"# Example using BaBar_2016.dat\n\nf = open('BaBar_2016.dat', 'r')\n# read each line, split the data wherever there's a blank space,\n# and convert the values to floats\n\n# lists where we will store the values we read in\nmass = []\ncharge = []\nfor line in f:\n tokens = line.split()\n mass.append(float(tokens[0]))\n charge.append(float(tokens[1]))\nf.close()",
"_____no_output_____"
]
],
[
[
"We got it; let's plot it!",
"_____no_output_____"
]
],
[
[
"import matplotlib.pyplot as plt\n%matplotlib inline\n\nplt.plot(mass, charge, 'r-' )\nplt.xlim(0, 8)\nplt.ylim(0, 2e-3)\nplt.xlabel('mass (GeV)')\nplt.ylabel('charge, 90% C.L. limit')\nplt.show()",
"_____no_output_____"
]
],
[
[
"**Practice 4:** Use the syntax you have just learned to read back each line of x and y values from the sine.txt file that you wrote in Practice 1, and split each line into `float` values and store them. Then, plot your stored x and y values to make sure you have done everything correctly",
"_____no_output_____"
]
],
[
[
"# Code for Practice 4",
"_____no_output_____"
]
],
[
[
"Of course, you already know of another way to read in values like this: `numpy.loadtxt()` and `numpy.genfromtxt()`. If you have already been using those, feel free to move on. Otherwise, take a moment to make yourself aware of these functions as they will massively simplify your life.",
"_____no_output_____"
],
[
"Fortunately, Python's `numpy` library has functions for converting file information into numpy arrays, which can be easily analyzed and plotted. The above can be accomplished with a lot less code (and a lot less head scratching!)",
"_____no_output_____"
],
[
"The `genfromtxt` function takes as it's argument the name of the file you want to load, and any optional arguments you want to add to help with the loading and formatting process. Some of the most useful optional arguments are: \n\n **dtype**: data type of the resulting array \n **comments**: the character that indicates the start of a comment (e.g. '#') \n lines following these characters will be ignored, and not read into the array \n **delimiter**: the character used to separate values. Often, it's whitespace, \n but it could also be ',', '|', or others \n **skip_header**: how many lines to skip at the beginning of the file \n **skip_footer**: how many lines to skip at the end of the file \n **use_cols**: which columns to load and which to ignore \n **unpack**: If True (the default is False), the array is transposed \n (i.e., you can a set of columns, not a set of rows.) You can accomplish the same thing with `genfromtxt( file, opt_args,...).T`",
"_____no_output_____"
],
[
"Reload the spectral data and reproduce the plot above using `loadtxt` or `genfromtxt`. \n**Hint:** You may find it helpful to use `numpy.split( array, N )`, which splits `array` into\n`N` equal-length parts, and returns them as a list.",
"_____no_output_____"
]
],
[
[
"# Same plot as before but now using numpy functions to load the data\n\nimport numpy as np\nmass, charge = np.loadtxt('BaBar_2016.dat', unpack = True)\nplt.plot(mass, charge,'r-')\nplt.xlim(0, 8)\nplt.ylim(0, 2e-3)\nplt.xlabel('mass (GeV)')\nplt.ylabel('charge, 90% C.L. limit')\nplt.show()",
"_____no_output_____"
]
],
[
[
"## Practice: Writing and Reading CSV files",
"_____no_output_____"
],
[
"CSV stands for Comma Separated Values. Python's `csv` module allows easy reading and writing of sequences. CSV is especially useful for loading data from spreadsheets and databases.",
"_____no_output_____"
],
[
"Let's make a list and write a file! \nFirst, we need to load a new module that you have not used yet in this course: `csv`",
"_____no_output_____"
]
],
[
[
"import csv",
"_____no_output_____"
]
],
[
[
"Next, just as before we need to create an abstract file object that opens the file we want to write to. \nThen, we create another programming abstraction called a *csv writer*, a special object that is built specifically to write sequences to our csv file. In this example, we have called the abstract file object `f_csv` and we have called the abstract csv writer object `SA_writer`",
"_____no_output_____"
]
],
[
[
"f_csv = open( 'nationData.csv', 'w' )\nSA_writer = csv.writer( f_csv, # write to this file object\n delimiter = '|', # place vertical bar between items we write\n quotechar = '', # Don't place quotes around strings\n quoting = csv.QUOTE_NONE )# made up of multiple words ",
"_____no_output_____"
]
],
[
[
"Make sure that you understand at this point that all we have done is create a writer. It has not written anything to the file yet. So let's try to write the following lists of data:",
"_____no_output_____"
]
],
[
[
"countries = ['Argentina', 'Bolivia', 'Brazil', 'Chile', 'Colombia', 'Ecuador', 'Guyana',\\\n 'Paraguay', 'Peru', 'Suriname', 'Uruguay', 'Venezuela']\ncapitals = ['Buenos Aires', 'Sucre', 'Brasília', 'Santiago', 'Bogotá', 'Quito', 'Georgetown',\\\n 'Asunción', 'Lima', 'Paramaribo', 'Montevideo', 'Caracas']\npopulation_mils = [ 42.8, 10.1, 203.4, 16.9, 46.4, 15.0, 0.7, 6.5, 29.2, 0.5,\\\n 3.3, 27.6]",
"_____no_output_____"
]
],
[
[
"Now let's figure out how to add a line to our CSV file. For a regular ASCII file, we added lines by calling the `write` function. For a CSV file, we use a function called `writerow` which is attributed to our abstract csv writer `SA_writer`:",
"_____no_output_____"
]
],
[
[
"SA_writer.writerow(['Data on South American Nations'])\nSA_writer.writerow(['Country', 'Capital', 'Populaton (millions)'])\nfor i in range(len(countries)):\n SA_writer.writerow( [countries[i], capitals[i], population_mils[i]] )\nf_csv.close()",
"_____no_output_____"
]
],
[
[
"Now let's see if we can open your file using a SpreadSheet program. If you don't have access to one, find someone who does! \n\n* Download nationData.csv\n* Open Microsoft Excel (or equivalent), and select \"Import Data.\" \n* Locate nationData.csv in the list of files that pops up. \n* Select the \"Delimited\" Option in the next dialog box, and hit \"Next\"\n* Enter the appropriate delimiter in the next pop-up box, and hit finish.\n\nHow did we do?",
"_____no_output_____"
],
[
"**Practice 5:** Use syntax learned above to generate a csv file called `sine.csv` with pairs of x and y values separated by a comma. It should end up looking sort of like\n\n 0.0,0.0\n 1.57079632679,1.00000000\nNotice a few things: we don't need to use any formatting of the numbers. It doesn't matter how many decimal places each value has on each line. Python will just use the comma to figure out where one number ends and another begins",
"_____no_output_____"
],
[
"We can use a similar process to *read* data from a csv file back into Python. Let's read in a list of the most populous cities from `cities.csv` and store them for analysis.",
"_____no_output_____"
]
],
[
[
"cities = []\ncityPops = []\nmetroPops = []",
"_____no_output_____"
],
[
"f_csv = open( 'cities.csv', 'r')\nreadCity = csv.reader( f_csv, delimiter = ',' )\n\n# The following line is how we skip a line in a csv. It is the equivalent of readline from before.\nnext(readCity) # skip the header row\n\n\nfor row in readCity:\n print(row)\nf_csv.close() ",
"_____no_output_____"
]
],
[
[
"Look at the output of the code above. Every `row` that is read in is a `list` of `strings` by default again. So in order to use the numbers *as numbers* we need to convert them using the `float()` operation. Below, we use this to figure out which city has the largest city population:",
"_____no_output_____"
]
],
[
[
"f_csv = open( 'cities.csv', 'r')\nreadCity = csv.reader( f_csv, delimiter = ',' )\n\nlargest_city_pop = 0.0\ncity_w_largest_pop = None\n\n# The following line is how we skip a line in a csv. It is the equivalent of readline from before.\nnext(readCity) # skip the header row\n\n\nfor row in readCity:\n city_country = ', '.join(row[0:2]) # joins the city and country strings using a comma, like \"Shanghai, China\"\n cityPop = float(row[2])\n metroPop = float(row[3])\n \n # if the population of this city is the largest seen so far, update\n if cityPop > largest_city_pop:\n largest_city_pop = cityPop\n city_w_largest_pop = city_country\nf_csv.close()\n\nprint(\"The city with the largest population is: %s with a population of %.1f million people\" % (city_w_largest_pop, largest_city_pop))",
"_____no_output_____"
]
],
[
[
"**Practice 6:** Use the syntax learned above to read in the x and y values from your `sine.csv` file. Plot your data to be sure you did everything correctly.",
"_____no_output_____"
]
],
[
[
"# Code for Practice 6",
"_____no_output_____"
]
],
[
[
"<a id='exercises'></a>",
"_____no_output_____"
],
[
"## Exercises\n\n",
"_____no_output_____"
],
[
"**Exercise 1:** This exercise is meant to put many of the skills you have practiced thus far together. In this exercise, we are going to use I/O methods to see if we can find some correlations in some fake housing data. You will need the following files which should be in your directory:\n\n house_locs_rent.txt\n bus_stops.txt\n grocery_stores.txt\n\nThe file `house_locs_rent.txt` is a list of the locations of 500 houses and ther respective rents, and it has 3 columns:\n \n x-coordinate y-coordinate rent (in USD)\n\nThe file `bus_stops.txt` is a list of the locations of bus stops and it has 2 columns:\n\n x-coordinate y-coordinate\n\nThe file `grocery_stores.txt` is a list of the locaitons of grocery stores and it has 2 columns: \n\n x-coordinate y-coordinate\n\nAll 3 files have one-line headers that you will want to ignore when loading the data. The goal of the exercise is to determine how much of the variation in the rent is predicted by variation in the distance between a house and its closest bus stop or by the distance between a house and its closest grocery store.\n\nTo determine this, for each of the 500 houses, you will need to first calculate its distance to its nearest bus stop and its distance to its nearest grocery store.\n\nThen, we will use a measure called the Pearson correlation coefficient (which you will use in Homework04) to give an estimate of how much of the variation in the rent is predicted by variation in these distances. The Pearson correlation coefficient is defined as follows:\n\n### Correlation Coefficient\n\nSuppose I have $N$ data points each with two variables $X_i$ and $Y_i$, where $i = 1\\dots N$. Suppose I want to know how much of the variation in $Y$ is predicted by the variation in $X$. The correlation coefficient $R$ is a value between -1 and 1 with the following meaning: when $R=0$, $X$ and $Y$ are independent of each other. When $R>0$, we say they are positively correlated because if $X$ increases we can expect that $Y$ will increase as well. When $R<0$ we say they are negatively or oppositely correlated because if $X$ increases we can expect that $Y$ will decrease. $R$ is defined as follows:\n\n$$R = \\frac{\\mathbb{E}[(X_i - \\mu_X)(Y_i - \\mu_Y)]}{\\sigma_X \\sigma_Y} = \\frac{1}{N \\sigma_X \\sigma_Y} \\sum_{i=1}^N \n(X_i - \\mu_X)(Y_i - \\mu_Y)$$\n\nwhere $\\mu_X$ is the average of $X_i$ over the dataset, $\\mu_Y$ is the average of $Y_i$ over the dataset, $\\sigma_X$ is the standard deviation of $X_i$ over the dataset, and $\\sigma_Y$ is the standard deviation of $Y_i$ over the dataset. For calculating those quantities, you may find `np.mean()` and `np.std()` helpful.\n\n**However**, you must write your *own* correlation coefficient function. It can use `np.mean()` and `np.std()` but it should not call `np.cov` or `np.corrcoef` . \n\n**Output:** Your code should contain a function to calculate correlation coefficients as well as any other functions that you want to write (for example, a distance function, a minimum distance function...). The output should be the correlation coefficients between the pairs of variables (minimum distances to bus stops, minimum distances to grocery stores, rents) appropriately labeled. Also, create a CSV file titled `distances_rents.csv` and write the values of the minimum distances to the bus_stop and grocery store for each house along with its rent. 
For example, if the closest bus stop to the first house is 0.5 away, the closest grocery store is 1.5 away, and the rent of the first house is 1250, then the first line of the CSV should read:\n\n    0.5,1.5,1250\n\n**Optional:** See if you can guess how I generated this fake data. To help sharpen your guess, try transforming the variables before computing the correlation coefficients. If the magnitude of the correlation coefficients goes up, that can be an indicator that you have found the correct form of the function. 
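\n\nPurely to illustrate the formula on made-up numbers (this is not a solution to the exercise), the definition of $R$ translates directly into NumPy:\n\n    X = np.array([1.0, 2.0, 3.0, 4.0])\n    Y = np.array([2.0, 4.0, 6.0, 8.0])\n    R = np.mean((X - np.mean(X)) * (Y - np.mean(Y))) / (np.std(X) * np.std(Y))\n    print(R)   # 1.0, since Y is a perfect linear function of X\n",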
"_____no_output_____"
]
],
[
[
"# Code for Exercise 1 goes here",
"_____no_output_____"
]
],
[
[
"## OPTIONAL: Practice With HDF5 Files",
"_____no_output_____"
],
[
"So far you have encountered a standard ASCII text file and a CSV file. The next file format is called an HDF5 file. HDF5 files are ideally suited for managing large amounts of complex data. Python can read them using the module `h5py.`",
"_____no_output_____"
]
],
[
[
"import h5py",
"_____no_output_____"
]
],
[
[
"Let's load our first hdf5 file into an abstract file object. We call ours `fh5` in the example below:",
"_____no_output_____"
]
],
[
[
"fh5 = h5py.File( 'solar.h5py', 'r' )",
"_____no_output_____"
]
],
[
[
"Here is how data is stored in an HDF5 file:\n hdf5 files are made up of data sets\n Each data set has a name. The correct Python terminology for this is \"key\". Let's take a look at what data sets are in `solar.h5py`. You can access the names (keys) of these data sets using the `.keys()` function:",
"_____no_output_____"
]
],
[
[
"for k in fh5.keys(): # loop through the keys\n print(k)",
"_____no_output_____"
]
],
[
[
"To access one of the 6 data sets above, we need to use its name from above. Here we access the data set called `\"names\"`:",
"_____no_output_____"
]
],
[
[
"for nm in fh5[\"names\"]: # make sure to include the quotation marks!\n print(nm)",
"_____no_output_____"
]
],
[
[
"So the dataset called `\"names\"` contains 8 elements (poor Pluto) which are strings. In this HDF5 file, the other data sets contain `float` values, and can be treated like numpy arrays:",
"_____no_output_____"
]
],
[
[
"print(fh5[\"solar_AU\"][::2])\nprint(fh5[\"surfT_K\"][fh5[\"names\"]=='Earth'])",
"_____no_output_____"
]
],
[
[
"Let's make a plot of the solar system that shows each planet's: \n* distance from the sun (position on the x-axis)\n* orbital period (position on the y-axis\n* mass (size of scatter plot marker)\n* surface temperature (color of marker)\n* density (transparency (or alpha, in matplotlib language))",
"_____no_output_____"
]
],
[
[
"distAU = fh5[\"solar_AU\"][:]\nmass = fh5[\"mass_earthM\"][:]\ntorb = fh5[\"TOrbit_yr\"][:]\ntemp = fh5[\"surfT_K\"][:]\nrho = fh5[\"density\"][:]\nnames = fh5[\"names\"][:]",
"_____no_output_____"
],
[
"def get_size( ms ):\n m = 400.0/(np.max(mass) - np.min(mass))\n return 100.0 + (ms - np.min(mass))*m \ndef get_alpha( p ):\n m = .9/(np.max(rho)-np.min(rho))\n return .1+(p - np.min(rho))*m",
"_____no_output_____"
],
[
"alfs = get_alpha(rho)",
"_____no_output_____"
],
[
"import matplotlib as mpl\nimport matplotlib.pyplot as plt\n\nnorm = mpl.colors.Normalize(vmin=np.min(temp), vmax=np.max(temp))\ncmap = plt.cm.cool\nm = plt.cm.ScalarMappable(norm=norm, cmap=cmap)\n\nfig, ax = plt.subplots(1)\nfor i in range(8):\n ax.scatter( distAU[i], torb[i], s = get_size(mass[i]), color = m.to_rgba(temp[i]), alpha=alfs[i] ) \nax.set_xscale('log')\nax.set_yscale('log')\nax.set_ylim(-5,200)\nax.set_ylabel( 'orbital period (y)' )\nax.set_xlabel( 'average dist. from sun (AU)' )\nax.set_title( 'Our solar system' )",
"_____no_output_____"
]
],
[
[
"Play around with the data and see what interesting relationships you can find!",
"_____no_output_____"
],
[
"If you ever want to write your own HDF5 file, you can open an h5py file object by calling: \n\n fh5 = h5py.File('filename.h5py', 'w') \n \nData sets are created with\n\n dset = fh5.create_dataset( \"dset_name\", (shape,))\n \nThe default data type is float. \nThe values for the data set are then set with: \n\n dset[...] = ( ) \n \nwhere the parenthesis contain an array or similar data of the correct shape. After you've added all your data sets, close the file with \n\n fh5.close() \nIf you have extra time, try creating your own data set and read it back in to verify that you've done it correctly!",
"_____no_output_____"
],
[
"## OPTIONAL: Practice With Binary Files",
"_____no_output_____"
],
[
"So far, we've been dealing with text files. If you opened these files up with a text editor, you could see what was written in them. Binary files are different. They're written in a form that Python (and other languages) understand how to read, but we can't access them directly. The most common binary file you'll encounter in python is a *.npy* file, which stores numpy arrays. You can create these files using the command `np.save( filename, arr )`. That command will store the array `arr` as a file called filename, which should have the extension .npy. We can then reload the data with the command `np.load(filename)`",
"_____no_output_____"
]
],
[
[
"x = np.linspace(-1, 1.0, 100)\ny = np.sin(10*x)*np.exp(-x) - x\nxy = np.hstack((x,y))",
"_____no_output_____"
],
[
"# save the array\nnp.save('y_of_x.npy', xy )",
"_____no_output_____"
],
[
"del x, y, xy # erase these variables from Python's memory",
"_____no_output_____"
]
],
[
[
"Now reload the data and check that you can use it just as before.",
"_____no_output_____"
],
[
"Bonus challenge! Load the file `mysteryArray.npy`, and figure out the best way to plot it. \n**Hint:** look at the shape of the file, and the dimensions. ",
"_____no_output_____"
]
],
[
[
"data = np.load('mysteryArray.npy')",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown"
],
[
"code"
]
] |
e7fcbd661cdeb21c18d5cba1221c360584cfe2e1 | 8,613 | ipynb | Jupyter Notebook | Coin detection.ipynb | Viniths28/ML-and-AI | 5ab1ba34cf32047c0ed8318347e5de78ebabcc3c | [
"Apache-2.0"
] | null | null | null | Coin detection.ipynb | Viniths28/ML-and-AI | 5ab1ba34cf32047c0ed8318347e5de78ebabcc3c | [
"Apache-2.0"
] | 1 | 2020-09-22T17:55:22.000Z | 2020-09-22T17:55:22.000Z | Coin detection.ipynb | Viniths28/ML-and-AI | 5ab1ba34cf32047c0ed8318347e5de78ebabcc3c | [
"Apache-2.0"
] | null | null | null | 22.785714 | 87 | 0.503077 | [
[
[
"from sklearn.neural_network import MLPClassifier\nfrom sklearn.model_selection import train_test_split\n\nimport math\nimport numpy as np\nimport argparse\nimport glob\nimport cv2\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.model_selection import train_test_split\nfrom __future__ import division\nfrom math import cos, sin",
"_____no_output_____"
],
[
"image = cv2.imread(\"D:/coin.jpg\", 1)",
"_____no_output_____"
],
[
"d = 1024 / image.shape[1]\ndim = (1024, int(image.shape[0] * d))\nimage = cv2.resize(image, dim, interpolation=cv2.INTER_AREA)\ncv2.imshow(\"input\", image)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n",
"_____no_output_____"
],
[
"output = image.copy()",
"_____no_output_____"
],
[
"gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)",
"_____no_output_____"
],
[
"clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))\ngray = clahe.apply(gray)",
"_____no_output_____"
],
[
"def calcHistogram(img):\n # create mask\n m = np.zeros(img.shape[:2], dtype=\"uint8\")\n (w, h) = (int(img.shape[1] / 2), int(img.shape[0] / 2))\n cv2.circle(m, (w, h), 60, 255, -1)\n h = cv2.calcHist([img], [0, 1, 2], m, [8, 8, 8], [0, 256, 0, 256, 0, 256])\n return cv2.normalize(h, h).flatten()",
"_____no_output_____"
],
[
"def calcHistFromFile(file):\n img = cv2.imread(file)\n return calcHistogram(img)",
"_____no_output_____"
],
[
"class Enum(tuple): __getattr__ = tuple.index",
"_____no_output_____"
],
[
"Material = Enum(('Copper', 'Brass', 'Euro1', 'Euro2'))",
"_____no_output_____"
],
[
"sample_images_copper = glob.glob(\"D:/Masterfile/sample_images/copper/*\")\nsample_images_brass = glob.glob(\"D:/Masterfile/sample_images/brass/*\")\nsample_images_euro1 = glob.glob(\"D:/Masterfile/sample_images/euro1/*\")\nsample_images_euro2 = glob.glob(\"D:/Masterfile/sample_images/euro2/*\")",
"_____no_output_____"
],
[
"X = []\ny = []\nfor i in sample_images_copper:\n X.append(calcHistFromFile(i))\n y.append(Material.Copper)\nfor i in sample_images_brass:\n X.append(calcHistFromFile(i))\n y.append(Material.Brass)\nfor i in sample_images_euro1:\n X.append(calcHistFromFile(i))\n y.append(Material.Euro1)\nfor i in sample_images_euro2:\n X.append(calcHistFromFile(i))\n y.append(Material.Euro2)",
"_____no_output_____"
],
[
"clf = MLPClassifier(solver=\"lbfgs\")",
"_____no_output_____"
],
[
"X_train, X_test, y_train, y_test = train_test_split(\n X, y, test_size=.4)",
"_____no_output_____"
],
[
"clf.fit(X_train, y_train)\nscore = int(clf.score(X_test, y_test) * 100)\nprint(\"Classifier mean accuracy: \", score)",
"Classifier mean accuracy: 94\n"
],
[
"blurred = cv2.GaussianBlur(gray, (7, 7), 0)",
"_____no_output_____"
],
[
"circles = cv2.HoughCircles(blurred, cv2.HOUGH_GRADIENT, dp=2.2, minDist=100,\n param1=200, param2=100, minRadius=50, maxRadius=120)",
"_____no_output_____"
],
[
"\ndef predictMaterial(roi): \n hist = calcHistogram(roi)\n s = clf.predict([hist])\n return Material[int(s)]\n\n\n\ndiameter = []\nmaterials = []\ncoordinates = []\n\ncount = 0\nif circles is not None:\n for (x, y, r) in circles[0, :]:\n diameter.append(r)\n\n circles = np.round(circles[0, :]).astype(\"int\")\n for (x, y, d) in circles:\n count += 1\n coordinates.append((x, y))\n\n roi = image[y - d:y + d, x - d:x + d]\n material = predictMaterial(roi)\n materials.append(material)\n\n if False:\n m = np.zeros(roi.shape[:2], dtype=\"uint8\")\n w = int(roi.shape[1] / 2)\n h = int(roi.shape[0] / 2)\n cv2.circle(m, (w, h), d, (255), -1)\n maskedCoin = cv2.bitwise_and(roi, roi, mask=m)\n \n\n cv2.circle(output, (x, y), d, (0, 255, 0), 2)\n cv2.putText(output, material,\n (x - 40, y), cv2.FONT_HERSHEY_PLAIN,\n 1.5, (0, 255, 0), thickness=2, lineType=cv2.LINE_AA)",
"_____no_output_____"
],
[
"d = 500 / output.shape[1] \ndim = (500, int(output.shape[0] * d))\nimage = cv2.resize(image, dim, interpolation=cv2.INTER_AREA)\noutput = cv2.resize(output, dim, interpolation=cv2.INTER_AREA)",
"_____no_output_____"
],
[
"cv2.imshow(\"Output\",(output))\ncv2.waitKey()\ncv2.destroyAllWindows()\nimage2=cv2.imread(\"output.jpg\",1)\nprint('done')",
"done\n"
],
[
"cv2.imwrite(\"D:/Coindetected.jpg\", output) \n",
"_____no_output_____"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e7fcc7fac9b8aecb4866223ef6a33fc34167f43a | 53,984 | ipynb | Jupyter Notebook | paper-plotting/paths_counting.ipynb | twistedcubic/attention-rank-collapse | 38b5df6dc2add25f6d945e48a6baf96862368c20 | [
"Apache-2.0"
] | 118 | 2021-03-08T01:46:30.000Z | 2022-02-10T06:51:20.000Z | paper-plotting/paths_counting.ipynb | twistedcubic/attention-rank-collapse | 38b5df6dc2add25f6d945e48a6baf96862368c20 | [
"Apache-2.0"
] | null | null | null | paper-plotting/paths_counting.ipynb | twistedcubic/attention-rank-collapse | 38b5df6dc2add25f6d945e48a6baf96862368c20 | [
"Apache-2.0"
] | 11 | 2021-03-08T10:21:11.000Z | 2021-12-30T13:03:20.000Z | 278.268041 | 48,164 | 0.913345 | [
[
[
"import matplotlib.pyplot as plt\nimport numpy as np\nimport jax\nimport jax.numpy as jnp\nfrom functools import partial\nimport scipy\nimport itertools\nimport matplotlib\nimport seaborn as sns",
"_____no_output_____"
],
[
"architectures = [\n (\"DistilBert\", 12, 6),\n (\"MobileBert\", 4, 12),\n (\"Bert-Base, ViT-B\", 12, 12),\n (\"Bert-Large, ViT-L\", 16, 24),\n (\"ViT-H\", 16, 32),\n (\"T5-3B\", 32, 24),\n (\"T5-11B\", 128, 24),\n (\"GPT-3\", 96, 96),\n (\"DeiT-T\", 3, 12),\n]\n\ndef num_paths(k, L, H):\n return 10 ** (np.log10(scipy.special.comb(L, k)) + k * np.log10(H))\n\nfrom collections import defaultdict\n\nmarkers = [\"o\", \"v\", \"^\", \"<\", \">\", \"s\", \"*\", \"h\", \"H\", \"+\", \"x\", \"X\", \"D\", \"d\", \"1\", \"2\", \"3\", \"4\", \"8\", \"p\",\"P\", ]\nmarker = defaultdict(lambda: markers[len(marker)])\nmarker[\"GPT-3\"] = \".\"\n\nplot_attrs = [\"num_heads\"]\n# plot_attrs = [\"layers\", \"num_heads\", \"ratio\"]\nfig, axes = plt.subplots(1, len(plot_attrs), figsize=(12 * len(plot_attrs), 4))\n\nfor ax, attr in zip([axes], plot_attrs):\n plt.sca(ax)\n\n color_attr = {\n \"layers\": lambda heads, layers: layers,\n \"heads\": lambda heads, layers: heads,\n \"ratio\": lambda heads, layers: layers / heads,\n \"num_heads\": lambda heads, layers: layers * heads,\n }[attr]\n\n cmap_name = {\n \"layers\": \"crest\",\n \"heads\": \"flare\",\n \"ratio\": \"viridis\",\n \"num_heads\": \"crest\"\n }[attr]\n\n title = {\n \"layers\": \"Comparison by number of layers L\",\n \"heads\": \"Comparison by number of heads H\",\n \"ratio\": \"Comparison by ratio L / H\",\n \"num_heads\": \"Comparison by total number of heads\"\n }[attr]\n\n num_colors = len(set(color_attr(h, l) for _, h, l in architectures))\n ordered_color_attr = sorted(list(set(color_attr(h, l) for _, h, l in architectures)))\n cmap = plt.cm.get_cmap(cmap_name, num_colors)\n norm = matplotlib.colors.Normalize(vmin=6, vmax=96)\n\n\n for _, (name, heads, layers) in enumerate(sorted(architectures, reverse=True, key=lambda r: (color_attr(r[1], r[2]), r[1], r[2]))):\n depths = jnp.arange(0, layers + 1)\n counts = np.array([num_paths(d, L=layers, H=heads) for d in depths])\n counts /= counts.sum()\n # depths /= depths.max()\n plt.plot(depths, counts, label=f\"{name:<18}L={layers:>2} H={heads:>3}\", marker=marker[name], c=cmap(ordered_color_attr.index(color_attr(heads, layers))))\n\n plt.set_cmap(cmap)\n plt.legend(prop={'family': 'monospace'})\n plt.grid(alpha=.5)\n plt.ylabel(\"Proportion of paths\")\n plt.xlabel(\"Path length\")\n plt.xscale(\"log\")\n ticks = list(range(1, 6)) + [6, 12, 24, 32, 96]\n plt.xticks(ticks=ticks, labels=list(map(str, ticks)))\n plt.minorticks_off()\n",
"WARNING:absl:No GPU/TPU found, falling back to CPU. (Set TF_CPP_MIN_LOG_LEVEL=0 and rerun for more info.)\n"
],
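[
"# Added sketch, not part of the original notebook: a quick sanity check that\n# num_paths (computed in log space) matches the direct count C(L, k) * H**k\n# on small architectures where the product cannot overflow.\nfor L, H in [(4, 3), (6, 2)]:\n    for k in range(L + 1):\n        direct = scipy.special.comb(L, k) * H ** k\n        assert np.isclose(num_paths(k, L, H), direct)",
"_____no_output_____"
],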
[
"import pathlib\npathlib.Path(\"figures\").mkdir(parents=True, exist_ok=True)\nfilename = \"figures/path_length_distribution\"\nfig.savefig(f\"{filename}.pdf\")\n!pdfcrop {filename}.pdf {filename}_croped.pdf",
"PDFCROP 1.38, 2012/11/02 - Copyright (c) 2002-2012 by Heiko Oberdiek.\n==> 1 page written on `figures/path_length_distribution_croped.pdf'.\n"
]
]
] | [
"code"
] | [
[
"code",
"code",
"code"
]
] |
e7fccb4c1413e82a64e0244cde25759be0bf0a36 | 2,793 | ipynb | Jupyter Notebook | courses/dl1/multi_label_mri_modality_classification.ipynb | mingrui/fastai | ef3533d11ef9b64b27ced38e2fc26de8c9ed7132 | [
"Apache-2.0"
] | 1 | 2022-02-20T11:52:34.000Z | 2022-02-20T11:52:34.000Z | courses/dl1/multi_label_mri_modality_classification.ipynb | mingrui/fastai | ef3533d11ef9b64b27ced38e2fc26de8c9ed7132 | [
"Apache-2.0"
] | null | null | null | courses/dl1/multi_label_mri_modality_classification.ipynb | mingrui/fastai | ef3533d11ef9b64b27ced38e2fc26de8c9ed7132 | [
"Apache-2.0"
] | null | null | null | 18.871622 | 71 | 0.517007 | [
[
[
"%reload_ext autoreload\n%autoreload 2\n%matplotlib inline",
"_____no_output_____"
],
[
"import torch",
"_____no_output_____"
],
[
"from fastai.imports import *\nfrom fastai.torch_imports import *\nfrom fastai.transforms import *\nfrom fastai.conv_learner import *\nfrom fastai.model import *\nfrom fastai.dataset import *\nfrom fastai.sgdr import *\nfrom fastai.plots import *",
"_____no_output_____"
],
[
"??tfms_from_model",
"_____no_output_____"
],
[
"??tfms_from_stats",
"_____no_output_____"
],
[
"PATH = \"/mnt/DATA/datasets\"",
"_____no_output_____"
],
[
"sz = 224\narch = resnet34\nbs = 24",
"_____no_output_____"
],
[
"!ls {PATH}",
"201801-IDH\t\t\t 201801-IDH-jpeg-train\r\n201801-IDH-jpeg-binary-test\t 201801-IDH-jpeg-validation\r\n201801-IDH-jpeg-binary-train\t 201801-IDH-test\r\n201801-IDH-jpeg-binary-validation 201801-IDH-test-cache\r\n201801-IDH-jpeg-test\t\t process_excel_output\r\n"
],
[
"??tfms_from_model",
"_____no_output_____"
],
[
"tfms = tfms_from_model(arch, sz)",
"_____no_output_____"
],
[
"data = ImageClassifierData.from_paths()",
"_____no_output_____"
]
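[
"# Added sketch, not in the original notebook: with the fastai 0.7 API imported above,\n# the usual next step is a pretrained ConvLearner on this data object.\n# precompute=True caches the frozen body's activations to speed up head training.\nlearn = ConvLearner.pretrained(arch, data, precompute=True)",
"_____no_output_____"
]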
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code",
"code"
]
] |
e7fce98a1a180ff75904c9e8d44fdad8f9f357ef | 1,264 | ipynb | Jupyter Notebook | tests/notebooks/ipynb_maxima/maxima_example.ipynb | sthagen/mwouts-jupytext | 3b1eaa21d3d139444bdc278a0b696c363838e085 | [
"MIT"
] | 11 | 2018-06-15T12:12:11.000Z | 2018-08-25T14:01:52.000Z | tests/notebooks/ipynb_maxima/maxima_example.ipynb | sthagen/mwouts-jupytext | 3b1eaa21d3d139444bdc278a0b696c363838e085 | [
"MIT"
] | 33 | 2018-06-17T01:16:10.000Z | 2018-08-30T16:09:02.000Z | tests/notebooks/ipynb_maxima/maxima_example.ipynb | sthagen/mwouts-jupytext | 3b1eaa21d3d139444bdc278a0b696c363838e085 | [
"MIT"
] | 1 | 2018-07-20T06:52:12.000Z | 2018-07-20T06:52:12.000Z | 16.205128 | 32 | 0.47231 | [
[
[
"## maxima misc",
"_____no_output_____"
]
],
[
[
"kill(all)$",
"_____no_output_____"
],
[
"f(x) := 1/(x^2+l^2)^(3/2);",
"_____no_output_____"
],
[
"integrate(f(x), x);",
"_____no_output_____"
],
[
"tex(%)$",
"_____no_output_____"
]
]
] | [
"markdown",
"code"
] | [
[
"markdown"
],
[
"code",
"code",
"code",
"code"
]
] |
e7fce9f6bd8f3ccc53d5cf98e49d9acf197f53ef | 363,171 | ipynb | Jupyter Notebook | _notebooks/2021-11-24-french_healtcare.ipynb | LuisAVasquez/quiescens-lct | 2a187da92aafd8ac1db0054c3bc10b01755b1fa3 | [
"Apache-2.0"
] | null | null | null | _notebooks/2021-11-24-french_healtcare.ipynb | LuisAVasquez/quiescens-lct | 2a187da92aafd8ac1db0054c3bc10b01755b1fa3 | [
"Apache-2.0"
] | null | null | null | _notebooks/2021-11-24-french_healtcare.ipynb | LuisAVasquez/quiescens-lct | 2a187da92aafd8ac1db0054c3bc10b01755b1fa3 | [
"Apache-2.0"
] | null | null | null | 732.199597 | 122,360 | 0.948352 | [
[
[
"# \"Settling in France: The healthcare system\"\n> The public healthcare system here is amazing\n\n- toc: true \n- badges: true\n- comments: true\n- categories:[\"french bureaucracy\", Nancy, going to France]\n- image: images/chart-preview.png",
"_____no_output_____"
],
[
"---\n\nIt is mandatory for all residents in France to register into the French healthcare system. \n\n---\n\nHere you can tell they really see healthcare as a human right. \n\n",
"_____no_output_____"
],
[
"---\n\nThe official [Campus France guide](https://www.campusfrance.org/en/registering-to-social-security) is very helpful.\n\n---",
"_____no_output_____"
],
[
"# Setting it up\n\nGetting everything set up is really slow, but after that everything is really smooth. In my case everything took around 5 months to set up, but for other students it got to 8 months.",
"_____no_output_____"
],
[
"## Steps and documents\n\nThe stages to get into the healthcare system are:\n\n- Initial registration\n- Getting a provisional number\n- Getting a definitive number\n- Applying for a *Carte Vitale*\n- Optional: Getting a *mutuelle* insurance\n- Optional: Applying for the *Complemaintaire*\n\n\nThe documents necessary along the way are:\n- Passport\n- Residence permit\n- Proof of registration at a University for the current academic year\n- Birth certificate in the original language, translated if possible\n- IBAN bank account number\n- A French phone number",
"_____no_output_____"
],
[
"## Initial registration\n\nIt is reallly important to do this ASAP after arriving to France, as the whole process is slown. Luckily, the initial registration completely online and really straight forward.\n\n1. Go to the [website for social security registration for foreign students](https://etudiant-etranger.ameli.fr/#/). \n2. Specify your situation, date of birth, and nationality.\n - Note: I have heard some nationalities, like Canada, have different registration processes due to agreements with France. In any case, the website is really user-friendly and will guide you in everything you need.\n3. As an LCT student with a scholarship, my situation was \"Student without employment\"\n4. Then you will be prompted to a list of necessary documents:",
"_____no_output_____"
],
[
"![Screenshot from 2022-04-04 09-54-12.png](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAp0AAAL3CAYAAADIuv4CAAAABHNCSVQICAgIfAhkiAAAABl0RVh0U29mdHdhcmUAZ25vbWUtc2NyZWVuc2hvdO8Dvz4AAAArdEVYdENyZWF0aW9uIFRpbWUATW9uIDA0IEFwciAyMDIyIDA5OjU0OjE4IENFU1T9YGGzAAAgAElEQVR4nOzdeVxVRf/A8Q/7JqsICC6IgoIIimbuuWZq4r6lpmmL+Wg9uWaWYpj6y7Q009xLS33Q3BfUclfUBDdccUNEgUhlEdn5/UGcuHCBe4Erlt/369UrOfecOTNztu+dMzNXLycnJwchhBBCCCF0SL+iMyCEEEIIIf79JOgUQgghhBA6J0GnEEIIIYTQOQk6hRBCCCGEzknQKYQQQgghdE6CTiGEEEIIoXMSdAohhBBCCJ2ToFMIIYQQQuicBJ1CCCGEEELnJOgUQgghhBA6J0GnEEIIIYTQOQk6hRBCCCGEzknQKYQQQgghdE6CTiGEEEIIoXMSdAohhBBCCJ2ToFMIIYQQQuicBJ1CCCGEEELnJOgUQgghhBA6J0GnEEIIIYTQOQk6hRBCCCGEzknQKYQQQgghdE6CTiGEEEIIoXMSdAohhBBCCJ2ToFMIIYQQQuicBJ1CCCGEEELnJOgUQgghhBA6J0GnEEIIIYTQOQk6hRBCCCGEzknQKYQQQgghdE6CTiGEEEIIoXMSdAohhBBCCJ2ToFMIIYQQQuicBJ1CCCGEEELnJOgUQgghhBA6J0FnEc6cOYObmxtubm40bdr0me/f3d1d2f/NmzeV5UOGDFGW//zzz888X6Vx8+ZN3n//fZo3b07t2rVxc3NjyJAhFZ0tnXlejpG/v7+Sj02bNpUprVmzZilpTZw4sZxyKCrai3xcX+SyL1q0SCn7Bx98UNHZES8Qw4rOwLPQtGlT4uPjNV5/586dOsxNxdu+fTu3b98GoHXr1vj5+elsX6mpqYwYMYKoqCid7UOIf6qwsDCOHj0KQK1atfD396/gHIl/g2d5jxdCGy9E0Plv0q1bN7y8vACU/2tr27ZtHDx4EABzc3Od3pAuX76sBJzW1tZs2bIFBwcHDAwMdLbPilYex0i8GEJDQ1mwYAEA7dq1k6BTlItneY8XQhsvRNA5bNgwnjx5ovwdEhLC+fPnAbC1tWXAgAEq69vb23P37t1nmkdNDRo0qKKzoJUHDx4o/65evTqurq5lTjMlJQVzc/Myp6MrpTlGz3uZhBBCiLJ6IYLO//znPyp/z5o1Swk67e3tmTRpUqFtCgadjx8/Zt68eezZs4fk5GS8vb355JNPCn2DTEtLY82aNezatYubN2+Snp6Os7MzHTt2ZPTo0dja2papLEOGDOHEiRMABAYGMnjwYACePn3K999/z969e4mKiiIjIwMbGxuqVatG/fr1+eCDD7h+/TpDhw5VSW/27NnMnj0bgE6dOrF06dIS8xAWFsaqVasIDQ3lzz//xMzMDA8PD3r06MGgQYOUVsyC3RrCw8Nxc3MD4MMPP+TDDz8sch+LFi1i/vz5ALz++usMGTKEefPmER4ejqurK7t27QIgNjaW5cuXc+jQIaKjo9HX16d27dr07NmTYcOGFWpRvXHjBnPmzOHkyZPo6+vTokULpkyZwtixYwkPDwfgyy+/pG/fvgB8+umnrFu3DoA333yTgIAAJa2OHTty69YtAFasWEH79u2LPUa6KtPNmzeZPXu2UqaWLVsyZcqUEo6ieklJScydO5c9e/bw5MkTvLy8GDduXLHbpKSk8OOPP7Jnzx5u3bpFRkYGDg4ONG/enHfeeQd3d/dC21y7do2VK1dy+vRpYmNjMTQ0xNnZmVatWvHZZ58Bpav7QYMGcerUKSC37g0NDVm6dCkPHjzA3d2dyZMn07JlSw4dOsQ333zD1atXsbOzY8CAAYwdOxZ9fdVu7hcvXmTFihX8/vvvxMfHY25uToMGDRg+fDgdOnRQWbfgcTcwMGDNmjXcunULa2trevTowaRJkzA0NCQrK6tQvRw8eFC5PiwtLZV71MaNGwkKCuL69eukpKRgZWVFlSpV8PLyYsiQIRq1YpXmuGpzLztz5gz9+/cHcu+p//vf/5g7dy4hISGkpaXh6+vL5MmTadiwYaH97Nmzhw0bNhAeHk5SUhK2trY0a9aM0aNHU7duXZV13d3dycrKAnJb8/bs2cOOHTuIi4vDxcWF0aNHK9duacuuyb20SpUqxVc42l8XpSlbQcePHy/VPT4iIoI5c+Zw6tQp9PX1adWqFQEBATg4OKisp+vnm/j3eyGCzrJKTU1lwIABREREKMvCwsIYMWIEhw8fxtraGoCEhASGDBnCpUuXVLaPjIxk5cqV7N69m6CgIFxcXMo9j+PHjyc4OFhlWXx8PPHx8Zw7d055IJTVmjVr+Pzzz8nOzlaWJSUlERoaSmhoKHv27GHVqlWYmJiUy/4g98E/ZMgQMjMzAZR9h4eHM2zYMB49eqSyfnh4OOHh4Rw4cIDVq1djZGQEwJ07d+jXrx8JCQnKuvv27SM0NBRjY+MS86Gnp1deRSq3Mt29e5d+/frx+PFjZd29e/cSGhqqrKOpjIwMhg4dyoULF5RlYWFhDBs2jHr16qnd5tGjRwwYMIAbN26oLI+OjmbTpk1s376dRYsW0bFjR+WzzZs38/HHHytlh9yHWUREBJGRkUrQmV9p6n7Dhg0q12J4eDhvvfUWH3zwAV9//TU5OTkAxMTEsGDBAmxtbXnzzTeV9YOCgpg6daoSCAAkJiZy/Phxjh8/ztixY/noo4/U7vu7774jJiZG+fuPP/5gxYoVWFhYFPtlq6A1a9aoBNuQW+ePHj3i+vXreHp6lhh0lua4luVelpycTK9evUhMTFSWnTp1ikGDBvHzzz8r+c3JyWHy5MmFBrn98ccf7Nixg3379rFkyRLatm2rNo/vvfeeSh3fuXOHSZMmUa1aNZo1a1bqsmtyLy0p6CzNdaFt2crLtWvX6N27t8rbwODgYB49esT69euVZRX5fBP/HjJ6XQPJycnExcUxceJEPvzwQ+VhnpiYyObNm5X1AgMDlQvSx8eHzZs3c+jQIfr16wfkvmpW16paVpmZmezbtw8AFxcXtm3bRkhICNu3b2fevHl07doVU1NTfHx8CAoKonHjxsq2Q4cOJSgoiKCgoBJHcN64cYPAwEAlQPL392fVqlV8+umnmJmZAbldFxYtWgTA8uXLVR6wbm5uyr7y6kQTkZGRODk5ERgYyOLFixkwYABZWVmMGTNGCc769OnDb7/9xs6dO/H19QXgxIkTLFmyREln9uzZSsBpZ2fHrFmzWLx4MdWrV1fpBlCUgq1gZVFeZZo1a5YScNra2hIYGMjChQupWrWqRmXK76efflIezgYGBowePZolS5bQtWvXQg+aPAEBAcqDNa9Oly9fTqtWrQBIT09n3LhxSh4jIyNVAs569eoxd+5cfvzxR6ZPn
15kIFCaur906RJ9+vRhxowZSpCQmZnJ/Pnz8fT0ZMaMGbRs2VJZf/Xq1cq/79y5owScenp6fPrppxw9epTVq1djZ2cHwLfffsvp06fV7js2Npbx48eza9cupaU7/z4MDAwICgpSaZVq3Lixcn388MMPwN+DGvX09Fi6dCknT55k7969LFu2jMGDB2Nvb19iPZTmuJblXpaamoqvry+HDh3i0KFDSh2npaWpfKH45ZdflIDT1taWZcuWcezYMT7//HP09PRIS0vjo48+IikpSe1+UlJSWLBgAdu3b1e5p+XVXWnKrum9tCTaXhelKZs6pbnHR0RE4OLiwvTp0+nTp4+y/NSpU1y5ckX5u6Keb+LfRVo6NfT1118r37jzvq0CygjBlJQUduzYoaz/5Zdf4uHhAcAXX3zBjh07SE1NJSQkhKioKKpXr15uedPX18fExISnT5+SlpbG3bt3MTc3p27dunh7e9OrVy9l3SZNmmBlZaX8Xa1aNZo0aaLRfrZs2aK0+ri7u/P111+jp6dH27Ztefr0KfPmzQNyHybjx4/H19dXpZuCubm5xvvKz8jIiPXr16t8gz5+/LiStr29PXPmzFFeO0+dOlVp2Q0KCuKDDz4gNTVV6VgPMH36dLp37w5Ao0aNaN26tUrLmzrlGXSWV5kOHDigUqa8gShNmjShdevWKq10JdmzZ4/y7379+jFhwgQAXn31Va5evarS0g+553z+bWbMmEG3bt0AaN68OS1atCAxMZHk5GT27t3LgAED+OWXX5R6dnR0ZOPGjVhYWAC5o2yHDRumNm+lqftWrVoxd+5cIDcIXLx4MZAbfKxcuRJHR0eaN2/Oq6++CsC9e/fIzMzE0NCQX375Ram7zp07M2LECCA3EBk+fLjSTSIoKEjtlGrt2rVTuvWMGzdOmTorMTGRhw8fYmdnR5MmTTh79qyyjZWVVaHrI6+fb05ODlFRUbi4uFCzZk3c3d2LbCUrqDTHtaz3spkzZyrLAwMDle4PV65c4c6dO7i6uvK///1PWf/DDz9UyjNkyBCCg4M5ceIECQkJ7N27V+1r5TFjxijX8IgRIwgNDQX+vieXpuza3EuLUprrojRlU8fS0lLre7y5uTnr169XXo2fPn1aGfx5+/ZtPD09K/T5Jv5dJOjUgKGhIW3atFH+dnR0VP6d13J29+5dMjIylOWvvfZakelFRESUe9DZs2dP1q9fT3x8PGPHjgVyH66enp707NmT4cOHlzloyutDB7mtMvlfeea/qcXExPDkyRMlmCgrX1/fQq9s8j8s4uPj1fYbBLh//z4pKSnExMSoBJX5WwIcHR2pVq0ad+7cKTYf5fl6XRdlyn8MnJycqFatGpGRkRrnKf+6+dPS09PDz8+v0AP67t27Re7f3NwcLy8vTp48CaC0+uRPo1WrVhqfI6Wp++bNmyv/rly5svLv2rVrK9dw/uVZWVlKn8L8r0WDg4OVvpYFXb9+Xe3y/HWRPwCA3P7hea2lJenduzdHjhwBcgO5PDVq1KBjx46MGTMGGxubYtMozXEty73MwsJCZZmrqyumpqakpqYCKEFn/joOCAgo1I0gjyZ1nNfFCVBpPdS27OVxLy3NdVGaspWXhg0bqvTFdHBwUILOvP1V5PNN/LtI0KkBGxsblZuMoeHf1ZbXL6wgS0vLItPLf/GWlxkzZuDh4UFwcDBXrlwhMTGRrKwspS9geno6o0aNKtM+iiqrruUP8tUxMDAoduT306dPS8x7UZ/nD3YKtho+fPiw2DSL8yzKpC1t0yvr/ksKJMta9/mDvfyDr7Qd7GBiYlJkn9+igo/813/BgV/a1Ju/vz8WFhYEBQVx4cIFYmNjgdwgYNWqVdy4caPEV65lPU7a3ssKLsvOzi7xnmdubl7kNGr5+48Xla/8xyF/eUtT9rLeS8vjutSkbOWl4Beg5/H5Jv49JOgsJzVq1MDIyEi54DZv3kzt2rULrZf3+q68GRoaMmzYMOX15MOHD9myZQtffPEFAPv371dulPlvYkXd0NXJX57Q0FBycnKUwCDv9Q/ktrKVVysnqA9O6tSpo/zb3t6eY8eOqX1o5dW3hYUFhoaGSgvEmTNnlFfRMTEx3Lt3T+2+8wcu+ftIhoWFlanVoSLLVJSaNWsqsw2cOXOG3r17A7kPnrCwMLXr599/aGgoXbt2BXJfMebvD5Z37ri7u7N3714Ajh07VmiqqPznlK7qXhP5z/VOnTqxcOFCteuV1CWjJJoEFB06dFBGyqekpBAWFsa7775Lamoqx44dIzU1tdh+htoe17Ley9LT0zl37pwyUv3s2bMqXxpq1qwJ5J7vefeNKVOmqPR9zZOdnV2mQEvbsoN299Ki9qntdVHeSnuPL0pFP9/Ev4ecHeXE3Nycrl27sm3bNgDGjh3LlClTcHd3Jz09nYiICA4fPkxYWJhOfvFo6NChtGzZEj8/P5ycnDAyMiIlJUX5PP9NP39Lz6+//oq3tzcmJibUqFGj0BQZ+fXq1YulS5eSnZ1NREQE48ePx9/fn9u3byv95QCVzui60qxZM6pXr05UVBSxsbGMHTuWUaNG4eTkREJCApcuXWLfvn1YW1sze/ZsTE1NadeuHfv37wdyWzOSkpKwsbFh2bJlRfZ9rFWrlvLv/fv3M2/ePCwtLVmxYsU/tkxF6dKlixIEbNy4kcqVK+Pt7c2ePXsKvYaE3HO+S5cuSl+vgIAAkpOTsbe3Z82aNUrXk0qVKtG5c2cg93Xx999/T2ZmJjExMQwYMIC33nqLKlWqEBkZyS+//MKWLVuAZ1v3BfXu3ZulS5eSlZXFrl27cHNzo3v37lhaWhIbG6tcxwMHDizT+Z7/1fiFCxfYv38/tra22NraUrt2bQIDA9HX16dNmzZUrVoVKysr0tPTVYLdkoKK0hzXst7L8qZEy8nJUSa/h9yBY3nHtW/fvkq+5s6di5mZGU2bNsXQ0JDIyEhCQkLYunUrP/30E9WqVdOkOstcdtDuXqpOaa6L8lbae3xRKvr5Jv49JOgsR9OmTePatWtcvXqVq1evqh0UoavpJC5fvszx48eL/Dz/aPHWrVsrA6FCQ0OVEbQBAQEqU8YUVKdOHT777DM+//xzcnJy2Lp1K1u3blVZp1mzZowZM6YsRdGIoaEhCxcuZPjw4SQkJBAcHFxomhNQDYCnTJnCqVOnSExM5NGjR8pI2sqVK+Pk5KQyRUmebt26MXfuXP744w+ysrL47rvvgNx+T7a2toWmNnpeymRtbY2DgwNxcXEa73/IkCFs3bqV8PBwlbLq6elRu3Ztbt68WWib6dOnc+XKFW7cuEF8fDwff/yxyufGxsbMnz9feQi6uroya9YspkyZQlZWFpcuXVIGd+Stn+dZ1n1Bbm5uzJgxg+nTp5OVlcXChQvVtnaqGwSijebNmyutYg8fPuS9994DcvvMLV68mAcPHhAcHMzKlSvVbt+tW7cSf1SgNMe1LPeySpUqYWBgoHJcIffYBgYGKn/379+f06dPs2XLFhITEwutXx5KU3Zt7qVF0fa6KG+lvccXpyKfby+q
rKwsLl++zOXLl4mOjlamIbOyssLFxQUvLy+8vLz+Ub/wJ1MmlSNbW1u2bNnC1KlT8fPzw9LSEkNDQ6pUqYK3tzcjR47kq6++0sm+R40aRbt27XBxcVH6R9na2tKiRQu+/fZblVdX3bt3Z9y4cbi4uGh9sg4bNoygoCC6du2q/JxlpUqV8PPzIyAggLVr15brHJ3F8fX1JTg4mLfffht3d3fMzMwwMTHB2dmZl19+mfHjx/P+++8r67u6urJx40batWuHubk5FhYWtG/fno0bNxY59YyZmRkrV66kadOmmJiYKJN8b926VScPjLKWydzcnLZt2xIUFKR1i4axsTFr165l8ODB2NnZYWxsjLe3N0uXLqVdu3Zqt7Gzs2Pr1q1MmDABb29vzM3NlYne+/Tpw44dOwqNsu7bty/btm2jb9++VKtWDWNjY8zNzXF3d+eNN95Q1nvWdV/QG2+8webNm+nZsycuLi4YGRlhYWFBzZo16dixI4GBgYUmiNeWs7Mz3377LV5eXmrnVe3Vqxfdu3fHzc0NKysrDAwMsLCwoEGDBkyaNEmj+0lpjmtZ7mWmpqYEBQXRu3dvbGxsMDU1pWnTpqxbt05lAJ+enh7z5s1j8eLFtG3blsqVK2NgYICNjQ3u7u707duXb7/9FicnJw1rs3zKrs29tCiluS7KU1nu8UWpyOfbi+jixYvMnTuXtWvXEhoaSkxMDCkpKcog0tDQUNauXcvcuXO5ePFiRWdXY3o5FTU6RIjniL+/v9pfJBJClKzgLxIVNX+pEKJ4OTk57Nq1S5m1wtHRkaZNm+Lh4YGdnR05OTnKj0Pk/aIbQJs2bejWrVu5zrKiC/J6XQghhBDiObBz506OHj2KgYEBr7/+Oi1atCgUSDo6OuLo6EjLli05ceIEO3fuVILU119/vSKyrTF5vS6EEEIIUcEuXLigBJwjRoygZcuWKgFndna2ysBBfX19WrVqxVtvvYW+vj5Hjhx57l+1S9AphBBCCFGBsrKy2L17N5DbJ1fdj4N88sknfPLJJ4WW161bV2nh3LVrl9YzlzxL8npdCGD79u0VnQUh/rGaNGmi8otlQgjtXL58mYcPHyo/z6utFi1acOrUKWJjY7l8+TINGjTQQS7LTlo6hRBCCCEq0OXLlwF4+eWXixwMNGfOHObMmaP2M319fZo2baqS1vNIgk4hhBBCiAqU9wty6l6ra8rDwwOA6OjocsmTLkjQKYQQQghRgfImfrezsytynaL6dObJm784L63nkfTpFEIIIYR4zpX0k7fP+xydIC2dQgghhBAVysrKCqBMP+/78OFDlbSeRxJ0CiGEEEJUoLzfrb9+/Xqp08jbNi+t55EEnUIIIYQQFah+/foAnD59mtL8OnlOTo7y87NeXl7lmrfyJEGnEEIIIUQF8vLyws7OjpiYGE6ePKn19idOnCA2NhY7OzsJOp8HixYtws3NDTc3Nz744IMyp5eYmMjnn39O27Zt8fDwwM3NrUxTHfzTubu7K/V78+bNis7OP1p5n6u68k/J57+dv7+/chw2bdpU0dkRQpSCgYEBXbt2BXJ/rOTGjRuF1pk1axazZs0qtDwiIoKdO3cC0K1bNwwMDHSb2TKQ0eul9OmnnyoH+d8uNjaWDRs2AGBkZMTo0aMrOEdClJ/FixeTkZEBwMCBA3F0dKzgHAkhXkQ+Pj60bt2ao0ePsnLlSvz9/WnWrJkyKl1fX7WdMCcnh5CQEHbs2EFWVhZt2rR5bn+JKM8LE3T6+fnx9ttvA3/3nSit7Oxs9u/fr/z9/fff07Jly3/EdAWlERcXx4IFCwAwNzdXG3S+/fbbyu+95s0VJkqnPM9VUbLFixeTkpICQPv27SXoFEJUmLzfUD969ChbtmwhJCSEpk2b4uHhoTxbHz58SEREhPKzlwCtW7emW7duFZZvTb0wQWeLFi1o0aJFuaT16NEj0tLSlL9btWqFubl5uaRdUFpaGkZGRoW+4TxvJk+eXNFZ0LmUlBSdHef8yvNcFc+XZ3UOCSH+mfT09OjevTs1a9Zk9+7dxMTEsH379iLXt7Ozo2vXrvj4+DzDXJbe8x3JlKOi+p+dOXNGWd60aVOioqL46KOPaNy4MfXq1aNv376EhYUp63/wwQe89NJLKml7e3vj5ubGoEGDlGXR0dEEBATQvn17PD09qV+/Pp07d+bLL78sNA9XwTxER0czduxYGjVqhKenJ8nJySxYsEAl/6GhofTt2xdPT09eeeUVfvjhBwBu377NyJEj8fb2plGjRowbN46EhASV/e3atYuRI0fyyiuv4Ovri7u7Ow0bNqRPnz788MMPSoslQL9+/ejRo4fyd0pKipIPNzc35Tdei+vTqU1dFDxOp06dYvDgwdSvXx8fHx9Gjx5NXFxcicdbXb3evn2b0aNH06hRI7y8vBg0aBDnzp0rdv+nT59mwIAB1K9fn379+inrpaWlsXz5cnr27EmDBg2oW7cu7dq144svvlAp08GDB5X0OnbsWCiPkyZNUj7/6quv1OYhv5SUFJYsWYK/vz/e3t7UrVuX1q1bM2nSJCIiIlTW/eOPP1SOVf587dixQ1nu7++vst3GjRvp16+fcm40btyY1157jXHjxqlcC+oEBAQo6X7xxRcqn+Xk5NCiRQvl84J1r46m9QzaX8vz5s3Dzc1NaeUE6NGjh5LGqlWrlOWxsbHMnDmTjh07Kuewv78/q1atUrleQPNzSJs0AW7evMnbb7+Nt7c3Pj4+vP/++9y9e7fEOhRC/PP4+PgwceJEhg4dip+fH46OjpiZmWFmZoajoyONGzdm6NChTJw48R8TcMIL1NKpieTkZHr06MHjx4+VZWFhYYwYMYLDhw9jbW2tUTphYWG89dZbJCUlqSyPiIggIiKCrVu3smHDBmrUqFFo26dPn9K/f38ePHigLCv4KwRXr15lyJAhSmtrVFQUn3/+OXFxcWzYsEEl/1u3biU1NZXFixcry44ePcrBgwdV0kxMTOTs2bOcPXuWM2fOsGjRIo3KWpKy1MXp06fZvXu3SvmDg4N59OgR69ev1yofycnJ9OrVS+XnwU6dOsWgQYP4+eef8fPzK7TNxYsXGTJkCJmZmcDfxyEhIYEhQ4Zw6dIllfUjIyNZuXIlu3fvJigoCBcXF9q0aYOTkxMxMTHcunWL8PBwvL29AUhPT2fv3r1A7rfb/v37F1uGR48eMWDAgEIdzKOjo9m0aRPbt29n0aJFaoPbvH2UZM2aNQQEBBTa76NHj7h+/Tqenp5q6yrP0KFDWbNmDQBbtmxh8uTJGBrm3mbCwsKIiYkBoE6dOjRs2LDYvGhTzwWV17UMEB4ezrBhwwoFueHh4YSHh3PgwAFWr16NkZFRoW2LOoe0TfPu3bv069dPpTx79+4lNDRU7X6FEP98BgYGNGjQ4Lnvp6mNF6alUxOpqak4ODiwYcMG1q5dS+XKlYHcgGzz5s0A/Pe//2XZsmUq261Zs4agoCACAgLIyMjgv//9rxJkeXt7s2TJEhYsWED16tUBiImJYfz48Wrz8OTJEx4/fsxHH33
EkiVLmDhxIsbGxirr3LhxAx8fHwIDA1UCgO+//x49PT2mTJnCkCFDlOXBwcHKwx6gXbt2fPfddwQHB3PixAlOnDjB2rVrcXJyAmD37t1KC+bMmTOZM2eOsq2pqSlBQUHKf66urkXWZ1nrIi4uji5durB9+3ZmzZqlBE2nTp3iypUrRe5XndTUVHx9fTl06BCHDh2iZcuWQG5L2meffaZ2m8jISJycnAgMDGTx4sUMGDAAgMDAQCUQ8vHxYfPmzRw6dEhpxXrw4AGTJk0Ccm8affr0UdLcunWr8u+DBw8qdfPyyy+rDbzzCwgIUAJOOzs7Zs2axfLly2nVqhWQG8SOGzdOJTDJT5MuGnmD4/T09Fi6dCknT55k7969LFu2jMGDB2Nvb1/s9rVr11a6Bjx8+FCl7/OePXuUf+evk6JoU88FaXItDxo0iKCgIExNTZXt5syZo5zb3bp1IysrizFjxijBYZ8+ffjtt9/YuXMnvr6+QO5UJUuWLFGbD3XnUGnSnDVrlnJcbS9DjCYAACAASURBVG1tCQwMZOHChVStWlXlC6oQQjzPpKWzgK+++kppieratStr164Fcl9bA7i5uWFpaamyjZ+fn9JP6+jRo9y7dw/IDTiWLVumBHPOzs7KAzM0NJTIyEhq1qxZKA9z5syhe/fuRebRysqKH374ATMzM2rWrMmbb76pfBYYGEjXrl3JyckhODiY+Ph4Jf95+ejQoQObN2/m66+/JioqipSUFLKzs1VaAc+ePYuXlxd169YlPT1dWa6vr0+TJk1KrEeAkydPlqku7Ozs+OqrrzAxMcHb25vVq1crr5Bv376Np6enRvnIM3PmTCXYDQwMpH379gBcuXKFO3fuFAqgjYyMWL9+vUpLWkpKCjt27FD+/vLLL/Hw8ADgiy++YMeOHaSmphISEkJUVBTVq1enX79+LF68mJycHHbu3Mknn3yCvr6+SgBaUitnSkqKStA2Y8YMpdN48+bNadGiBYmJiSQnJ7N3714lQM5Pk2k08s7jnJwcoqKicHFxoWbNmri7uxfZglrQm2++yYkTJwAICgqiS5cuQO6Xn7x89OrVq9g0SlPPBZV0LTs7O+Ps7KwSjHt6eqq0Khw/flx5hW1vb8+cOXOUepw6dapy3IKCgtROG6XuHNI2zdTUVA4cOKBsP336dKVLRJMmTWjdurXa1/FCCPG8kaAzn7zgJk/+V3AF+0UWJX9/xurVqytBFkDDhg0xMDBQHhA3btwoFGjp6+vz2muvFbsPX19fzMzMAJQWnDzNmzcHcluq7OzslKAzL//Z2dm8+eabJU4+q2l5i1PWumjQoAEmJibK3/mPR1GteUWxsLBQCUxcXV0xNTUlNTUVQG3Q6evrW+jV7d27d5XpdYBij1VERATVq1enRo0aNGvWjJCQEOLi4ggJCVFaXSH3S0RJx/zu3bvKK1pAJfA3NzfHy8tLOabq5ncDzV6v9+7dmyNHjgC5QXqeGjVq0LFjR8aMGYONjU2xaXTo0EFpgTt69CgPHjwgNjaW+/fvA7mjLB0cHIpNozT1nF95XMt5aeeJj48vci7e+/fvqx0kpO4c0jbNmJiYIo+9k5MT1apVIzIyUuMyCSFERZGgM5+CLZj5W4Y0/Vmq0vx8VX62trYl9tHK/wAt2HqVf7qi/HnJ+/fhw4eV4MTc3Jzly5fTpEkTjIyMeOuttzh8+DBQuB9paZS1LsrjeOTJH8BAbvkKLitIk6lzCuaxqH3279+fkJAQALZt28aDBw+UPrn+/v4qr3jVKU1dFgwy8wcuDx8+VLuNv78/FhYWBAUFceHCBWU6jrt377Jq1Spu3LihDForioGBAYMHD+arr74iOzubjRs38uTJE+Xz/INpNKVpPRe1flnOnfxpFDfy/OnTp4U+L+kc0iTNsl5HQgjxvJCgs5zVqVNH+XdUVBSxsbHKg+f8+fMqr8Fq165daHtdz/WZ92oRclth8lpG09PTlX6cxeVJm2C0rHVRntLT0zl37pwyeOXs2bMq+1fXzUHdsahRowZGRkZKoLN582a1ec/MzFQG0EBuS9306dNJTEwkODiYO3fuKJ+V9Go9L3+GhoZK4BgaGqr8ekVKSopKH9e8/FhZWaGnp6cELTExMVhZWQGovK4tqEOHDnTo0EFJOywsjHfffZfU1FSOHTtGampqiUHywIEDWbhwIenp6WzatEmpSxsbG41e05e2nkujuPM7/zlsb2/PsWPH1HZTKCof6s4hbdO0sLBQOfZnzpxRXq/HxMQoXVgKSkpKUgaqAfTt21ftekII8axI0FnOmjVrhouLC9HR0WRlZfHee+/x/vvvk56ezrx585T1GjduXOwgHF3J/6rv/PnzhISE4OTkxDfffMMff/yhdpv8raepqan89NNP1K1bFyMjo2JHID9vdfHhhx/y4YcfkpOTo0x2D1CvXj1q1aqlURrm5uZ07dqVbdu2ATB27FimTJmCu7s76enpREREcPjwYcLCwlR+scrExIQePXqwdu1akpOTOXPmDJD7e7v5XwMXt98uXboo/RwDAgJITk7G3t6eNWvWKK+MK1WqROfOnQEwNjbG2dmZ6OhoIHd6pn79+hESEsLRo0fV7icwMBB9fX3atGlD1apVsbKyIj09XaWVVJMvHnlzx23dulUlKPL399dotHVp67k0bG1tlZbYjRs3kpqaioGBAfXr16dZs2ZUr15d+dI0duxYRo0ahZOTEwkJCVy6dIl9+/ZhbW3N7NmzNdqftmmamprSrl07ZVDWjBkzSEpKwsbGhmXLlhXZnzMuLk5loJUEnUKIiiZBZzkzMjJiwYIFDB8+nOTkZC5cuMD777+vso6Tk5NK0PUstWnThho1anD37l1SUlIYPHgwkPuQr1WrlkpLaB4XFxfc3Ny4desWANOmTQNyH9ahoaFF7ut5qotKlSphYGDAhAkTVJYbGxsTGBioVVrTpk3j2rVrXL16latXrzJs2LBC66ibxqd///7KYJb8yzQ1ffp0rly5wo0bN4iPj+fjjz9W+dzY2Jj58+erfEkYPny4Ml/mhQsXuHDhAgB169bl2rVrhfbx4MEDgoODWblypdo8dOvWTePJzYcOHaoyWAq0C3xKW8/aatOmDevWrQNg3bp1yr+Dg4Px8PBg4cKFDB8+nISEBIKDg5UBUflpMho/j6GhodZpTpkyhVOnTpGYmMijR4+UGResra1xcHDQeO5aIYSoSDJlkg74+fmxZ88ehg4dSs2aNTE2NsbExAR3d3dGjRrFrl27SpweR1fMzMz46aef6Ny5M7a2tpibm9OyZUs2bNig8tqvoCVLltCyZUutf03leamLvKmeevfujY2NDaampjRt2pR169bRuHFjrdKytbVly5YtTJ06FT8/PywtLTE0NKRKlSp4e3szcuRIZaL3/OrXr6/ys5Z5rZ+asrOzY+vWrUyYMAFvb2/Mzc0xNDTE2dmZPn36sGPHjkKvrocPH86YMWNwdHTEyMgIDw8PvvzyS7U/ZQrQq1cvunfvjpubG1ZWVhgYGGBhYUGDBg2YNGmS2nIVpV
GjRiqtuHXr1tWoVTdPaetZW5MnT6ZPnz7Y2tqqfR3u6+tLcHAwb7/9Nu7u7piZmWFiYoKzszMvv/wy48ePL/RlqiTapunq6srGjRtp164d5ubmmJub07ZtW4KCgkoclCWEEM8LvRzppS7+pc6cOaO0JNrb23P69OkKztGLZ9GiRcyfPx/InQ5o5MiRFZwjIYQQFUVerwshylV2djapqak8ePCALVu2ALmtur17967gnAkhhKhIEnQKIcrVpUuXCnUbeOedd1T6mgohhHjxSNAphNAJfX19nJyc6NevH2PGjKno7AghhKhg0qdTCCGEEELonIxeF0IIIYQQOidBpxBCCCGE0DkJOoUQQgghhM5J0CmEEEIIIXROgk4hhBBCCKFzEnQKIYQQQgidk6BTCCGEEELonASdQgghhBBC5yToFEIIIYQQOidBpxBCCCGE0DkJOoUQQgghhM5J0CmEEEIIIXROgk4hhBBCCKFzEnQKIYQQQgidk6BTCCGEEELonASdQgghhBBC5yToFEIIIYQQOidBpxBCCCGE0DkJOoUQQgghhM5J0CmEEEIIIXROgk4hhBBCCKFzEnQKIYQQQgidk6BTCCGEEELonASdQgghhBBC5yToFEIIIYQQOidBpxBCCCGE0DkJOoUQQgghhM5J0CmEEEIIIXROgk4hhBBCCKFzEnQKIYQQQgidk6BTCCGEEELonASdQgghhBBC5yToFEIIIYQQOidBpxBCCCGE0DkJOoUQQgghhM5J0CmEEEIIIXROgk4hhBBCCKFzEnQKIYQQQgidk6BTCCGEEELonASdQgghhBBC5yToFEIIIYQQOidBpxBCCCGE0DkJOoUQQgghhM5J0CmEEEIIIXROgk5RyM6wu+gPXMHW3yOf6X4bTPiFlz7ZWqY0xq46gf7AFSSnZpRTrsS/wU9Hb6A/cAW/Xox+pvt96ZOtNJq8+ZnuU2gmOTUD/YEr+OjHkyrLM7Oy+WT977iO2YDBoBXYjlgDQJfZwVR97+eKyGqFyfirLmqN2YDRGyuVuhCitAwrOgPPSk4OzNl2ntBbfxB6K57I+GRc7CyIWjxIp/vtNHM3v4XfV/420NfDwcqMNp5OfNqnEfWr2ep0/0KU5PK9x2w8eYtBLWvjUdX6uU9X11LSMpm74wLNPRx51celorMjyqA05+DS364yZ9t5PnitPi3qOmJqbKDjXKpavO8yhgb6vNuh3jPdrzrf779SoXUh/n1emKAzKzubqRt+x66SCY1r2fPoSdoz3f+cN5pibKhPakYW5+78yaZTt9lzLoozs3tRx8nqmealJF0bVSdl7VsYG0pD+IvgcvQjZmwKo7GbffkGnTpKV9dS0jOZsSmM8a83KHPQefxz/3LKlSiN4s7BSqZGpKx9C0N9PZXl+87fw9HajG+GN1dZvmVCJ7Kzc3Se5yX7rmBqbPBcBJ2/XoxWWxdClNYLE3QaGuhzY+EA3BwsAXD/MIjUjKxntv/Rr3pSydRI+furnReZ9NMpFu+7zPw3mz2zfGhCX08PU6OSv9FmZeeQlpGFuckLcxoJoZUX7Ytb0tMMLM2MSl5Rx1IzsgoFk+qou8/FPH6KjYWxRuv+2xVVF0KU1gsVLeQFnM+D13yrMemnU0Q8SFBZnpaRxbxdF1l37AY3Y5MwNTKgVV1HPh/QhEaulVXWTc3IImBjKD8dvcHD5DS8qtkyrW8j9p+P5rt9l0n8YZgS6JoNXU3fl2uxdkxblTQ2nLjJGwsPsnvKa7zmWw3I7dPp/+U+No/vRM+XagK5feLe/O4QWyd24tiVGIJO3ib64RMWj2zJOx3qaZXvew+fMGHtKfaev0d2Tg4tPByZN1S7wLuoshflwt2HBGwM5ciVGJ6kZVKriiVD27gzsXsDDA1UA4MHj1OYtfkcu85Gcf/RE2wsTGhY046PezakrVdVreszr+6CP+nC2TvxLN1/lZjHKTSoYcfXw5rRwsORo1djmLrhDGG347EyM+L9V734rHfh8py5Fc8Xm89y7FoMSU8zcK1iyZDWdfi4h69KOdw/DMLJxoz5bzbjk/W/czIiDgN9PTr5VOPbt1rgZGMGwGf/O8MXW84B4P/lPmX7Ya+4s/r9V8jIyubL7RdYd+wGkfHJGOrrUa2yBe3rO7PwrRZF1ndJ6UJu37n5u8L58fB1bsUlYW5sSOt6Tszo3xjfmnZFpp1ffFIqARvD2H4mktiEpzhYm9G9cQ1m9GtMFSvTQutnZucwY1MYqw9dJ+ZxCnWdbZjaqyH9m7sBuS07r36xB4B5Oy8yb+dFAGraV+L2ooEA7Ai9y4+HrxN6K56YhKdYmBjSwsORGf0bFzrXX/pkK5lZ2Zz9v97KMk2PTVFaTtvOjZhE7i15A6MC5+7us1G8/n97+W5kS97v5KlVPZ+MiKPFZ9tZ9m5r3m5fVyXd91ccY+mvV0lZ+5YSfOWVY+aAJgRsCiP0VjzW5sYldlnS5PoCzc/1d5YeZeXBa0Qs6M+nG87wW/h9/kxOZVRHT5bsvwKoPweTUzOwGv4jH3bx5uthzZRrN4/+wBWF8u5obcaDpYNVloXeimfOtvMcufKAhJR0HK3NaONZlZkDm1DTvhIAX+8OZ2doJFeiH/NnUhoO1mZ0bVSdmQOaKOdpZlY2xoNXqd3/rW8H4FrFUqt6KYom50NRdTG9rx/T+/qVuA8hivJCBZ3Pk5uxiQDYVTJRlmVkZdNldjBHr8YwpHUd/tO5Pgkp6az47Sqtpu3gcMDrNHGzV9Z/Y+EBtv4eSe+mrnTyqcaduCQGLzyIm6PuXtePXXWC+tVs+fatFthYGGNXyUSrfD9+kk6b6TuJfviE0a964VnNhuNXY2gfuAtDfT2qGptrlA9tyn7mVjxtZ+zEyECf0a964mRjzq6wu0zd8Dtht+PZ+FEHZd3bcUm0nLadPxJTGf6KB41qVSY5NZOTEbEcDL+v8lDU1vSgUJ6kZfB2h7pkZGazYE84r80KZs1/2jLi+8MMf8WDXk1d+eXkbaYHhVLrr4dJnt1no+g9bz+uVSz5b9cG2FuaEnI9lukbQ7kQ+ZCgfOWA3HOs7/xfmdq7EbPfaMr5yD8Zt+Ykw747xN6pXQAY0a4uhgb6zNgUxhcDX6K5hwOAEvhMXHuKb/deYkTbunzUrQHZOTncik0k+Ny9YstaUroAQxYdIijkFp0auDCqkydxCU9ZtPcyLT7bzoFpXXm5jkOx+0hISaflZzu4EZvAiLZ18XOz5+zteJb+eoX9F6L5fVbPQq00U9adJjUjizGv1UcPWH3oGgMXHCA9M5shrevQ2M2eLRM60eur/fRv7saovwK3/K1cKw9cIy0zi7fa1cXZ1pw7cUks++0qrafv4MzsntRztik235oem6K806EeI5YcYfuZSPq8XEvls9WHrmNmbMgbLWuXWz0XJyImkb5f/8onvRoxc0AT/kxOLXZ9Ta8vbc91gA6Bu+n1kisb/tue1PQsqtqa42BtVuw5mF97b2cOTOvGqOXHeJqeyY//aavy+eSfT3M3Plll2fbQSPrN/w1LMyPebl8XN0crY
h4/JfhcFFejHytB5/9tO8+rPi509q2OtbkxYbfj+fHwdY5fiyF0di9MjAww0NfnwLRujFhyBGNDfb5/p5WyHycb81LXS0GanA9F1YVrlUolpi9EcSTofEbuxj/B3MSAtIxszt6JZ/LPpwF4o9XfQcWivZc5dPkBGz/qoPIwGdXRE+8Jm5j88yl++6wbAPsuRLP190je61iPJW//fXPq0MBZaanRhSpWZuz6+DX08r25+np3uMb5nrvjPHf+SGLdB+0Y2CL3wfhuh3pKy1hV25KDTm3L/t8fQkjLyOJEoD8+NXK/yY/pXJ9BCw8QFHKLXWej6NaoOgD/WXmcmMdP2Te1Cx0bqPbny84pW3+uJ2kZnJ7VUwlgfGra0Wfer/Sd/ytHZrxOCw9HAN7v5InrmA0s3ndZCTpTM7IY+f0RfGrYcXRGd0z+SuO9jvXwqmbLx+tOc+RKDG08nZT9xTx+yqHpryvLmrjZc+/PJ8zYFMbV+4+p52xDLQdL6lfPHczWoIZtoaB6y+936O5Xk+XvtVZZPueNpsWWtaR091+MJijkFv2bu7H+g/bK+dSvuRuNP97C2FUnOD2rZ7H7mLv9AhExCSwa0YLRr3opyxu6VmbMqhPM2nKWL4e8rLJNfFIaF+f2UYLRdzrUo9HkzXy0JoS+zWpha2FCy7q5x6F6ZQu1XzLWjmlb6BXy8LYe+Ez8ha93hbM0X7BQFE2OTVEGNHdj3JqTrDp4XeV6+yMxle1nIhnYwg1r89zylUc9Fycu4SlbJnSiR5OaGq2vyfVVmnMdoHdT10JdlfK+3Ks7BwtysDLDwcsMSzMj9PQotL5dJROVoPNJWiYjvz+CbSUTwub0wjnfvWtan0Yq94uIb/oXOmfaeDoxdNEhfjl9hzda1lb2aWFiiKmxQaH9l7Ze8tP0fCipLoQorRerw1EF8p6wCbex/8Nz3EbeWHiQrOwcfvxPWzr/9QoW4OejN6jlYEk3vxqkZmQp/5kaG/C6Xw2OXo0l7a9+qNt+vwPA5B6+Kvvp2MAFv1r26Mrwth4qAae2+d76eySuVSwZ0Ly2ShoTuvugXzDhImhT9rjEp5y4Hot/45pKwAmgpwdTezXMzdPp3PQeJqex98I9ujSsXuiBCGicv6K8/6qXSovZK565N/Imte2VgBNyW9VerlOFiJi/u17svxBNbMJTRnXyIgdU6nnQX61aBacDqmFfqdAD6KXaVQC4EZOoUZ6tzY05H/knF+4+1LygGsir8096NVQ5n3xq2NGjSU3O3Irn3sMnxaax5fc7VLEyLTTg4t0O9XC0NmOLmim/3utYT6X108rMiFGdPPkzKY2jV2M0ynv+4CE7J7dfc7XKFvjWrMypiDiN0ijLsTEzNmRwyzrsPX9PpY7WHo0gIyubEe3+fjVeHvVcHHtLU40DTk2vr9Kc64BKuZ+FfRfu8WdSGuO6NVAJOPPkv1/kP2cys7JJzciia6Pq6OvpcVrDc6a09ZKfrs8HIUoiLZ1auPfwCZlZ2crfRgb6uNhZaLTtug/aYWpkyOOUNNYfu8nhKw8KrXMl+jFP0zMxH7q6yHQePkmjqo05t+KSMDM2VPr55OdVzYaw2/Ea5UtbtdT0i9U23+28qhYKXK3NjdXeuNXRpuy345IAlFa3/DxdbDDQ1+NWXO5D/kZMIjk50NBVs/6E2ipYd3ldK2qpKYddJRP+TPp7hoUr0Y8BeHvpEd5eekRt+nEJT1X+rqbm3LT66+GXP+3izB3yMoMWHqDhpM3UcrCkXf2qdG1Ugx5NamKgwUCNotyOS0JfTw9Pl8Iteg1q2LH59B1uxSapLUP+NJrUrlKoH5uhgT71XGw4djWGnBxUzjV1+/P6a9qyW7FJ0ECzvM/YFMb+C9E8eJyi8ln1yprdD8p6bN7pWI/v9l3mh0PX+fSvvr+rD17H3cmaV/K1SpVHPRdHm9etml5fpTnXQf29SZciHuTeNxoW6MerzuHLD5iz7TwnI+JISElX+UzTmVRKWy/56fp8EKIkEnRqofW0HUTme71S29GKiAX9Ndr2db8ayqCeN9u4023OXt5ZegS/WpWVuTpzcnLwqWGn0penoMqVTJV1i6Luo6LCg8ws7V4Zm6gZjatNvgH0imgx1DQn2pRdmzfiOX/lQK/I2vpbaeqzqNG0mnT+z8vbguHNlRaxghysVfuqFdcwm6NhbXf2rcathQPZfS6KQ5fucyD8PqsOXuel2lU4OK1bqWcuKG7vxR1fjdMvIg115542+3v8JJ2W07aTkwOTevhSv5otlUwN0dfTY+zqE8Q8Lv6h/3c+iv5Mk2PjU8OOl+s4sPrQdab2asSpG3FcuveI2W+8VCCtYvZToNzF5amo89pEixHdml5fpTnXQf29SZeU8pRwuzh2NYaOM3fT0LUy899shmsVS8yMDcjJgTYBO9B0FqbS1otqGsV8Vg7XnRAlkaBTC0vfbU1KWqbydyXT0lWfvp4eC99qTv3xvzDpp9Ps+rgzAO5VrYmMT8avln2JU63UdrRi34Vo7vyRVKjF7+r9x4XWt6tkwsPkwt+o8/o8lYU2+XZzsORK9ONCLVAJKek8eJRS4shd0K7sbo65n1+KelQonav3E8jKzsHNIXfwkbuTNXp6cC7yzxLzoMv6VMfdKXeOwZT0TJq5l37ghzolhdg2Fsa80bK2Mjjl/7afZ8q639lw4hYj2nmUKt3ajlbszbnHlejHKt0eAC7dyz2GeceuKG6Olly7/5jMrGyVwD0rO4dr9xNwrWJZKCC4fO8RvZu6qiy7ej9BZX/FBUV7zkUR8/gpm8Z1LJROfGLxg2jK2zsd6vH20iMcvHyfDcdvYmigz7A2qsdDm3rOa3nX2X1Cw+urPM/1snWIKV7evJ/n7vxJJzXdBfL8dPQG2Tk57JnyGvaWf3/5vh2XRJaaiLOoILY86qU8rjshykL6dGrhVR8Xer5UU/lPXb8kTbk7WTOohRt7zkVx8q8+PUPbuJOQks60oDNqt8n/Ks//r35Uc7dfUFnnwKX7hN4q/Grdo6o1IRGxJD79++chHz1JY/lvV0tdhjza5LtHk5rc+SOJjSdvqawzf+dFjQfqaFN2ByszWng4siPsLuH5As+cHJj115Q+Pf8KHuwqmdDZpxp7zkVx8FLh7g/5s6fL+lSns281HKzN+GrHBaL+LNzn6klapkpetGFpltvHsWCwkZOT27JXUN5o55JeCxaVLkCPv6bimrP1vEq9hkc9YtuZSJq42Zf4iq9nE1f+SExlxYFrKsuX/3aV2ISn9HqpcF/D5b9dVamn5NQMvt93mcqWJrSul9vH0sLUED099fnOCwjyd7MB+PmvKaWepQEt3LAyM+LbPZf4X8gtujWqXuhLmzb1XKNyJUyMDNh3XnVmgpDrcRy5oll/1+Joen2V57le3DlYVq/6VKOypQnzd15U28KdV56/zxnV+9ucbefVpmtpZqQ2v+VRL+Vx3QlRFi9US+eaIxHK6MNHT9LIzMpm5uazADjbWhTbaqMLU3o15OdjNwnYGErwJ134sEt9fr1wjy+3X+DUjT/o0jB3
eo278Un8evE+1ubG7PtrOpXOvtXo3rgGS/Zf4Y/EVDr6uHAnLolFey/ToIYdFwsM/PhPZy/6ff0brwTsYHBrd5KfprP60HVqOVhy/1GKuuxpTJt8T/T3Yd3xmwxddIhTEXHUc7Hh+LVYgs/fo6qNZn06tS37N8Ob03bGTtoE7GB0Jy+cbHOnTNp7/h69m7oqI9cBvh3RgpbTdtB51h6Gv+JOo1r2pKRlcioiDk8XG2b0b6zz+lTHwsSQH0e/Qq+v9tNgwiZGtqtLXRcbHj9J50r0I7acvsPOyZ1pVa/okatFaeRaGSMDfb7acYG0jCwszYyo5WCJb83KOI/6mR5NatKwVmWq2phz7+ETvt93hUqmRoVa+jRN9+U6DnRq4EL/5m5sOHGTR0/S6OZX46+pWy5hZKDPtyOKngM0z0R/HzaevM2YVSc4e+dPGrpW5vydP1lx4Bq1Ha2Y8tdAsfzsKpnQbOo2Rravi55ebj/IW3FJrH7/FWWQl6mRAb41K7Pl9zt417Cjqo0ZFiZGdG9cg3b1nbG1MGHMqhNExCTiZGPGqYg4Np26jUdVa57kexOiaxYmhrzRqg7f/zUX5cj2hQfSaFPPJkYGvNO+Lov2Xqbn3P20867K7dgk1hyNwK9WZc6o+TKrLU2ur/I814s7B8vKwsSQZe+2ZsA3B/CZ+Asj29XFzdGSuMRUgs9F8WnvRnT2rUbPl1xZ+utVXv1iN+929ERPD3aciSTm8dNC86xC7pe6b3aH8/G60/jUrIy+HnRvXLNc6qU8rjshyuKFCjqX/3aVK0A6GAAAIABJREFU49diVZZNCwoFoLGb/TMPOus529DnZVc2nrzN8WuxtKzryM7JnVm8/wprj0Tw+S9hADjbmvNyHQfefMVdZfsNH7ZnWlAoPx+7wY6wu3i52PDT2LbsPx9dKPDq83It5r/ZjAW7w5m6/ndqOVgytXcjrMyMCtWJtowM9DXOt62FCUcCXmfC2lOsOHiNnBxoWdeRA591Y8A3v2m8T23K3sTNnuOf+xOwMZQl+6/wJC2DWg6WfDHwJSZ2Vx05UtvRijOzezLzl7PsPhvFj4cjsK1kgp9rZdp7Oyvr6bI+i9LZtxqnZ/Xk/7adZ8OJW8QnpWJbyYTajpZM9PdVO1hKE1WsTPlh9Ct8seUsY1efID0zm2GvuLPivTZ82MWbQ5cf8Gt4NElPM3CyMadt/apM6dmwxIEbRaWb98D/aUxbGtWqzI+HIpiw9iTmJoa0qVeVgH5+Gg3OsDY35tjn3QnYGMr20LusPnQdBytT3ulQlxn9G2NrYVJom9lvNOXk9VgW7gknNuEpHlWt+XlsO2X0b56Vo9rw3x9C+Ox/Z0hJy6SmfSW6N66Bo7UZe6a8xsfrTjN3e25LVTN3Bw581o0p639XaU1/Ft7tUI/v91/B2dacLg2rq11Hm3qe80ZTMrKy2XTqNvsu3KNRrcpsm/gq647dKJegU9Prq7zO9ZLOwbLq9ZIrRwJeZ87W8yw/cJXk1NxrpI2nkzJYp7NvNdaOacv/bTvPpJ9OYWVuTLdG1Vk7ph01/rO+UJpTezXi/qMUVhy4xqMnaeTk5E4Ob1HFslzqpazXnRBloZcjvYf/dcauOlHoF4mEEP8+l+89xnvCJj7p2ZCZA5tUdHaEEKJY0qdTCCH+ob7bewl9PT21r9aFEOJ580K9XhdCiH+69MxsNp++zfUHiSz77SqDWtZ+5nNUCiFEaUjQKYQQ/yCJT9N5Y+FBzE0Med2vBguGN6/oLAkhhEakT6cQQgghhNA56dMphBBCCCF0ToJOIYQQQgihcxJ0CiGEEEIInZOgUwghhBBC6JwEnUIIIYQQQuck6BRCCCGEEDonQacQQgghhNA5CTqFEEIIIYTOSdD5nOs0czf6A1egP3AF3++/UmFpPCvHrsYoea363s9lTu+lT7bSaPLmUm//09Eb6A9cwa8Xo8uclxdRcmoG+gNX8NGPJytk+3+qF7XcQoh/N/kZTCHKSUpaJnN3XKC5hyOv+rjobBttRf35hLnbz/Nb+H0i45MxNtTH2dacprUdGNSqNp0a6Ga/FeHfXNZnca4IIYQuSdD5nOvX3I2GrpUBlP9XRBr/VMc/939m+0pJz2TGpjDGv95A86CzFNto40r0Y1p8tp2UtEx6NXVlcOs6ZGXncONBArvO3iUjK1ungVglUyNS1r6Fob6ezvaRp6LLWp7U1ZuuzxUhhNA1CTpL6UlaJhYmuq++dzvUey7S+KdJepqBpZkRxoYvdg+SWVvOkZCSzubxnej5Uk2VzzKzsomMT9Z5HkyNDHS+D3g+ylpWqRlZGOrrYWig/8zqTQghnpUXJuicufks04JCARjQ3I3hbT2YufksZ+/8iYmhAV0aVuPLIS/jbGte5Dbvv+rFZ/87Q+jteNydrDj7f70BiIxPZt6OC+w9H03Un8no6+tRq4ol3RvXYPzrPlS2NAFg19kouv/fXgDqOltzZX4/lTyO/P4Iqw9dB+CTng2ZObAJnWbu5rfw+wAsHtmSUZ08gdxXbf+37Txbfr/DrbgkMjKzsatkgmsVS/xqVeazPn442ZgBFJkG5AbPC/dcYvPp21y7n0B6ZhZVbcxpV9+ZCd198Kpmo6x77GoMbQJ2AuBobcaJQH+mBYUSfD6K5NRM/GpV5qshzWju4aBsc/bOn3y57fz/s3ffUVVcexvHv3QBUQRBsYPSIoIlYkWJYIkdr9ixRonJNcb7ajSxRY01GsUSe03ssbdcjQ2NJpDEkqiIilhQUVGkiNR5/yDM9dBBDiTh91nLlZw5M3v2zGzgOXvvmcPlu8+IfJFATEIyRvq61LIy49361Rjfrb56fgrj/rN4anywlYk+9alqYcryY9cIffiCf7nX4ttR79D4s72kpKap1yrDimPXWHT4D8KfxFLdsizvt3WmRsWy9F50nP2ftKNzwxoa6yvAwsN/8PV/r3IvKo6aFc34zKc+g1rbA/DD7xG0m3kEgAUHf2fBwd8BqFmxLLeX9sm27vnZJiU1ja8O/cHG06GEPY7FxFAfD6fKTOvVCLeaFnmen1uRMejoQKeG1bO8p6+nS+1K5TSWFXR/O3+6zbL/XuFieBTJqQo1KprSwa068/2aoKerQ9yrZMoN3sjod11YOKiput3Cw39w8Nc7XIuIJio2EevyxnRsUJ0ver+NVbkyeR5XURwrwC9hT5m5+wJnrz8iNiGZWlZmDPCow4RubujraX5g+TXsKXP2XSLw2kNevEyiUnljWjnb8EWft6lZsSwAxn7r6dnElm/+7amx7bZzt+i3+CSHP+1AB7dqAAxfeYa1J69zI6AXk7b9wvE/HhAV94qQhb7YmJtonLfc2srFeT2oOnILbetVZc/YtlmOcfzmIL48cJk/5vfU+HnOLDE5lQvhUZibGuJUJef1hBCisEpN6Hzd6WsP2fFTGIqS/vplYgpbfrzF+RuPCZ7VHYuyWUPQr7ef0vaLwySnpgGQ9ue250Mf03HO97x4maSx/pX7z7ly/znfnLnJ6c87Y2dtRge3alS1MCXiWTzXH7zg17C
nNLKrCKT/wt8dFA6Ajg4Mecch12MY9PVpdv18W2NZ5IsEIl8k8PPNxwx5x1ENnTl5GvuK1p8f5FpEtMbyO0/j2HA6lK3nbrH94zZ0bVQzy7YxCck0/mwvz+IS1WXnQx/Tae733Frcmwqm6efwyr3nbD8fprFtSmqaen72/nKHX2b7UM7YINe65mVT4A2MDPT43LcRtlZm6nXKztz9l/h0SzDudayY174JLxNT+ProVbXO2Zm28zei4l7h18oeIwNd1p64zpDlp6lTuRwtHCvRyK4ie8a2xWf+MXo1s1ODfW69VfnZZsDSU+w4H0bbelV5v60zj18ksPS/V2k+eT8npnSkSR3rnIoHoE7lcvx04zFbf7zFwFb2ua5b0P1N2BLEvP2XqVutAv/p7Eql8saERcbw3c+3md2vMXq6OR/73H2XaOdalfZu1SlvYshvt5+y8XQoP15/xK+zfTAqRC9fQY/18IV79FhwjFpWZnzcsR4VzcpwPjSSqTt/5fKdZ+wY46Wuu//XO/h+dRwzYwPea+OIXaVyPIpO4PuL9wiJiFZDZ2F4zTiMT+NabPu4Da+SUrEsmzV059ZWypsY4tvUls1nb/EoOkHj5z4lNY1vztygmYN1roET0ufDNp+8n/Zu1TjyaYdCH48QQuSkVIbOR9EJTOjmxkfvuhD2OIZ+i09y92kctx/HMmPXBY0emQw3H8VQy8qMcV1dsS5XhojnL0lKSaP/kpNq4GxkV5FPu9cnMTmVSdt/4fbjWCKexTNo2SnOTOuCnq4Og1vbM3PPRQC2nL2phs5DF+6p5bR2tsm2VyZDcmoae/4MqDUrlmXnf7ypUsGER9EvuXI/moO/3sHYMO8/2qM3nFcDp1W5MnzR+20qm5uw9PsrHPs9gsTkVAYuPUXYkj5ZgnhCUgq1K5mx+//akpSSyoClp3j8IoHo+CQ2Bd5g9LsuALxVzZzV/h40tK2IRVkjDPV1efDsJeM3B3HiygNuPoph3cnrfNzRJc/65uZpbCI3F/fCxtwk1/WexSUybedvvG1XkcDPu6jD78PeccT5Pztz3O55fCK/zfHB2DD9R6Z/yzrU+Wg7y/57lRaOlahgakQLx0oAVLc0xfMtmzzrnNc2x36PYMf5MHo1s2PrR23Q+XN6n28zOxpN2MOodecImtU913182r0+e4PvMPjr08zbf4lWzjY0tK2Ih1NlHKuUL/T+fr75mHn7L+PlUoWD49trhMQ5/dzVbXNyY1EvzDJ90GjlXBm/pafYFRROvxa1cy/gDY/1VXIqw1YE4lrDgjPTuqj19/d24q1qFZiwJYjAa49o5VyZ+MQUhq0IpEJZI36b46MxGjLlXw1Iy/j0Wkg93Gvx1UDN3zlxr5I1XufVVoZ7ObHx9A02Bd7gk66u6vJDF+7xKDqBL3o3fqM6CiFEUSiVobOWlRkz+zRGRwcqmxsz0ac+/qvPArDzp7BsQ6ehvi4np3bS6NE4ejmC8CexQPrw3d6xbalqYQpAjYpl8Zh6AIAfr0dy81EMdSqXY8g7jszaexFFgW3nwvjSrwm6OjpsOXtTLXfoO4651l9PV4cyhnq8TEzhVXIqYZExlC2jj0t1CxraVsTPo06e5yA+MYXvfvpfT+mSIc3p1cwOgHfq2lDjw61ExycRk5DM7qBw3muTtU7rR7ZWQ7NvE1uWHb0KwI2HMeo6DW0r8iTmFUuOXOFqxHNevEwiJVXR6Bn+6cbjPOubl44NqucZOCE9WL1KTuXfHepqzPe0KleGoZ4OzP9z2DKzD9u/pQZOgGoWprxVrQI3Hr1447rnZO+fHyw+86mvEeJca1jQ7e2a7A4K5/6zeKr92eay41zVnEvzerDo8B8c+u2uxiOzWjpVZo2/Bw425Qu8v61nbwEws0/jLL2SeQVOQCNwpqSmkZKm0LFBdXR1dAi68bhQobMgx3rscgSRLxKY2acxCukhNEPfFrWZsCWIH36PoJVzZY5evk9UbCJz+rlrBM4Muvk54Fzk9fOeH80dKuFSvQLrT13XCJ3rTl6nbBkDejWzzbOMOpXLkbbtvTeuixBC5KRUhs56NSpo/lF9ba7ag+cveZmYgkmmm4Tc61hnGUILefC/YWlbKzM1cAI0tbdGX0+XlD+Hea9FRFOncjnsrM3wfMuGk1ce8jD6JSf+eIB7HWsOX7gHgLmpIf9qUivX+uvq6DCgZR1WHQ8h8kUCfQJOAOlhtH4tS/q3rMNH79bN9Y/hrcgYjSHolk6V1f8vW8aA+jUtOXX1YfpxZhp+h/RhvYzACVDhtZ7Q14fcp+74lRm7L+R6PM9fW7+wbK3yN7x5+3H6h4Ts5qw5Vs15+NEum55ny7JG3IqMzWcNC+7241h0dXRwzqZe9WpYsDsonLDI2FxDJ4CttRkBg5sRMLgZz+IS+fF6JBtOhbInOJyu845yYW56D25B9nfjUfr8Sdd8zCvNzumrD5mz7xI/3XicZWrK8/jCt4f8HmtGD/97KwN5b2VgtmU9fpEA/O9DlLae/GBrbVYk5bzn5cTHG85zNuQRLZ0q8zD6JUcu3mdwa3vKlnmz6StCCFEUSmXoTHytVyP9dc7z/zJk18OhFHJYbeg7jpy8kh7otpy9xb2oeLWnpV+LOho9ajlZMrQ5datXYHdQOJfuRBEdn0RqmsKvYU/5Newpr5JTmdDNLcft33BEkPImhhqv9V57tEvGeYmOT2L2vkvq8kWDmzHsHUdMjfTVeZXAGw9PAvmeA5jbrnJ7L6dH/hS2DeRHbiUXdr8WZY3o0qgGXRrVoMeCY+wNvkPgtUe0d6tWoP0pioIOOhSmj+9syCO8vzhM/VqWfDWwKbWszDA21ENRoNXnB9T50m8q92NN30nA4GY0rm2V7fbW5dPnRmasm58OzZxWSUnN+aCMiugJC34edfh0SzDrTobS0qkyG0/fICU1jWHZjFIIIURJKJXPk/kl7CkvE1PU12dCHqn/b2NukqWXE7L/Y+JctYL6/7efxPLg+Uv19c83H6u9nABOVf83p+xfTWwxN00PbbuDbqt3rAMMzeMGogwGerqM6lCXk1M68WztQCJXDWC+XxP1/f2/3Ml1+9qVzDB47e7cH69Hqv8fn5jCpTvP1NeZ58PlV/iTWPUc6Onq8O/2b6mPmQq68aRQZb4pW+v0HtHrD7MOi4c+yNqjWxCFiWC5bVO7UjnSFCXLjV4AV+6nL7OrVPhesvq10nuq7z+LL/D+HGzKk6YoXL77LMu6efn2zE3SFIUjn3ZgiKcD79S1oam9NZXNjUktqsSZSeZjta+c3qZfJqXQ1N4623921v87VoCL4VF57seirJFGT3+GW5Ex2axdMHm1rwqmRvRsUoudP4URk5DM+pOhuFSvkOfNZkIIUVxKZeh8FpdI9/nH+O7n23x54DKz/ryxB8C3ad5znzJ4vmWjDrmnpKbhM/8Yu4PC2XbuFoOWnVbXa+FYSf0jB+lD0/1bpM+7jElI5uyfobd+LUsa2lYkP9rNPMLc/ZcIvPaIsMexvEpOJf61IJ3XH++yZQzo+dqxfrT+HGtPXufgb3fp+d
UP6hBnOWMDerjn/5y8ruZrQ96paQrL/nuV8CexfHngMnt/CS9UmW+qbb1qGBnosfT7KxrTC6JiE1l/OjSXLfNmWkYfHR2yDR2F2abbn8+anLP3kkYv7B/3nrPvlzu8bVcxz6H1A7/eJTYhOcvyhKQU9gWHA6jD6QXZX58/51xO3v4LSSmaIwV5dcJm9Bhm7v2b81qveGEU5Fjbu1XDurwx8w9c5l5UfJZt4hNTiPmzrHau1bA0M+Krg7/zKDohy7qvH6+DTXnO34hUt4X06QKrj4e8yaEB+Wtf73k5EZ+Ywr/X/ciNRy8KNF80MTmVn2481pg2JIQQRalUDq+/Vc2cc6GRWb5Pu2bFskz+V8N8l2Oor8uWj9rw7uwjxCQkE3zrCT2/+kFjnaoWpmz4oHWWbYe2cVRvvFGXFeAPxIXwp7l+H/gQz7x7TBcNasbF8CiuRUQT+SKB4SvPaLxvZKDHpn97Fvo5mhVMjejTvDbbzqXfdDJ6w3lGbzgPQINallzIR89RUbM0M2LKvxoycVswraYepG/L2rxKSmX18RBqVjQjKjYRnULeGFLGQA+3mpbsCQ7HpYYFNubGmBoZ0KVRjUJt07ZeVXo1s2PbuVs8j0+kU8Mafz7C6AoGerosGdo8zzrN23+JvotP0N61Gm61LClbRp+Hz1+y6+dwwp/E0sO9Fs0d0u+KLsj+mtpb83+d67Hg4O80/mwvPZvaUrm8MeFPYtlx/ja/z/9Xjo+L6t64Fit/CKHdzMOM8HZGRwcO/HKHR9EJGr3vBVWQYzU10mfjB63xmX+MemO/Y9g7jjhWNSc6PolrEc/ZExTOwfHtaelUGVMjfVaN8KD3ohO4jtvFsHccsatkxuOYV3x/8R6TejSg/Z/P3vyw/Vv4LjxO688P0N/DnriEJNafCsXW2kxjJKQw8tO+PJwq41zVnG/P3MRQXxc/j7wfHZVBHpkkhNC2Uhk661W3YOMHnkzYEsRPNx6jr6dLxwbVmdffvcABq5mDNZe+/Bfz91/mv5fvcy8qHh3ShyG7NKzJ/3VJf/5fZg1qWWoErzIGevRvmf87dsd3q8/pKw/4495zouISSUhKoYKpEW41LRju5aTeiZ4bq3JlCJrVnYAjf7D753CuP4gmKSUNmwomvFPXhrFdXKlbrUKe5eRmtb8HNubGfPfzbR7HvMKpSnnGd3PjUXRCiYROgE+7u1HOxIDFh68w7pufqWZpir+3MxXNyvDeysB8PW4qJ2vfb8XHG84zefsvvExMoWbFsrmGzry2+fbfnjSwtWTjqRuM/eYnTIz0aeVkw+e+DfN1Y8t8vybs/jmcU1cf8mNoJM/iEjErY0Dd6hX4pKtrlqcSFGR/Xw5oQv1alnx99Crz9l1CAWpYlqVroxq5hsf2btX45t+ezN13iU++/ZlyJoZ0alCdb/79DjU+3JrnMRXVsbZ3q0bQrO7M3XeJbefCeBr7igpljahdyYxxXd2oW/1/bd+ncS0CP+/MnL2XWH0ihLhXyVQ2N6GVc2WNG6/+1cSWrwY2JeDwH0zcGoyttRkTezSgnLGBxhSWwspP+3rPy4n/2/QT3RvXeqMvXhBCiKKmo2jzToi/kMzfLrR1dJsSrpH4q5m4LZjZey8RuqgXdSrn/JxUIf7Kvj56lX+vO8d/J777t/mueSFE6VAq53SK0i3z0wsgfZ7cmhPXsbM2y/XB/EL8laUpCiuOXcPO2gxvFwmcQoi/llI5vC5KtyMX7zNrzwV6NLGlqoUpd57EsvKHEJ7GvuLrYS3y9WgcIf5K7j+LJ/DaI/578R5/3HvOqhEe0o6FEH85EjpFqeNgU55K5Y1ZfOQPomITMTZMf9D9Gn8P9YYQIf5OfrrxmAFLTmJpZsS4Lq4MK4JvORJCiKJWauZ0CiGEEEKIkiNzOoUQQgghhNZJ6BRCCCGEEFonoVMIIYQQQmidhE4hhBBCCKF1EjqFEEIIIYTWSegUQgghhBBaJ6FTCCGEEEJonYROIYQQQgihdRI6hRBCCCGE1knoFEIIIYQQWiehUwghhBBCaJ2ETiGEEEIIoXUSOoUQQgghhNZJ6BRCCCGEEFonoVMIIYQQQmidhE4hhBBCCKF1EjqFEEIIIYTWSegUQgghhBBaJ6FTCCGEEEJonYROIYQQQgihdRI6hRBCCCGE1knoFEIIIYQQWiehUwghhBBCaJ2ETiGEEEIIoXUSOoUQQgghhNZJ6BRCCCGEEFonoVMIIYQQQmidhE4hhBBCCKF1EjqFEEIIIYTWSegUQgghhBBaJ6FTCCGEEEJonYROIYQQQgihdRI6hRBCCCGE1knoFEIIIYQQWiehUwghhBBCaJ2ETiGEEEIIoXUSOoUQQgghhNZJ6BRCCCGEEFonoVMIIYQQQmidhE4hhBBCCKF1EjqFEEIIIYTWSegUQgghhBBaJ6FTCCGEEEJonX5JV+Cf6NTVhwxZfpo7T+JyXW9Qa3vWj2xdTLUSQgghhCg50tOpBYO/zjtwuta0YOPpGwxZfrqYavXmQkNDsbOzU/85Ojri5eXF7NmziY2NLfb6REVF4e/vT4MGDbCzs2PdunVvXOauXbto06YNDg4OuLm5FUEt38zSpUtxcXEp0DY7d+7kwIEDRV6XtLQ0AgICuHDhQpb3BgwYwLBhw4p8n3kprut19+5dAgICePLkicbyly9fYmdnx+bNm7W273+a8+fPs2LFipKuRoHZ29uzevXqYt3nL7/8gp2dHb///nux7lcUjZSUlCJZ559EQqcW3H2ae+AEODWl898yeEJ6wFizZg0BAQG0atWKdevWMXjwYBRFKdZ6LF26lAsXLrBw4UL27dtHt27d3qi8qKgoJkyYQKtWrdixYwdbt24topoWnqWlJXXq1CnQNjt27ODgwYNFXhdFUXIMndWrV6dq1apFvs/cFOf1unfvXrahUxTcuXPnWL58eUlX42/B2NgYe3t7ypQpU9JVEQV07do15s2bR2RkZI7rREZGMm/ePEJCQoqxZiVLhtdLiLmpIaemdMZz+kE2nr4B8LcZand0dKRNmzYAdOjQgbS0NL755hsuX76cbW+ToiikpKRgYGBQpPW4efMmbm5ueHp6Fkl54eHhpKam0rNnzwL3LmrrGPv27Uvfvn2LtMzXJSUlYWho+MblzJ49uwhqUzBvcr2yk5KSgq6uLrq6peuzeE7Hra02LfKvbt26/Pe//y3pahSZ0tSmgoODiY6OZuXKlfj7+1OpUiWN9yMjI1m5ciVxcXEEBQXh5ORUQjUtXqXrt+tfTEbw/Lv2eGZo1KgRAPfv3wega9eufPTRR2zYsEEd+vzhhx8AOHPmDD179sTZ2RlXV1fee+89bty4kaXMDRs24O3tjYODA+7u7kyaNImYmBh1P3Z2dvz4448cP35cHe7PeD874eHhjBw5Ejc3N5ydnenevTvHjx9X3x83bhy+vr5q/e3s7JgxY0aO5eV2jCEhIQwfPlzdl6+vL8HBwRrbp6WlsXDhQpo2bcpbb71Fv379CA0NzTKEl93w+rZt2+jQoQN169bFzc2NTp06sXfvXgA6d
erEr7/+yrFjx9TzMmbMGABmzZqFu7s7586dw8fHB2dnZ2bOnAnAihUr6N69O/Xr16d+/fr4+voSGBio7jMmJgZ7e3sAvvjiC7Xs7du3A9kPr+fnWmfUKTg4mB49euDs7IynpycbNmzI8dxD3tcrt/YD/xsWX7t2LZ9//jnu7u44Ojry4sWLLPs6evQofn5+AHTu3Fk99jt37qjrKIrCsmXLaN68Oa6urgwZMoSIiAiNclJTU1mxYgVeXl44OjrStGlTZs6cSWJiYq7HmrHtypUradeuHU5OTjRq1IjBgwdz69YtALZv346dnR0vX77U2C4gIEDjg2Bux/2mbTpj+y1btvDOO+/w1ltv0aVLF86fP6+uM2PGDJYtW0ZsbKx6Hps3b57jccfFxTF16lTatm1L3bp1adq0Kf7+/ty+fTvLuiEhIbz//vs0bNgQZ2dnvLy8NHpUQ0NDGT16NC1btsTJyQlPT08+//zzLFOD0tLSWLRoEU2bNsXZ2ZnevXtz/fr1bOt39OhRevTowVtvvYWrqysffvghDx48yPa8HDhwAG9vb5ydnfnXv/5FaGgoiYmJzJgxg7fffptGjRoxadIkjfaQ0/D60aNH8fX1xcXFhXr16tG9e3f1WmVnxowZNGrUiKSkJI3lCQkJuLq6anxozM+1zu+5zK1N/dP169cPJycn4uLiWLlypUaP5+uB09nZmX79+pVgTYuX9HSWsMw9nn+X3s7X3bt3D4AKFSqoy06fPk1UVBSLFi3C0tISQ0NDzp49y9ChQ2natClLliwhISGBhQsX4uvry6FDh9Th2YULF7JkyRIGDBjAlClTuHXrFgsWLODKlSt899132NjYEBgYyEcffUTZsmWZNWsWAGXLls22fk+ePMHX1xcTExOmTZtGuXLl+PbbbxkxYgTLly+nXbt2fPbZZ7Rq1YrRo0ezbt066tSpk2N5uR3jtWvX8PX1xd7enlmzZmFqasq2bdsYMGAAu3btUgPkkiVLWLZsGSNHjqR58+ZcvXqVESNG5DlF4eTJk0ycOJGRI0fSqlUrkpOTCQ0NVX/Zr1u3jhF39L7/AAAgAElEQVQjRlC+fHn1vJiamqrbx8TEMGXKFCZNmoSTk5P6R+j+/fv06dOH6tWrk5qayokTJxgyZAgbN26kZcuWlC1bllOnTuHp6cmoUaPUwGdhYZFtPfN7rTPqNG3aNCZNmkTt2rU5ePAg06dPp1atWjn2Yud2vfJqP3p6emo5ixcvxtfXly1btqAoSrbX3MPDg4ULFzJmzBh1XwA2NjZqQFizZg21a9dmypQpxMXFMX/+fEaNGsXu3bvVcsaOHcuRI0fw9/fH3d2dsLAwFixYQEREBF9//XUuVx3GjBnDkSNHGDJkCC1atCAxMZGgoCAeP35M7dq1c902Ozkd95u0aYDAwEBevnzJypUrMTMz48svv2T48OEEBgZiYWHBqFGjSE1NZdeuXRw5cgQAff2c/wzFxcWRmJjIRx99hLW1NdHR0WzZsoUePXrwww8/YGlpCcAff/xB7969qVGjBpMmTcLGxobbt29rDFtGRERQrVo1OnbsiLm5Offu3WP58uVcuXKFnTt3qustWbKEJUuWMGLECFq1asWlS5cYPnx4lp/N7du38+mnn9KtWzdGjRpFQkICAQEB9OnThyNHjmj83F24cIFbt24xatQo9PT0mD9/PiNGjMDV1ZUyZcowZ84cbt68yYIFC7CysmL06NE5npPNmzczefJk2rdvz/z58zExMeHKlSvqh/7sDBgwgA0bNvD999/TtWtXdfmBAweIj49XR1Pye63zey4h+zZVGujr6zNw4EA2bdpESEiI2uMJaAROPz+/XH8G/nEUUeR0eq/O819mz+MSFbdPdpVAbfPv+vXriq2trbJu3TolPj5eefbsmfL9998rbm5uSpMmTZSEhARFURSlS5cuiouLi/LixQuN7X18fJTWrVsrKSkp6rKIiAjF3t5emTRpkqIoivLixQvFyclJ+c9//qOx7f79+xVbW1vl4MGD6rI+ffooI0aMyLPec+fOVerUqaPcvn1bXZaSkqK0a9dOadu2rbrs/Pnziq2trXLlypU8y8zpGAcOHKi0bNlSiY+PV5elpqYq7777ruLv768oiqLExsYqdevWVaZPn66x7YYNGxRbW1tl1apV6rIlS5YodevWVV/Pnz9fad68ea5169mzZ7bnZebMmYqtra1y7ty5PI9PURRlxIgRyvvvv6++TklJUWxtbZW1a9dmWbd///7K0KFD1df5udav1+nSpUsa5XXs2DFLG8gsu+uV3/YTHx+v2NraKr169cp1HxnOnj2bbdvIKKd79+5KWlqaunzXrl2Kra2tcufOHUVRFOXXX39VbG1tlU2bNmlsf+TIEcXW1la5evVqjvv++eefczzvGbZt26bY2tpqtDtFUZRFixYprq6uWeqb3XG/SZvO2L5Ro0Ya60VHRyu2trbKjh071GXz58/XqFNBpaSkKA0bNlQ2bNigLuvXr5/StGnTLMeflytXrii2trZKSEiIoijpP5suLi7KxIkTNdZbvny5xs/my5cvFTc3N+WDDz7QWO/hw4eKk5OTsm7dOnVZxnl99uyZuuz7779XbG1tNX6+FEVRPvzwQ8Xb21t9HRwcrNja2iqXL19WFCX9+tWrVy9fv/cy69+/v9K7d2+NZd26dVP69++vvs7vtc5O5nOpKDm3qdIkOTlZWbt2rTJu3Dhl2rRpyrRp05Rx48Yp69atU5KTk0u6esVOhtdLiG6fNRr/LIZt4vKdZyVdrXyZMWMGLi4uNGrUiJEjR2Jra8vatWs1Jru7urpSrlw59XViYiKXL1+mY8eOGj1NVapUwd3dnaCgIAAuXbpEYmJilpuCOnbsiIGBAT///HOB6xsUFISrqyu1atVSl+np6dGlSxdu3rzJs2eFO++ZjzE5OZnz58/ToUMHTExM1OW6urq0adNGHaIKDQ3l5cuXdOjQQaO8zK9z2ufDhw8ZO3YsJ06cKPBTA/T09GjSpEmW5deuXePf//43np6e1K1bFycnJ06cOEFYWFiByof8X+sMxsbGuLq6aiyrUaNGlmHK/Cho+2nRokWB95Edb29vdHR01NfOzs4A6jEEBgaio6ND9+7dNbZr06YNurq6/PLLLzmWfebMGQD69OlTJHWFnI+7sG369e1fX698+fKYm5sX6lpmOHr0KP369aN58+Y4OztTt25doqOj1baZlJREUFAQXbt21dh3ZikpKaxdu5auXbvSqFEjnJyc6NGjB4BaVmhoKPHx8XTq1Elj29d7BwEuXrxITEyMun2GypUr4+LikuV6NmjQQGMkKGOqSsbc+NeXP3z4MMdjuHDhAnFxcYVqC35+fgQFBXHz5k0Arly5wuXLl9Wh3YJc6/ycywyZ21Rpk9HjmTHUXmp7OP9U+o5YvLHhw4fTtm1bDA0NqVKlChUrVsyyTsawV4aYmBjS0tKwsrLKsq6VlZU61y86Olpd9jo9PT0sLS2znXOXl+jo6GzvAM/YR3R0dI7DxLnJfIwvXrwgJSWF
DRs28M0332i8l5qaSlpaGgCPHz/OdvvszmNmbdu2ZcGCBWzZskUdqvHw8GDKlCkaoTonFSpUyHLDSEREBH379sXV1ZXPP/8cGxsb9PX1CQgI4PLly3mWmVl+r3WG14chMxgYGORrrmNmBW0/ma9BYZmbm2u8zhhCzDiGqKgoFEWhcePGWbZNS0vj+fPnOZYdHR2NmZlZroGqoHI67sK26QzZTU/Q19cv1LUEOHbsGO+//z6DBg1i9OjRWFhYoKury9ChQ9UyY2JiSE1NzXKjRmZz5sxhy5YtjBs3joYNG2Jqakp0dDS9evVSy8r42cxcVubXUVFRAHzwwQcaHzYgPZBl/mCXU/soX768xnIjI6Ncz1VGO8nrWLPj7e1N5cqV2bp1K5MnT2bLli1UrFiRdu3aAQW71vk5lxmK6mfs7+z1oXYdHZ1SGzhBQmexqW5pyorhLWlSx5ryJoYY9Ftb0lUqtBo1avD2228XaJty5cqhq6ub7SNnnjx5ov5SzvjvkydP1N4iSP+lFxUVleWXd36Ym5vnuF/QnIv6JszMzNDT02PAgAEMGDAgx/Wsra2B9D9cdnZ26vKnT5/maz8+Pj74+PgQFxfHjz/+yJw5c/D39y/0Xa4nTpwgPj6eFStWaASb+Pj4QpWX32utDdpoP0VVLwMDA/bv36/R+5shtw89FSpUIDY2lpcvX+YYPI2MjICsz/x70+fn5rdNa8vevXtxc3Nj6tSpGsszQh+ktzc9PT0ePXqUa1l79uxh8ODBDBkyRF2W+QadjJ/NzB9OMj7MZMj4nTF37lzq1auXZV/Gxsa51qWwMtrJo0ePeOuttwq0rZ6eHn369GH9+vV8+OGH7Nu3j0GDBqnhpyDXOj/nUmjKCJ46OjrZ/g4oLWR4vZikpinsDb7DiFVnSroqJcLIyAg3NzcOHz5MamqquvzBgwcEBwerPQNubm4YGRmxf/9+je0PHz5McnJytkPDeXF3d+fy5csadxunpaVx4MAB7O3tiyx0GhkZ0aRJE3766SeqV69O7dq1s/wDcHBwwMTEhO+//15j+8yv81K2bFnat2+Pr68vt27dUgOHoaFhgXqW4uPjMTAw0PhD+fz58yxD0Xp6eujp6WW5Azaz/F5rbdBG+4GsPZcF1bp1a5KTk7l+/Xq27SK3Nujh4QGQ63NIM27Munv3rsbyzMPfBZXfNl0QBWmfL1++zNIbeOrUKY279A0NDXF3d2f//v1Z7t7PoCgKr169yjLMe+jQIY3XDg4OmJqaqlMaMmR+3aBBA8zMzAgODs72nFSpUiVfx1dQ9evXp2zZsoV+Jm3fvn2Jj4/ngw8+4NWrVxqPY8vvtc7vuRRZ6evrl+rACdLTWWwePH/J6uMhOFYpn/fK/1Bjxoxh8ODBDBo0CD8/P/VuT2NjY3WouFy5cowYMYIlS5ZgbGyMl5cXYWFhfPXVV7i5udG+ffsC73fIkCHs3LkTPz8/Ro8ejZmZGVu2bOHmzZtF/s0oEydOxNfXF19fX/z8/KhSpQrPnz9XewEmTJhA2bJlGT58OEuWLMHU1JRmzZpx9epVvvnmmzyfE/nFF1+QkpJC48aNsbKy4v79+2zevJlmzZqpPRYODg7s2bOHH374gUqVKmFubk716tVzLLNly5bMnz+fxYsXM2LECB49esRnn32W7fBPnTp1OHLkCPXr18fU1JRq1aplG5jyc621QRvtB6B27dro6emxdetWFEXBwMAAR0fHfG//9ttv4+Pjw/jx47ly5QqNGzdGT0+PiIgIjh8/ztSpU6lZs2a22zZu3JjOnTszZ84cHj58SPPmzUlOTiY4OBgvLy+aNWtGgwYNqFatGnPnzmX27Nno6OiwZs0awsPDC3W8r8tPmy4IBwcHkpKSWL9+PQ0bNsTIyCjHZxR6eHgwZ84cTp48SfPmzbl48SJTpkzJ0uP76aef0rt3b3x8fHjvvfewsbHh7t27XLt2jRkzZqCjo0PLli3Ztm0bXl5eVK1alYMHD2o8XQDSP8i99957rF69GicnJzw8PLh8+TIBAQEaw+gmJiZMnDiRTz/9lNjYWDp06ED58uWJjIzk/PnzeHh4ZJkHWhRMTEwYP348kydPZvjw4XTv3h1TU1OuXbtGmTJlNHoes2NlZUW7du04fPgwnp6eVKtWTeP9/Fzr/J5LIbIjoVMUm5YtW7J+/XoCAgL4+OOP0dfXp0mTJowfP17jETpjxozB3NyczZs3s337dszNzenevTuffPJJoT4lWllZsXPnTubOncv06dNJTEzEycmJVatW4eXlVZSHiLOzM/v27SMgIIA5c+YQGxuLpaUl9erV0xiyynh0zPbt21m9ejWurq589dVX9OzZEzMzsxzLb9iwIZs3b+bQoUPExMRgZWWFt7e3+ixOgPfff5+wsDA+/vhjXr58Sbdu3Vi4cGGOZbq4uLBgwQIWL17M8uXLqVy5Mn379sXBwYHTpzWfHTtt2jSmTZvGwIEDSUlJYfbs2fTu3TtLmfm91tpQ1O0H0oc1p06dysqVK9m9ezdpaWmcPHky23mrOfnyyy+pV68eO3bsYMOGDRgaGlKtWjU8PT3znM+7cOFCnJ2d+e6779i0aRNmZma4ubmpN4Ho6+uzZs0apkyZQocOHTAzM8PX15dBgwaxcePGQh1zhvy26fzy9vamb9++LF26lOjoaCpVqsS5c+eyXXfQoEE8ffqUTz/9lBcvXmBvb8/06dOzfBmBi4sLO3fuZOHChXzxxRckJSVRtWpVevbsqa4zc+ZMPv/8c3x9fUlNTaVx48YsX75cffxXhlGjRpGWlsbkyZN58eIFTk5OzJ07l/79+2us16tXLypVqsSqVasYP348KSkpVKpUiaZNmxbJlxXkpH///lSsWJEVK1YwduxY9PX1qVOnDqNGjcrX9h07duTw4cPZfulEfq91fs+lEJnpKEoxf3dhKaDbZ02O7zlWKc8f83vmOKczbdt72qqW+Is7duwY/v7+HDhwgLp165Z0dYQQ/0Bjx47l3LlznDlzptQP9YriJz2dQpSACxcucPbsWXUO4pUrV9RvtJHAKYQoahcvXiQkJIR9+/YxYcIECZyiREjoFKIEmJiY8OOPP7Ju3Tri4uKwsrKic+fOjBs3rqSrJoT4B/L19cXIyAgfHx8GDx5c0tURpZSEzmJkZKCHob6e+v+KopCUkpbHVuKfyNHRkW3btpV0NYQQpUTm5+MKURJkTqcQQgghhNA6eU6nEEIIIYTQOgmdQgghhBBC6yR0CiGEEEIIrZPQKYQQQgghtE5CpxBCCCGE0DoJnUIIIYQQQuskdAohhBBCCK2T0CmEEEIIIbROQqcQQgghhNA6CZ1CCCGEEELrJHQKIYQQQgitk9AphBBCCCG0Tr+kK1Ac7ty5U9JVEEIIIYT4S6lZs2ax7k9HURSlWPcohBBCCCFKHRleF0IIIYQQWiehUwghhBBCaJ2ETiGEEEIIoXUSOoUQQgghhNZJ6BRCCCGEEFonoVMIIYQQQmidhE4hhBBCCKF1EjqFEEIIIYTWSegUQgghhBBaJ6FTCCG
EEEJonYROIYQQQgihdRI6hRBCCCGE1knoFEIIIYQQWiehUwghhBBCaJ2ETiGEEEIIoXUSOoUQQgghhNZJ6BRCCCGEEFqnX9IV+Cc6dfUhQ5af5s6TuFzXG9TanvUjWxdTrYQQQgghSo70dGrB4K/zDpyuNS3YePoGQ5afLqZavbnQ0FDs7OzUf46Ojnh5eTF79mxiY2OLvT5RUVH4+/vToEED7OzsWLdu3RuXuWvXLtq0aYODgwNubm45rjdgwACGDRuWZ3lLly7FxcXljeuVk7t37xIQEMCTJ0+yvGdvb8/q1avV1+fPn2fFihVaq4sQQoj/SUlJKZJ1/kkkdGrB3ae5B06AU1M6/y2DJ6QHrjVr1hAQEECrVq1Yt24dgwcPRlGUYq3H0qVLuXDhAgsXLmTfvn1069btjcqLiopiwoQJtGrVih07drB169Yc161evTpVq1Z9o/0VhXv37uUYOh0cHKhQoYL6+ty5cyxfvrw4qyeEEKXStWvXmDdvHpGRkTmuExkZybx58wgJCSnGmpUsGV4vIeamhpya0hnP6QfZePoGwN9mqN3R0ZE2bdoA0KFDB9LS0vjmm2+4fPlytr2DiqKQkpKCgYFBkdbj5s2buLm54enpWSTlhYeHk5qaSs+ePfPsnZw9e3aR7FObDh06VNJVEEKIUik4OJjo6GhWrlyJv78/lSpV0ng/MjKSlStXEhcXR1BQEE5OTiVU0+IlPZ0lKCN4/l17PDM0atQIgPv37wPQtWtXPvroIzZs2KAOVf/www8AnDlzhp49e+Ls7IyrqyvvvfceN27cyFLmhg0b8Pb2xsHBAXd3dyZNmkRMTIy6Hzs7O3788UeOHz+uDvdnvJ+d8PBwRo4ciZubG87OznTv3p3jx4+r748bNw5fX1+1/nZ2dsyYMSPH8rIbXr927Rp9+vTBycmJZs2aERAQQFpaWpZtnz9/zuTJk2natCmOjo54e3tn6VU9ceIEdnZ2nD17lpEjR+Li4oK7uzuTJ08mISEBgKNHj+Ln5wdA586d1fNw584dQHN4fcaMGSxbtozY2Fh1vebNmxMVFYWjoyNr1qzJUs+lS5fi5OTE8+fPczwPQgghsurXrx9OTk7ExcWxcuVKjR7P1wOns7Mz/fr1K8GaFi8JnSUsc/D8O7p37x6AxlDu6dOnOXbsGIsWLeLUqVO8/fbbnD17lqFDh2JsbMySJUuYOXMmYWFh+Pr6EhERoW67cOFCpk+fTvPmzVmzZg0jR45k7969DBo0iNTUVGxsbAgMDKR+/fq0bNmSwMBAAgMDKVu2bLb1e/LkCb6+vly9epVp06axbNkyLCwsGDFiBEePHgXgs88+IyAgAIB169YRGBjIqFGj8n0OXrx4Qf/+/Xn27BkLFy7kiy++4OzZs2zbtk1jvfj4eHr16sWxY8cYNWoU69ato23btkyePJlNmzZlKXfChAm0a9eOEydO8OWXX7J//34WL14MgIeHBwsXLtSoc2BgINWqVctSzqhRoxg4cCCmpqbqert378bS0pIOHTpkCb1paWls376dd999V+O6CiGEyJu+vj4DBw7MEjwzB04/Pz/09UvPoHPpOdK/sNeH2v8OkpKSePnyJYmJiQQFBbF69Wqsra1p2LChuk5aWhrLly+nXLly6jJ/f3+qVq3Khg0b0NPTA9J7ST09PVmxYgUzZswgJiaGVatW4ePjw/Tp0wFo1aoVFStWZPTo0Xz//fd06tSJatWqYWRkhLGxcbYh63Xr168nOjqanTt3UqtWLQBat25Nx44dmT9/Pu3ataNChQpYWVkBYG1tnWeZ2e0jNjaWQ4cOYWNjA0CzZs3w8PDQWG/Tpk2Eh4ezf/9+nJ2dAWjRogUJCQkEBATQv39/9dwADBw4EB8fH7VePXv2ZP/+/YwfPx5jY2MqVqyYrzqbm5tjZmaGrq5ulvX8/Pzo1asX58+fp1mzZgAEBgYSERGhhlohhBAFkxE8N23aREhICCtXrgQotYETpKezxOj2WaPxz2LYJi7feVbS1cqXGTNm4OLiQqNGjRg5ciS2trasXbuWMmXKqOu4urpqBM7ExEQuX75Mx44dNUJVlSpVcHd3JygoCIBLly6RmJiY5aagjh07YmBgwM8//1zg+gYFBeHq6qoGTgA9PT26dOnCzZs3efbszc/7b7/9RqNGjdTACWBiYoKXl5fGeqdPn8bZ2VkNnBm8vb15/vw5YWFhGsubNGmi8bpGjRo8fvyY1NTUN65zhrfffhsnJyeN3s4tW7Zgb2/P22+/XWT7EUKI0iZzj2dpDpwgPZ2iEIYPH07btm0xNDSkSpUqam/b6ywtLTVex8TEkJaWpvYmvs7Kykqd1xkdHa0ue52enh6Wlpa8ePGiwPWNjo6mTp062e43430LC4sCl/u6x48f4+jomGV55snjUVFR3L59O8uk8Yw7/zPPn8w8ZUBfX5/U1FRSUlI0wvub6t+/P9OnTycqKork5GROnjzJpEmTiqx8IYQorV7v8dTR0Sm1gRMkdBabd+tXZ0J3NxrUsiQ2IZkdP4XxybdBJKdmvdHkr65GjRoF7gErV64curq62T7a58mTJ5ibmwOo/33y5IlGb2BqaipRUVHq+wVhbm6e436BIpmzaG1tnW0gzgjRr9fF1dWVL7/8MttyqlSp8sZ1KQwfHx/mzp3Lzp07SUxMxNDQUB3WF0II8WYygqeOjk6Rdhj83cjwejGpamHC0u+v4PDxDjrM/p6O9WswvlvODx//pzEyMsLNzY3Dhw9rDA0/ePCA4OBgdRjZzc0NIyMj9u/fr7H94cOHSU5OzjLcnB/u7u5cvnxZvasb0uecHjhwAHt7+yIJnQ0bNuTChQvEx8dr7OPHH3/UWK9169Zcv34dAwMDateuneWfsbFxgfZraGgIpE9fyM+6Oa1nYmKCj48P27ZtY/v27XTq1EljeoQQQog3o6+vX6oDJ0hPZ7FZc+K6+v+PohPYcDqUZg7WJVij4jdmzBgGDx7MoEGD8PPzU2+eMTY2xt/fH0jvER0xYgRLlizB2NgYLy8vwsLC+Oqrr3Bzc6N9+/YF3u+QIUPYuXMnfn5+jB49GjMzM7Zs2cLNmzeL7Bt6Bg8ezKZNmxg7dixTp07F0NCQRYsW8fjx4yx1OXDgAH369GHYsGE4ODiQkJBAWFgYQUFBBf5Wpdq1a6Onp8fWrVtRFAUDAwMcHR3VMPo6BwcHkpKSWL9+PQ0bNsTIyEhjmH/AgAF88803QPpwuxBCCFGUJHSWEPc6Vlz6m9w4VFRatmzJ+vXrCQgI4OOPP0ZfX58mTZowfvx4jW/3GTNmDObm5mzevJnt27djbm5O9+7d+eSTTwr1KdHKyoqdO3cyd+5cpk+fTmJiIk5OTqxatSrLjT6FZW5uzrfffsu0adNo3bo15cqVo0uXLgwdOlQjSJqamrJz504WL17Mxo0befToEeXKlaN27dp06dKlwPu1sLBg6tSprFy5kt27d5OWlsbJkyepWbNmlnW9vb3p27cvS5cuJTo6mkqVKnHu3Dn1fXt7e+zt7TEwMMj1K0CFEE
KIwtBRivu7C0sB3T5ZH7T9ukGt7Znm24j643cTHZ+k8V7atve0WTUhcnT37l3atGnD9OnTS9XDioUQQhQP6eksZp0aVmdOP3faTD+UJXAKURIePXrEnTt3WLx4MRYWFnIDkRBCCK2QG4mKkZdLFdb6t6Lz3P9yLSI67w2EKAbfffcd/fr149GjR+ocWyGEEKKoyfC6FmQ3vN7CsRL7xrWj96LjnL2e/h2saWlKlkcmyfC6EEIIIf6JJHQKIYQQQgitk+F1IYQQQgihdRI6hRBCCCGE1knoFEIIIYQQWiehUwghhBBCaJ2ETiGEEEIIoXUSOoUQQgghhNZJ6BRCCCGEEFonoVMIIYQQQmidhE4hhBBCCKF1EjqFEEIIIYTWSegUQgghhBBaJ6FTCCGEEEJonX5JV6A43Llzp6SrIIQQQgjxl1KzZs1i3Z+OoihKse5RCCGEEEKUOjK8LoQQQgghtE5CpxBCCCGE0DoJnUIIIYQQQuskdAohhBBCCK2T0CmEEEIIIbROQqcQQgghhNA6CZ1CCCGEEELrJHQKIYQQQgitk9AphBBCCCG0TkKnEEIIIYTQOgmdQgghhBBC6yR0CiGEEEIIrZPQKYQQQgghtE5CpxBCCCGE0DoJnUIIIYQQQuskdAohhBBCCK2T0CmEEEIIIbROv6Qr8E906upDhiw/zZ0ncbmuN6i1PetHti6mWglReo0YMYJnz56xefNmjIyMSro6QghRKklPpxYM/jrvwOla04KNp28wZPnpYqpV0dq7dy92dnZ06dKlSMvduXMnBw4cKNIyAQYMGMCwYcOKvNwMd+/eJSAggCdPnhRZmUuXLsXFxaXIyssPRVHo2rUrK1asUJetXr0aDw8PAAICAmjfvn2By9X2+c/Npk2bCAkJYeXKlRqBUxvXLIO9vT2rV69WX5dk+3NxceG7774DwM7OjiNHjqjvLV++nK5du6IoitbqJkRplZKSUiTr/JNI6NSCu09zD5wAp6Z0/lsHz127dgFw5coVrl+/XmTl7tixg4MHDxZZecXl3r17RR5gLC0tqVOnTpGVlx/79u0jIiKCgQMHFmm51atXp2rVqkVaZn6EhISwZMkS1q5di6WlpcZ72rhmOdH28Rf2WAYNGkRERMTf8mdOiL+ya9euMW/ePCIjI3NcJzIyknnz5hESElKMNStZEjpLiLmp4d82eD569Ijz58/j6ekJ/C+AiqLVt29f9u7dW6z7XLVqFT4+PpiYmBRpubNnz2b69OlFWmZ+ODk5ERwcjL29fbHv+3Uldfx5MTExoWvXrqxataqkqyLEP0pwcDDR0dGsXLky2+AZGRnJypUriY6OJlTA5eUAACAASURBVCgoqARqWDIkdJagv2vw3LNnD2lpaXz88cc0atSIffv2kZqamud2SUlJzJ49Gw8PDxwdHWnUqBF9+vThjz/+AKBTp078+uuvHDt2DDs7O+zs7BgzZgwAY8aMoWvXrlnK7Nu3L/7+/hrLDh8+TNu2bXFycqJdu3Yaw4mve/78OZMnT6Zp06Y4Ojri7e3N1q1bNdaZNWsW7u7uBAcH06NHD5ydnfH09GTDhg3qOkePHsXPzw+Azp07q3W/c+dOtvtt3749I0eOzLL84sWL2NnZcfToUSD74fVt27bRoUMH6tati5ubG506ddIIpseOHWPgwIG4u7tTt25dOnTowPr16/N1fS5evEhISEi25zk7jx8/xt7eno0bN2Z5b8WKFdjb2/Ps2TMg6/ByXm0ho4zu3btTv3596tevj6+vL4GBgfmqW2pqKitWrMDLywtHR0eaNm3KzJkzSUxMBHK/Znfu3MHOzo4TJ05olPnTTz9hZ2fH1atX1WVpaWksWrSIpk2b4uzsTO/evbPt+c98/HFxcUydOpW2bdtSt25dmjZtir+/P7dv39bYThvtL7MuXbpw5coVjXMvhHgz/fr1w8nJibi4uCzBMyNwxsXF4ezsTL9+/UqwpsVLbiQqYRnB03P6QTaevvG3uLFo9+7d1KlTB1dXV3r06MHEiRM5c+aM2vOZkwULFrB9+3bGjx+Pg4MDMTExXLhwgdjYWADWrVvHiBEjKF++PLNmzQLA1NS0QHU7f/48o0aNwsvLi0mTJvHs2TO++OILUlNTqVu3rrpefHw8vXr1IjY2llGjRlGrVi3Onj3L5MmTSU5O1hhejomJYdq0aUyaNInatWtz8OBBpk+fTq1atfD09MTDw4OFCxcyZswY1q1bpw6J29jYZFtHHx8fFi5cyIsXLyhfvry6fO/evZibm/POO+9ku93JkyeZOHEiI0eOpFWrViQnJxMaGqqeP4Dw8HA8PDwYMmQIZcqU4ffff+err77i2bNn/N///V+u5+706dMYGxtrnCcALy8v7OzsAHj33XepX78+ANbW1rRo0YI9e/YwaNAgjW327t1L69atsbCwyHZfebUFgPv379OnTx+qV69OamoqJ06cYMiQIWzcuJGWLVvmeixjx47lyJEj+Pv74+7uTlhYGAsWLCAiIoKvv/4612t2//79XMt+3ZIlS1iyZAkjRoygVatWXLp0ieHDh+c5RzIuLo7ExEQ++ugjrK2tiY6OZsuWLfTo0YMffvhBYyrAm7a/+fPnq9c0ICCABg0aaNTF1dUVIyMjTp06VexziIX4p9LX12fgwIEac8ozOkheD5x+fn7o65eeKFZ6jvQv7PXg+Vd38eJFbt26xbhx44D0npXp06eze/fuPEPnb7/9hpeXF3379lWXvR6wKlWqhKGhIcbGxlSrVq1Q9QsICKB27dqsWLECXd30jnxbW1t69OihEaY2bdpEeHg4+/fvx9nZGYAWLVqQkJBAQEAA/fv3R09PD0jvlZs1axaurq4ADBkyhO+++44DBw7g6emJsbExFStWBNKDWF5179atG/Pnz+fQoUPqJ9yUlBQOHDhAp06dMDAwyHa73377jcqVKzN27Fh1WYsWLTTWGT58uMbrZs2aoa+vz7Jly/jPf/6Djo5OjvW6ePEiDg4O6nFnyOg5A3BwcMDBwUF9z8fHhzFjxhAWFqauc/XqVUJDQxk1alSO+8qrLQB88cUXGq9btWrFw4cP2bx5c66h87fffmPfvn1MmzZN7QFs0aIFVlZWfPDBB1y7dg1nZ+cCXbPsxMXFsWbNGvr27cv48eOB9PMNMG/evFy3rVy5MnPmzNFY5u3tjbu7OwcPHtQI8W/a/jp06KD+f3Y3/unr6+Pg4MBvv/2W30MXQuRDdsETKLWBEyR0lhjdPmtKugqFsmvXLnR1denevTsAZmZmtG3blmPHjhEbG4uZmVmO27q5ubF161aqVKlC69atqV+/fpH+wCmKwsWLF3n//ffVwAlQv359qlevrrHu6dOncXZ2VgNnBm9vbzZt2kRYWJg6D9DY2Fj9g5+hRo0aPHjwoFD1tLGxoUmTJuzZs0cNnadPn+b58+f06NEjx+1cXV1ZtmwZY8eOpWPHjjRu3DjL+X769CnLly/n7NmzPHz4kKSkJNLS0khJSSE6OpoKFSrkWH5kZGSOvbM5adeuHSYmJuzZs0ftS
d2zZw9mZmZ4e3vnuF1+2sK1a9dYtmwZf/zxB0+ePCE1NZXU1FQ13OYkMDAQHR0dtY1maNOmDbq6uvzyyy9ZrnthhIaGEh8fT6dOnTSWd+3aNc/QCenD4hs2bCA8PJznz5+jKArJycmEhYVprFfU7S87lpaWPH78uMjKE0Kkyxw8gVIbOEFCpyiApKQkDh06RIMGDTA1NSUmJgZIDx4HDx7k0KFD9OnTJ8ftx40bh5mZGQcOHGDZsmWULVuWbt26MWHChAIPo2cnOjqapKQkrK2ts7yXeVlUVBS3b9/GyclJY3nGsOjz58/VZdnVzcDAQJ0fWBg+Pj588skn3Lt3j+rVq7Nnzx5q1qyZZejzdW3btmXBggVs2bJFHabx8PBgypQp1KpVi9TUVIYNG8aLFy/4v//7P+zs7ChTpgynTp3SmM+Yk8TERAwNDQt0HMbGxrz77rvs27eP//znP6SlpXHgwAE6duyY6/Mw82oLERER9O3bF1dXVz7//HNsbGzQ19cnICCAy5cv51qnqKgoFEWhcePGWd5LS0vTuLZvIiOkVapUSWN55tfZOXbsGO+//z6DBg1i9OjRWFhYoKury9ChQ7NcJ220v8yMjIx49epVkZUnhPif14Onjo5OqQ2cIKGz2DS1t+brYS1wsClPYkoqx39/wIfrfuRJzN/nF/3x48eJjo7m119/Vef1vW7Xrl25hk4jIyNGjx7N6NGjefToEcePH2fmzJno6OjkeWevkZFRtjfDxMXF8f/s3XdcVeUfwPEPe0+ZioCgiAtRxL23ORIz07Iy09ZP22Zljqy0nGm7TLPUcg8cuffADS4EAUVQRET2hvv7A7lyuYx70asV3/fr5as49znnec5znvPc73nOc861trYGwNbWFmNjY2UwXFpqaqoyXUlaPz8/Zs+eXW5+tWvXrrQ8D6pfv35MnTqVDRs28NJLL7F79+5yHy4qKygoiKCgIDIyMjh8+DBffvklr776Ktu3byc6Oppz586xZMkSunS5Pze47AMxFbGzsyu37jQp09q1azl58iQ5OTkkJiYSFBRU6TpVtYU9e/aQmZnJjz/+qPIkfWZmZpXlsbW1xcjIiE2bNqlNFQAqnGdaumyg/v68jAzVV6GVXMikpqaqLE9JSamyjBs2bKB58+ZMnTpVZfmdO3eqXFcXUlNTKx0FF0I8mJLAU09Pr9x+qaaQoPMRuX4nk1E/7OdqYjqWpkbMHtmGeS+05flv9z3uomls7dq1mJmZ8csvv6jcvobih4vWrFlDbGws7u7uVW7LxcWF5557ju3btxMREaFcbmxsXO4ITp06ddi+fTuFhYXKEzY5OZmoqChlgKinp4e/vz8HDx7ktddeU64bFxdHTEyMyi32Ll268MMPP2BkZKRReatSMkKo6eiThYUFPXv2ZP369Tg7O5Obm1tloFaapaUlffr0ISoqinnz5lFQUKAMyEoH11D8NL8mvLy8CAkJ0bgMJdq2bYuLiwvr168nJycHNze3ckcZK1JeW8jMzMTIyAgzMzNlurt37xISEqKcv1iRLl268N1333H58uVKf7ygomPm6OiIoaEhsbGxKsvLvtbEx8cHCwsLDh48qDJCffDgwUrLB5CVlaXyEBnAvn37yMrKqnLd8mjb/sq6fv262vxgIcTDVVNHN0uTGnhE4pMziU8uDgrSc/KJS86kY0OXx1wqzd25c4f9+/cTFBRE+/bt1T53dHRkzZo1rFu3jrfffrvcbYwcOZI2bdrQqFEjrKysCA0NJSQkhDfffFOZxsfHh/Xr17Nr1y6cnZ2xtbWlbt269O/fnwULFjBnzhzGjBnD7du3mT59utpJ/NZbbzFy5Ei+++47XnjhBe7cucOECRPUbvW+9NJLBAcHM3z4cF5++WV8fHzIzs4mOjqa48ePs3jxYq3qx9vbGwMDA/78808UCgVGRkY0bNiw0tvVQ4YMITg4mK+//ppWrVqpzTst6/PPP6egoIDAwEAcHR2Ji4tj+fLlyoeFGjZsiJOTE9999x2zZs2iqKiI77//Xu01PBVp3bo1a9as4e7du1qNepXM8V2xYgUFBQWMHj260geWoOq20LFjR+bMmcPChQt55ZVXSEhI4OOPP9ao027VqhVBQUFMnDiRCxcuEBgYiIGBAfHx8ezevZupU6fi4eFR6TF74oknWLJkCYGBgbi5ubFjxw61d6ZaWloyZswYfvnlF3x9fenUqRNhYWEsWLCgyv3v1KkTX375JXv37qV9+/acPXuWKVOmVPv9qNVpfyWSkpKIj4+nTZs21cpbCCE0JUHnI+RWy4ITMwZjZWqEoYE+I7/d+7iLpLGSd3E+/fTT5X7u7e1NQEAA69at46233ir3S7d169bs3LmTX3/9lby8POrWrct7773HmDFjlGlee+01oqOjefvtt8nKyuLJJ59k/vz5eHl58e233zJ//nyWLFmCu7s748ePZ8WKFSp5tGvXjoULFzJ//nwWLlyIq6srY8eOVQs6LSwsWL16NQsXLmTp0qUkJCRgbW2Nt7d3tX7a097enqlTp/LTTz+xbt06ioqK2Lt3Lx4eHhWu07FjRxwdHUlISKj0Se8SLVu2ZPny5WzZsoW0tDQcHR3p2bOn8l2mJaPQ06dPp2PHjlhYWNC7d28mTZqkfLq6Mr169cLU1JTdu3czdOhQzXee4lvsJT+dWfYBnvJU1RaaNm3K3LlzWbhwIT/88AMuLi6MGDECHx8f9u+v+n22s2fPplmzZqxatYrffvsNY2Nj3Nzc6Nq1q3KktLJjNm3aNKZNm8bo0aPJz8+ne/fuTJs2jXHjxqnkM378eIqKipg8eTKpqan4+vry1Vdf8dxzz1VavhdffJGkpCQ++ugjUlNTadCgAdOnT2fmzJlV7lt5qtP+SuzZswczM7NKH/wSQoiHQU8hP7r70FX0ZLq+nh6O1qa41bLgpa4+/LI7nNBrySppiv4aU+66QjwKEydOVI6giprh2WefxcPDo9oBrxBCaEp+kegRKlIouJWazanoJH4/EMnKt3s87iIJoeLNN9/k9OnThIaGPu6iiEfg7NmznDlzRqORdiGEeFASdD4meujh4WD5uIshhIo6deowZ84c5c9Xiv+25ORk5s6dq/O3NQghBMiczkfm6bb1uH4nk/AbKdSxt2DWyNbsufDwXu4sxMNS9mXn4r+re/fuj7sIQogaROZ0CiGEEEIInZPb60IIIYQQQuck6BRCCCGEEDonQacQQgghhNA5CTqFEEIIIYTOSdAphBBCCCF0ToJOIYQQQgihcxJ0CiGEEEIInZOgUwghhBBC6JwEnUIIIYQQQuck6BRCCCGEEDonQacQQgghhNA5CTqFEEIIIYTOGT7uAjwK165de9xFEEIIIYT4R/Hw8Hik+ekpFArFI81RCCGEEELUOHJ7XQghhBBC6JwEnUIIIYQQQuck6BRCCCGEEDonQacQQgghhNA5CTqFEEIIIYTOSdAphBBCCCF0ToJOIYQQQgihcxJ0CiGEEEIInZOgUwghhBBC6JwEnUIIIYQQQuck6BRCCCGEEDonQacQ
QgghhNA5CTqFEEIIIYTOSdAphBBCCCF0ToJOIYQQQgihcxJ0CiGEEEIInZOgUwghhBBC6JwEnUIIIYQQQuck6BRCCCGEEDonQacQQgghhNA5CTqFEEIIIYTOSdAphBBCCCF0ToJOIYQQQgihcxJ0CiGEEEIInZOgUwghhBBC6JwEnUIIIYQQQuck6BRCCCGEEDonQacQQgghhNA5CTqFEEIIIYTOSdAphBBCCCF0ToLOf4k1ITHoD1/E5tOxVaZNSs9Bf/giJiwLUS7LyMlHf/gi3ll6TKP8tE3/KE1YFoL+8EWkZOZVexva1KcQj+t8GL/4CPrDF5GRk/9I8/2v+if3a+LBBH68gRYT1z229YVmDB93AR6VqFtprDgcxd9nrxN5M428gkK8nK0Z3t6bcX0aY26i+6qYuPw4s4PDaFjbhkvzntZ5flXJyi1gdnAY7Xyc6e1X53EXR/zDSXv5Z/t+x0UMDfR5pYfv4y7Kf5acAxXLzC3gh52X+OtwFDGJ6eQVFOJsY4afhz19mtfl1Z7SLkUNCjrnBIexaM9lOvq6MKpLA/T09Nh1Lp4PVxxn9bFoDn06EBMjA53lX1BYxB8HI/F2tubyjVQOX75Fh4bOOsuvLEtTI7L+eAlDfT3lsqy8Aj5dc5r3BjRT60DLSy9qtsray3/dv+F8+GHHJUyNDSTo1KGafA5UJie/kA6TNxEWm0znRi682a8JpsYGXE1MZ//FBE5EJek86Dw8fZBOty8ejhoTdAa1rsekIS1ws7dQLlMo4NmFe1h5NJrlh6IY3c1HZ/lvOXOdhJRsdk3uxrML97J47+VHGnQCmGoZVGubXoiHKT07Hyszo8ddDCVNz4d/WrmFeFCFRQpy8wsrvCO47OAVwmKTeatfU+a/2Fbt88s3UnVdRIwNZbbgv0GNCTrLuyrV04OXuzdk5dFoLsbd1Wn+i/dexsvJim6Na/NsB29+2R3O1y+2K/fL6de9l5kbfI7oxDTq2FvwSk9f6jlalbvdw5dv8eGK45yKTsLKzIjBgZ5MGOinli4jJx/rUUuVncKuc/H0/mIbAHM3n2Pu5nMAeDhYEvPtcLX0JQoKi5i35TxL90cQnZiOubEhnXxd+HRYAM097JXp4pIzcX/jTyYF+dOwti1zN4cRfiMVOwtjRnasz4wRgRga3O8kzsUm8+32ixy4dJO4O5kANHO3590BzRjapl41arx69ZmUnsO01afZdPIat1KzcbIxY2CAO58+HYCjtalK2tz8QuZvPc+fh6OIvJmKiZEBPq42jO7WUHlV32/m35y/fpfr349QWTchJZvary1n4qDmzHw2UK3OWnk7Mn3NaS7Fp1DbzpyPgvx5uVtD4pIzee/3Y+w6d4P8wiKebOXB9y93UGtHN1Oy+GztGbacjiUhJRsHK1MGBLjz2TMBOFmbKdON/ekgv+69zLXvRjB55Um2nIklI6eAgHoOzH2hDW3qOwFU2V4AVh6NZsHW81y+kUpOfvGttTb1HZnzfBvqlLrYK6v0ftext+CHnZeIuJnKU609WTa+m3Lb3/59gdBryRQWKWjmbsf7A/3KbRsVHe/hC/aw6YPeDGjprvWxKe98eJjlzskvZNrqUyw7eIXkjFwau9kxZWiLCuustILCIoyfW6z8W3/4IuX/R3/zDJ732vqGE9eYExxG6LU7KIDmHva8N8CPIa09NcoHYPWxGL7bfoGzV++QX6jA3cGCvs3rMuf5NhjcGwWuTh/xIO29pA1fmvc0k/46wZ4LN8grKKJtfSdmP9+GFp61NNq3qo6VJueAJtupyI6wePrO2Ma8F9ry9hNN1T5vM2kjcXcyif1+hLKuw2KTmbb6FAcuJZCZW0A9Ryue79yACQObqfSv2rT1ZQev8MJ3+9gwoReHLiWw6lgM8cmZfP9yB8ZWMIoefSsNgP4t65b7ecPaNmrLNC07FPdnM9adZcuZ69y4m4mthQn+HvZ8ONifro1dgeI5mQWFRZz5aohyveBTsSzdH8Gp6CQSUrOxMDGkvY8znw4L0LhdiIerxgSdFUlMywGgtr25zvJISMlm29k4JgX5o6cHL3RuwNdbz7PyaDRjujdUSfv11vO8+/sxArwc+Oq51mTnFfLTznBsLYzVthtyJZGen2/F1tyYj4P8sbM0YdWRaIZ9vbvKMgV4ObD+/V4EzdnJsHZevNarEVD1aM7Ib/ex6mg0vZrV4bVejUhMzebb7RdpP3kTe6Y8oQxSSvx5OAorMyOmDg2gjr05G09cY8aGs9hamvDxYH9luu1h8YREJjKktScejlbczcxl+cErDJu/m9/e6MILnRtUuU/l0aY+U7Py6DA5mCu3UhndtSEtvRw4E5PET7susTMsnhMzBivXyysoovcX2zgYnkDf5m680LkBJkYGnItNZuOJqw90K2nnuXh+2hXO2B4NGdGxPkv3RTD2p4OYGBoweeVJ2vk4M/mpFpyMus3yQ1cwNTLgl1c7KdePTcqg/eRNZOUVMLa7L94u1kTfSuP7HZfYe+EGJ2YMxsZcdf97fb6Vp9vWI/iDPtzJyOX9P47R/8vtxHwzHCszoyrby/oTVxmxYA/dm9Tms2cCMDcx5PqdTHaExhGfnFVp0Fni9wORmBgZMO3pAOo5WpFfWATA5JUn+WL9WXo0rc20p1tiZKDPuuNXGTZ/N9+Mbs//ejdWbkOb4/2wPIxyP7twDxtOXGNIa096+blxNTGd5xbuxcvZusr8DfT12TOlP6N/OICxoT4/ju2o/MzFtrhf+37HRcYtPoJvbVsmDWmBnh4s3RfJ0Hm7WDCqHeP7Nqkynw9XHGfWpjCauNnx7gA/nG3MiL6VxpqQGGY+G4iBfnFb0LaPeND2XqLX51t5vXdjZj4bSEJKNhOWhdD1080c+/xJGtWxrXTfNDlWmvSZ2hxztfI3q4OXkxWL915WCzrPX7/LiajbfDS4uTLgPBmdRNdPN2NkoM8bvRvhYmvOltOxTPrrBKdjklj9To9K97kq4xcfoYmbHd+81B5bC2PsLU0qTOvtUtxO1xyLoWtjV7WgsSxtyh6TmE6HKZu4nZbDqC4+tKhXi4ycAo5F3mLv+RvKoLM8v+65TG5BIS91a0htO3OuJqbz8+5wOk0N5uTMwfjWrrxdiIevRgedufmFfLnhLBYmhozo4K2zfJYeiKCwqEgZOPl71sLP3Z4l+y6rBJ0pmXl8svIkzT3sOfjpQGVnNrqbD43eWaO23Xd/P4YecOSzQdRzKh7NeLWHL73uXY1Xxs7CRHl7v24ti0pP3BI7z8Wz6mg0w9p58eeb3dG7N73t6XZeBHy4nvGLj3B8xmCVdVKy8jgxczB2FsUdVqC3I0cibvHt3xdUgs5Xe/jy/oBmKuu+3a8prT7awBfrzlYr6NS2PmdvCiMyIZVvR7fnjVJfDv6etRi3+Agz1p9h1sg2AMzfeo6D4Ql8MqQF04cFqGynSKHQuqylhV1LJmz2U9S/15GP7Fgfz3F/8uL3+/j8mUA+GtxcmTYjp4DfD0Qy/8W2WJoWj/68ueQombkFnPoyCC+n+yO6gwM96Tg
lmAXbLjDlKdVRtGc71ldZZmliSLfpW1hxOIpXe/pW2V42HL+KnYUJ2yf1U34pAkweotloHUBSei5XFg7D1fb+BeDpmCS+WH+WcX0as/Cl9srl4/s2YcBX2/n4zxO82LkBlqZGWh/vh+VBy70jLJ4NJ67xak9ffhhzP2Ds0ay2cmStMnp60LWxKxYmhpgaG6gdm7uZuUxcfhxvZ2uOffEk1vdGCV/v1ZiAD9czcflxRnTwxsHKtLzNA8UXuLM2hdGjaW02T+yjMv/9y2dbK/uC6vQRD9reSzzZykOZtoGLDZs+6E29cSv5YFkIwRP7VLhvmh6rqs4BbY55efT0YHT3hnzy10mOX7lN6/qOys8W771c/Hm3+98Xb/92lNz8Qo58Ngg/9+IR5HF9mjBi4R5WHY1my5nr9G9R/sijJhytzdjyYV/lMazMsx28WbD1PD/vDif4VCzdmrgS4O1IuwZOtK7viH6ZjWhT9v/9epiElGx2TOpHz2aqdyyr6mv/GNdV7S7QqK4++E1Yy/wt5/mp1AWaeDRq7CQIhQJe/vEA56/fZeFL7VW+MB62JXsj6OTrogwMAV7o0oCjEYlcik9RLtsRFkdWbgFvPdFU5erZydqMl7qqzjdNTMvmaEQiz7T3UtmuoYE+7/ZXDd4elg3HrwLw8b0R2xJ+7vY82cqDk9FJxCVnqqwzoKW7MuAsEejtSEJKtsprYMp2DLn5hSiAJ1rUJTIhleSMXK3Lq019QvFonaO1qdqDGK/08MXZxoz1J64pl/15KAo7CxMmBfmX3YxaB6utQa08lF/AAC62ZjS8d0X+Vj/VEanOjV3ILyzi6u0MoHi0dvPpWAYHelDbzpyc/ELlP3/PWvi42rDrXLxans93qq/yd6B38RfelQTN5mLZmBuTnpNP8KnYagfdT7Soq3YerjgUBcCb/Zqq7EtOfiEjOniTnp1PyJXbgPbH+2F50HJvPHEVgIlPNlfZRs9mdWhZz+GBy7czLJ7M3ALG92uiDDgBrM2MeLNfE3LyC/n7bFyl2/jz3v58MTxQ7YHL0s29On3Eg7T30kaXuWvkZG3GgJZ12R4WT3ZeQYX7ps2xqszD2M7org0xNNBn8d7LymV5BUUsO3iFLo1c8b438p2Yls2RiFsMCvBQBm1QfCxK+qSSY1Fdo7r6aBRwApgZG3J4+iC+GB6Ii60ZK49G897vx2g/eRP131zF1jPXlWm1KXtyRi7bw+Lo519XLeCEqvva0t8rRYriealutSxo7lGLkMhEzXZOPFQ1cqRToYDXFh1ixeEoPh/eSuMvpLjkTAru3ToDMDLQr/K24cHwBCJupvJcp/pcSUhTLm/bwAk9veIr2Nn3Rs+iE9MByr0V1MhNdVn0reK0vuWkbeymm1sGMYnp6OvplVu+Zu72rDt+lehb6SoPa7mVUz8lHcGdjFzlVX9mbgEz159lTUgMUbfSKCxSDVxSsvIqvb1THm3qE4r3r5W3o9qtIUMDfXzr2HIoPAGForhzjExIw9/TXidvPChvvqm9pQlO1mZqE/lL6uROevE0kcs3UilSKFi6P5Kl+yPL3b6PQn1+lVst1eNkbmKIgb4ed9I1C/bfH+jHznPxDJm7k1pW/cU9HwAAIABJREFUJnT2daWPvxvPtPNSu5VfkXqOlmrLSi7KfN5eVeF6ianZgPbH+2F5GOU2MzZUzr0srbGbLadjkh6ofDG3i+uliZud2mfN7n3pl9RdRSIT0tDTA79SczLLzasafcSDtPfSSo/q319mTUFhEbFJmeXOKwTtjlVlHsZ2XGzNGNjSnb+ORDHvhbaYmxiy8eQ1ktJzeLlUUB1z73g1qat+TBvVscVAX4/oxDS1z7RRr5z6rIyVmREfDW7OR4Obk51XwKnoJNYfv8p3Oy7y1LxdnJoZRGM3W63KfiUhDYUC/D0rb3cViUlM59M1p9kZFs/NlCyVz+rWqnrKj3j4alzQWaRQ8OrPh/h172U+e6aVyi3eqnSaEsy1pPtX2N7O1kQuGFbpOiVXrFNXnWLqqlNqn/9x8AozRgRiZKBPyQCRHupXb2UHj7RJ+7BUtllFBZlWdiFaep2h83ax+/wN3uvfjPYNnbG3NMFAX48l+yL4ZXc4RUXa79TDrKOy+6dQKMrdblkVpSgoKqrgEzA0KH+tyuZJKZT/Lf6/UV18eKWCeaXlzdutqJyKSo/6fe4OloTNfoo952+w61w8By4l8PqiQ0xZdYrdk58oN+Apq7wAXqFQoK+nx96p/TGqYP9LRsm0Pd7VOTblefByV1zHD+Ncrmwbmm6/pL1X1eKr00c8SHsvLbegUKNl5ZVL02P1KLYztqcv609cZfWxGF7s0oDFey9ja2HMU6UeRNK2XVSnrZs8wNPgZsaGdPR1oaOvC+6Olryz9BjLDkYyY0SgVmUv6X806WvLSsnMo8OUTSgU8MGTzWniZoelqSH6enqMX3KEhJSqLyTEw1ejgs4ihYKXfzzA0v2RfPZMq3JvjVbmp1c6kZV7/zaNpWnl1Zeenc+aYzH0alaHseUEAOeuJfPZujNsPh1LUKAn3s7FV5YX4++qzOcBuHwjReVvb5fitJfi1Z+6Dy+TtiLansjeztZsV8RxKT5F5bYIwIW44jy9nLW7OobiJxO3h8Yxvm8T5ROUJX7ceUnr7d0vr+b1CcVlv3wjhYLCIpUvvMIiBZdvpOLpaKUMon1cbQi/kUJufmGlo532liblTg2ISniwUYiK1He2QU8PElKyaNvAqeoVtFBVezEy0KdPczf6NHcD4GhEIh2nbmLWxlCW/q9rtfJs4GrD36FxGBvqqz2AUpa2x1uXx0a7cluzIyyeq7fT1UY7NT2XoeILvJJbshfi7tKjaW2Vzy7ce2tHeaOEpfnc25+w2GTl1IuK8tJFH6GJC9dTcGpiprLsYtxdDA30Kx3V0uZYVXYOaLOdyvT2q4OHgyWL916me9Pa7AyL5/VejVQuFkvq8ML18vr/VAqLFHg53Q9wH3U/VFoLz+IpIiXTKrQpewOX4v7s7LU7Wue77WzxawrXvNtT7Q0NSWnqI+Xi0agxczoLixS8+N1+lu6P5IvhgVoHnFDcGQwO9FD+K2+OSWl/HYkiM7eAV3s1Ymibemr/Jj7ZHHMTQxbvKR4N7eVXBzNjQ77ZdoHc/PtX6LfTcliyL0Jl207WZrRt4MSqozEqo6+FRQrmbzmn0f5YmBqip4fG8yWfDPQA4MsNoSpXq+ev32XjyWu08nIo93Z6VUo68tJTF6D41srqYzFab6+ENvUJMLiVJ7fTcli057LK8l92h3MrNZuge/sPMKKjN8kZuXy5MVRtO6XrpoGrDdl5BRy4lKBcVqRQMCc4rNr7VZlaViY84V+Xv0Pj2FJqHlXpvBPTqneFX1l7KW9Zi3q1MDE04O4D/FzpyHtzTT9YdpycfPVRq9K3zLQ93ro8NtqUe1Cr4nY1e5Nqvnsu3OBUtOa31q3MjMo9Dr2a1cHCxJBv/75Aevb9edTp2fks3HYeUyMD+vq7Vbrt4fcetJy88iR5Barnaen2rqs+QhNzN4epzC
c+HZPEznPx9ParU+kvzmlzrCo7B7TZTmX09fQY3b0hB8MT+PjPE8WDJeXMV23v40zw6VjOlwreFAqYsf4sAINLBVq67ocOhScQm6Q+zxaKvwfh/lQwbcpub2lCHz83tp29zt4LN9W2XdmoaclFWNnvleWHrqh8Z4pHq8aMdE5cfpzlh67g42pDYVERn687o/J5k7p2BAV6PtQ8F++NwNzEkL7Ny+/QzU0M6edflw0nrnLjbha17cyZPiyACctC6DQ1mJGdG5CdV8DPu8Jxd7BU6+jmPt+WbtM30+6TjfyvTxPsLIxZdTSatGzNfqfZ1MiA5h61WH/iKk3d7XG1NcPCxIiBAe7lpu/VrA7D2nnx15Eo7mbm0r+l+73XoVzAyECfb0a3L3e9qrjYmtHOx4mfd4djZKBPc89aylf8NHaz1eqLtzQ7CxOt6nPCID9WH4th3OIjnLl6B3/PWoRevcOiPZfxdrbmo1IXKm/3a0rwyVg+XXOa41du06NZbUyMDLhw/S7Xbmew5cPip2XHdG/IVxtDGTpvF//r0xhTYwM2nrim8oT3w/b9mI50mhrMk7N2MKKDt3LULyohjQ0nrzGme0M+0eKp8hKVtZeBs7ZjamRA50auuDtYkpGTz/JDV8jJL+T5ar7uCoofaJr2dADTVp+i2ftrGdmpPnXsLbhxN4uTUbfZeS6e7D9eArQ/3ro8NtqUu09zNwYGuPPDzkvcTsuhp18driam8+32izRzt+dcbLJGebap78TXW8/z4Yrj+HnUQl8PBgZ4YGthzFfPtWbc4iO0/WQjL3YpnsO+dH8EVxLS+HpUu0qfXIfiOejvDWjG3M3nCPx4A0Pb1sPFxoyrt9NZdTSGc3OewtTIQGd9hCZu3s2i74y/CWrtya2ULBb+fQELEyO+eq51petpc6wqOwe02U5VXu7WkOlrTrP80BUCvBzwL+edkl+PakfXTzfTeVowb/RqjItd8WuHtofGMaS1p8qT67ruh7aHxjFzQyidGrnQroETjjZmpGTksut8PEcjEvF2tub1e6+Y0rbs34xuT4cpwfSZsY1RXRrQop4DWbkFhEQm0qiOLZ+WeXtIiW5NamNnYcK4xUeITEjDxdaMkMhE1oTE4ONqQ2ZuxQ+XCd2pMUFn1L2X10bcTGVKOXMrn2nn9VCDzotxKYRcKX7vZGVX2UNae7I2JIbf9kfw8WB/3hvQDGszI+ZtOccHy0LUXm5dWjsfJ3Z+8gQfrTjB5+vOYGVqxOBADz4Y1LzSyeyl/fpaZ97+7SiTV54kK7cADwfLCoNOgGXjutKiXi2W7ovk/T+OYW5iSGdfV6Y93bLcjlFTq9/pyQfLQvjraDQ/7w6nsZsdP4zpQHRierWDTkCr+rQxN+bQ9IFMW32KTadiWbIvAidrU8b2aMinwwJUnsI3MTJg1+QnmB0cxl+Ho5j010nMjItfDv9yqdea1LG3YOMHvflw+XFmbjiLvaUJz3Wsz//6NMZr/Mpq71dl6tay4OTMwczaGMrGk7GsPhaNuYkhbvYWBAV6MrRt9V+2X1F7ebmbL6uORvHzrnDuZORgZ2GCn7s9Wz/qW+FFl6amPNWCAC8HFm49z4Jt58nKLcDJ2oymde1YMKqdSlptjreuj4025f7rre5MWXWK5YeuEHw6lsZ1bFk2vis7Q+M1DjonBbXgxt0sFu25zN3MXBSK4pfDWzha8UbvxrjamTMnOIzpa0+jUBS/HH71Oz1U5gpWZvbINvh71uL7HReZtTEUBeBey5JBAe4qcxh11UdUZfOHfZi4/DiTV54kO6+Qtg0cmT2yjUbzibU5VpX1mdpspzK17czp38KdTaeuqbwmqbRWXg4cnj6IaatP8cPOS2Tm5lPPyYovhgcyYaDqG0x03dZf7t4QSzMjdobGs+zgFW6lZmNsqI+3szUfD/bn3QHNVPpPbcru7WzNyZmD+XztGbaeuc7S/ZHYWZrQ0rMW3ctMFynN2caMbR/15cMVx5m9qfiOVNsGTuyZ3J+P/jyhMsoqHh09RWWz2IUQ4j9gTUgMw+bvVvlFIvHfUPKLRHnLR1f5UvJ/k2e+3s3m09e58eOzGr8BQoh/uv/OGSqEEEL8B8QnZ7LhxDWGt9f8lWNC/BvUmNvrQgghxD/Z6Zgkzl+/yw87LqGg+P23QvyXSNAphBBC/AMs2RvB9zsv4uFgxW9vdKnyN+OF+LeROZ1CCCGEEELnZE6nEEIIIYTQOQk6hRBCCCGEzknQKYQQQgghdE6CTiGEEEIIoXMSdAohhBBCCJ2ToFMIIYQQQuicBJ1CCCGEEELnJOgUQgghhBA6J0Hnv1BGTj76wxfxztJjj7soOrX5dCz6wxex4cS1x10Urf2by/4g1oTEoD98EZtPxz7U7Y796SD6wxdRUFj0ULdbmZpynmlrR1g8nadtxmHMH+gPX4T+8EV8uuY0jd5drfx7TUjMIy3T5+vOKPMesWDPI81bU9q2J2l/4r+oRv0M5vU7mczeFMru8ze4lpSBsaE+te3Mae3txIiO3vRqVudxF7HasnILmB0cRjsfZ3r7/Xv342H5fsdFDA30eaWHr87yuBiXwupj0Yzo4I2Pq43O8hHakXNBdy7FpzBo1nbyCh5d8P9vom3bq+lt9VH00+KfpcYEnZfiU2g/eRNZuQUEtfbkuU71KSxScOVmKlvOxJJfWPSvCTotTY3I+uMlDPX1lMuy8gr4dM1p3hvQ7D/TeT3Roi5Zf7yEsaH2A/I/7LiEqbGBboPO+Lt8uuY0AV4OakHng5RdPJiHdS6Ud57VdMGnYpUBZ5v6TmyY0AsLE0OMDQ0wMzbkdlo2AA1r6EVYZW2vpvTb2ngU/bT4Z6kxQeeM9WdJzcpj3Xu9GBzoofJZQWER15IyHlPJNJeTX4ihvh6GBvqYGhk87uKQnp2PlZmRzravr6f3yPbzYe/Loyy70J2adgyrOg/ikzOV/+/vaY+zjZny7w8G+em0bP8FNa09CVFWjQk6o26loacH/VvWVfvM0EAfb2drteUno5P4Yt0ZDl1OID07H09HK0Z2qs+HTzbH0EB1BOtUdBJfbgzlwKWbpGbl4WxjRudGrnw+vBUeDpYAmD2/hKFt6vHHuK4q6/51JIpnF+5l60d96dvcDSiew/br3stELhjGJ3+dZPf5G9zJyCF8/tO42ppjPWopb/VryvwX27LrXDy9v9gGwNzN55i7+RwAHg6WnJ01hDqvr6BXszqsf7+X2j5OXH6c2cFhnJ8zlMZuthXWX4O3VuFia8bnz7Ri2prTnIpOwsbcmOvfj9C6rvZfvMnHf53gTMwdrM2NGdLak/cH+lH/zZW8N6AZs0e2AYrnRQ6atUPlQiG/sIhZm8JYcegK15IyMNTXw62WBd2b1GbhS+0pKCzC+LnFyrz0hy9S/n/0N8/g6WhV6b4kZ+Qye1MYu87HE5OYTmZuAZ6Olozs1ICJg/yU+zJ55Um+WH8WgEGzdijzeLFLA5a83qXcsgMkpecwbfVpNp28xq3UbJxszBgY4M6nTwfgaG2qTLfs4BVe+G4fmyf24fiVRJbuj
+RmShYeDlZ8HOTPi10aVHisSpu/9TybT13jUnwKd9JzcbIx44kWdfn8mVYq+cUlZ+L+xp9MCvKnYW1b5m4OI/xGKnYWxozsWJ8ZIwLVjuOvey8zN/gc0Ylp1LG34JWevtRztNKoXCXCYpOZtvoUBy4lkJlbQD1HK57v3IAJA5up5QdwOz2HD5cfZ8uZ6+TkF9K2vhOzn29DC89aAJWeCzHfDteqTjJy8lXOs+rWU1kdpmziSkIacT88i1GZtFvPXGfAV9v57uUOvN6rEaB5m/lyYygf/3mCiK+HUd9FtT9r9O5qbM1NOPr5IOWyqs7p0q7eTsdr/EqVZT/tCuenXeHl7uOqd3owtE09oHi+5ZRVpwB4pp0Xr/VqxPS1pwm5chtDfT16+bmx8KV2uNqa31//aDR/HIjkYnwKyRm5ZOQUB8O+tW0Z3sGbN3o1qrKeq3IzJYvP1p5hy+lYElKycbAyZUCAO589E4CT9f1guqQvvvbdCCavPMmWM7Fk5BQQUM+BuS+0oU19J6Dqtle2PWnSViuy8mg03/59gdBryRQWKWjmbsf7A/2UdV6i5Bj/NLYT7/5+lCMRiZgaGTCyU32+eq41RUUKpqw6xYrDUSSl59Da25Efx3akUR3V74Lc/ELmbjnHikNXiLqVjqmRAR0bOjP9mVbKcw8077c06afFf1ONCTrru1hzLDKRPw9H8ULnqr+wt565zpC5O/F0tOLtJ5rhYGXK0YhbTF19irBryax6p4cy7aZT13h63m6szIwY070hXs7WJKRk8/fZ64THpyiDzuro8dlWggI9+evt7uTkFVLL0lQtTYCXA+vf70XQnJ0Mu9epQ/FVtY25MU+3rcfyQ1EkpGTjYnu/My0oLOKPg5G083GqNOAsEZmQxtD5u/g4qAWfP9OKOxk5WtfVofAE+szYhoOVKR8H+WNnacKaYzEMm79bo/qY8EcI32y/wOiuDXmnfzOKFAqib6Xx99k4AAz09dkzpT+jfziAsaE+P47tqFzXpdSXWkX7Ep2Yzu8HIhnath4vdvGhSKFgR2gck1eeJCYxjUWvdgZgdLeGGBro8+ma03wxPJB2Pk738rhfv2WlZuXRYXIwV26lMrprQ1p6OXAmJomfdl1iZ1g8J2YMxtbCWGWd8UuO0MrLgUWvdcbEUJ/ZwWG89MN+6rtY06Ghc5X19dXGUHr71aFP87rYmBtzOiaJpfsjOHw5gVMzgzApM/Ly5+EorMyMmDo0gDr25mw8cY0ZG85ia2nCx4P9lem+3nqed38/RoCXA18915rsvEJ+2hmuVv7KnIxOouunmzEy0OeN3o1wsTVny+lYJv11gtMxSawu1W5KPDHzb5yszfjsmVbcSsli4d8X6PrpZo59/iSN6thWei5Ut07Ko2k9lWdsD19G/3CATSev8VSZIGHJvgjMjA15toM3UL02o42KzgNdOXApgdXHYihSKJTL1obEkJSew94p/ZXLdobFs+XMdZV1UzLzOBaZyLHIRA6HJ7DybfX2oanYpIzi6VZ5BYzt7ou3izXRt9L4fscl9l64wYkZg7ExV63XXp9v5em29Qj+oA93MnJ5/49j9P9yOzHfDMfKzEijtleatulLlFzw9mham2lPt8TIQJ91x68ybP5uvhndnv/1bqySPiElmx6fbWFggDt9/euyMyyer7eex1Bfj8s3U0nOyOXNfk24k57Dwm0XCJqzk4vzhqKvVzwNIL+wiH4z/+ZgeAIjO9Xnf32akJqVx6Ld4XScEsz+aQNo5eWgkmdV/Zam/bT476kxQedHg/3ZcOIao77fz6xNoXRu5ErLeg508nWhYW3V+Uc5+YW8/OMB/NztOfjpQOWX0Ks9fWnsZseHK45z4FICnRu5kJlbwMs/HsDO0oTTXwZR2+7+CTPlqRYqnWt1DGntybwX2qosy8jJV/nbzsJEGYDUrWVB18auKp+P7eHL0v2R/H4gUuUW2JYz10lIyebzZwI1Kktiajbr3+/Fk63uj9xpU1cA7/0RgqGBPkc/f5K6tSwAeK1nI3p/sVWjMqw/cZWBLT345dVOKsu/fLY1AHp60LWxKxYmhpgaG6jVRWX7AtC0rh3XvhuuMoryZt8mvPLzQRbvjWDq0ADq1rKgnpMVTeraAdDM3a7CfEqbvSmMyIRUvh3dnjdKfTH4e9Zi3OIjzFh/hln3RnlLeDpaqny5NveohdsbK/hu+0WNgs7Ir4ep3S7t3MiF57/dx9rjV5XBTYmUrDxOzByMnYUJAIHejhyJuMW3f19QBlMpmXl8svIkzT2Kj3nJl+Tobj40emdNlWUq8fZvR8nNL+TIZ4Pwc7cHYFyfJoxYuIdVR6PZcuY6/Vuo3plwd7Bk/fu9lF+IQa3rEfDhej5YFkLwxD5VngvVqZPyaFJPFXmmnRfv/n6MxXsjVILO22k5bDp5jeHtvZQBT3XajDYqOg/KcrU158C0AXy99Tzrjl8F4MlWHrw3oJkyzchv9xFbxTSlmylZDGvnxQeD/DgVncRriw6hUBTf/Qi9lkxzj+J28ETLuvTxd8O3ti12FsYogPD4FEb/cIC45ExWH4vho6t38C81yqaNN5ccJTO3gFNfBuHldH9UbXCgJx2nBLNg2wWmPNVCZZ1nO9ZXWWZpYki36VtYcTiKV3v6atT2StM2PcDpmCS+WH+WcX0as/Cl9srl4/s2YcBX2/n4zxO82LkBlqb323fUrTT+GNeV5zrWB+Ctfk1p+eE65m45Rx8/N/ZPG6A8n5xtzXnv92PsDIunz727bt9uv8i+izdZ/U4Plfb6Ws9GNH1/DROXh7B78v0LBqi639K0nxb/PTXmKYdGdWwJnTWE8X2bkJNXyI87L/HKzwdp9O5qOk/bTMTNVGXanWHx3ErN5rVejVFQHFiV/Btx7wtp17l4AHaExXEnPZd3+zdTCThLlJzM1TW6W8MHWh+gvY8zTevasWTfZZXli/dextLUiGHt6lWwpioHK1O1Lydt6upWajYnom7zTDsvZcAJYKCvx7sDNJsPZmNuTOi1O4TFJmuUXpt9geJRhtIBZ15BETn5hQxq5UGRQsHJ6NvVznP9ias4WpuqTZp/pYcvzjZmrC/n9UojO6mOypfcYoxMSFVLW57SwVVBYfG+PNGiLvp6ehyPTFRLP6CluzKQKhHo7UhCSrbyYmdHWBxZuQW89URTlVEZJ2szXurqo1G5EtOyORJxi0EBHsqAE4ovGiYFFQdtG+4FN6VNGOinck4197Cnr78b28Piyc4r0ChvbeukPJrUU0XMjA15rkN9tofGEVdqjuQfByPJLyxSOeer02a0UdF5UJaJkQEdfV1UboG72JrR0ddF+c/MuOoRYkdrU357owst6zkwtocvjevYKT8r3QcPaOlOalYe01afYuCsHfSYvpXXFx3mbmauMs1RDY9VWalZeWw+HcvgQA9q25mr9Fn+nrXwcbVR9lmlPd+pvsrfgd6OAFzR8Fx8GFYcigLgzX5NVcpd0t+mZ+cTckW1j3K2MVMGnFB8jnVu5IpCAeP7NVE5n7rcGxyITEhTLlt+8Ar1nKzo
39JdJT9TYwMGtHTnYPgtcvMLVfJ80H5L/HfVmJFOgHpOViwY1Y4Fo9qRnJHL4cu3+G1fBOtPXGXQrB2c+SoIM2NDLsWnADDmpwOM+elAudtKTC1+SjPyZvHJWd0rbk3K/DCM6eHL278d5VB4Ah19XbiZksW2s3GM6qJ6VVwZT0f1aQLa1FVMYjoADWur38r3ra3Z066zR7ZhxMI9+H+wjnpOVnRr4soTLdx5spUHBlo8ZVzevpT4ZXc4i/Zc5lxsMjllOtO7GXka51FWTGI6rbwd1eaiGRro41vHlkPhCSgUxV8KJdxKBeclrM2MiLqVrlGe+y/e5MuNoRyLTCQ1S7Xspb/AlfnZq+dXEqTdycjF0tSI6HvHsey8L4BGGkzTgPttoWS0WGUbdWwx0NcjOjGtnM/U0zeuY8vWM9eJTcpUu2tRHm3rpDya1FNlxvb05bsdF/ltXwSfDCkePVuyN4IGLjZ0KTXqU502o43KzgNdCKjnoHKhYmd5P3BPziiu+yKFgj5fbGPfxZuVbkvTY1XW5RupFCkULN0fydL9keWm8VGot6Oy56K5iSEG+nrcSa9eOaqjpL/1eXtVhWlK+tsS5c2PtL9X72W/X0qW30m/P83iUnwK2XkFmD+/pMI8kzNzVS5IHrTfEv9dNSroLM3e0oSBAe4MDHBnyNydbDhxjQOXEujT3A0FxbfEF4xqp7yaLcvp3lObJWk16fQrSlJQWPEteJOH9Mqd5zvV56MVJ1i8N4KOvi4s3R9JQWERL3fXfCS1vLluWtVVJTMNNJ2F0Ke5G9ELh7P17HX2XbjBnvM3WLw3gkBvR/ZO6Y+5iWZNuqJ5eyUPYwxr58V7A5rhYmuOsaE+Z6/e4Y1fDz/wdImKKCrYbkVtpqL0pR0KT6Dn51vx96zFvBfa4ulohZmxAQoFdJ4WTFE5m6isHZfkWZK1Xjml07R6qluN5ZVPm01Vp040LYeyPBrsnJ+7PW3qO7FkXwSTgloQciWRC3F3mfmsZlNdysunsjJV1MdoMn/1YSo7T7L0hWLJ/mw7G6cMOC1Njdg4oTcdGjpjbKjPEzP/5u/Q4vnbRZoerDJK+qxRXXx4pWf5r+opb15lheeiVi3wwSgUCvT19Ng7tb/aQ2glyj5EZmhQccMw1K/6+0WhUODnbq8y77Ksss8aPEi/Jf7bamzQWZq/pwMbTlxT3upq4FJ8lZuVV0DbBk6VrlvyfsazV+9U+Z5Pe0sT5dV8aVG31Ed0tFVeAFCanYUJQ9t4svpYNF+PaseSvRE0rWunfPKyurSpKy/n4qvqiJspap9dvqn5bRdbC2Oe7eCtnHv31aZQPlpxgr+ORDO6W/Ht3eqO/Py2L4Jm7vb89VZ3leUlIwylaZuFl7MVl2+kUFBYpDJyVVik4PKNVDwdrapd7vIsO3iFIoWCbR/1xcHq/pdCTGI6hdX8wgbwvnccL8bfpXV91QuNyzfU66k8JW3hwvW7ap+F30ilsEiBl5P6GyUuxt2lo6+Lavr4FAwN9JVTNio7F3RVJ9UxtocvY346wN6LN/jrcBSGBvq82Fl1eoI2bcb+3u3+sn1MfmERsUkZKvv7TxZZqi8I9HagW5Pikd/c/ELOXrvzwNuv72yDnh4kpGRV2Wdpq6p++EHTN3C14e/QOIwN9R+479Ymz2tJGbSs5/DQ3zv8MPs78e9QY+Z0Bp+KJT1bfa5Vdl4BG09cBe7fLuzT3A0nGzPmBIdx/U6m2jqZuQWk3dtWbz83almZMG/zORJSstXSlr6w83G14WjkLeW6UHyL6Jfd5b92RBsWpobo6al/4ZQ2pocvmbkFjFt8mMiE1IcyX1SbunK2MaOVlwMrj0SrvO+vsEjB11vOVZmXQlH8EEtZJZ1v6dttVmZGldadKrGQAAAgAElEQVRFRfT0oLCoSOW45eQX8vXW82pprcyKR200zWdwK09up+WwaI/q3NpfdodzKzWboDLvj31QJR162VGuLzeGPtB2e/nVwczYkG+2XVCZy3U7LYcl+yI02oaTtRntfZwJPh3L+VKBp0JR/E5dgMGtPdXWm7v5nMqxORebzN+hcfT2q6Mc5a7sXNBVnVTHM+29sDYz4pttF1h5NJr+Leqqvf1AmzZTcgFcMhJYYt6Wc+Q/wp8PfVAepW75n4hKYs+FG0TcTGX0jwfK7WO1VcvKhCf86/J3aJzaE/JQfHs/Ma16+WjSDz9I+pH35pV+sOy42tQfKH5Q62F7vnMDUrPymLLqZLmfP0ie1e2nxb9XjRnpnLUplBEL99DHz43mnrWwNDXk5t0s1oZc5ertdIa09qS9T/GThBYmhix9owtBc3bS7P01vNytIQ3r2JKSmcel+LusP36VzRP70NHXBQsTQ35+pRPPfL0HvwlreblbQ7ycrUhMy+Hvs9f5ZEgL5VOA/+vTmKfn76bLtGCe69SAjOw8luyLoJ6TFTfuPlhnYWpkQHOPWqw/cZWm7va42pphYWLEwAB3ZZpOvi40qmPLsoNXMDbU5/lOmr3rsTLa1BUUz8ns/cU22n6yidd7N8LW3Jg1x2KUgWllF765BYXUfm05T7bywL9eLVxtzYlLzuTHHZewNDViSKkgpU19J77eep4PVxzHz6MW+nowMMADiypuvw9p7cnMDaEMnrODQa08uJOey2/7IrA0VV+vhWctjAz0mRMcRm5+IVZmRtRzsqpwBGLCID9WH4th3OIjnLn35G3o1Tss2nMZb2drPgqq/KlnbQ0O9OSnXeH0/mIrr/RshJ4eBJ+8RkJKdoW35jRhZ2HC9GEBTFgWQqepwYzs3IDsvAJ+3hWOu4Olxl8iX49qR9dPN9N5WjBv9GqMi13xK5O2h8YxpLWn2pPrUPy+yH4zt/FkoCeJqdks3HYBM2NDvnqutTJNZeeCruqkOixMDHm2Y31+3HkJoNypLtq0mc6NXPFzt+fzdWe4nZqN973XxJ2Iul3uQ47/VH383PB2tibqVhoZOfn0/Kz4zRYWJob4uNqoPHBUXd+P6UinqcE8OWsHIzp4K0fsoxLS2HDyGmO6N1TOtdWGJv3wg6QP9HZk2tMBTFt9imbvr2Vkp/rUsbfgxt0sTkbdZue5eLL/eEnrclfmrX5N2BUWx6xNYYRcuU0//+JXjcUmpbPr3A1szI3ZMalftbZd3X5a/HvVmCM75/k2rAu5yr6LNzkccYvkjFysTI1oUteODwb5MaZMh9+nuRvHZwzmq42h/HUkmqT0HOwsTfB2tmLCoOYqD0AEBXpyYNoAvtwQyi97wsnIycfF1pzOjVxUHrZ4qk095r3QlgVbzzPpzxPUc7Ji0pAWWJsZcfjyrQfex19f68zbvx1l8sqTZOUW4OFgqdZ5jenhy3u/H2NwoCe1rEwq2JJ2tKmrLo1d2fZxXz756ySfrT2DlakRQa09eb1XI1p+uB4z44qbpJGBPm/1a8q+izfZdT6e9Ozieu7axJWPBvurTIqfFNSCG3ezWLTnMnczc1Eoil86bFHFS4enDg1AX0+PZQevsD00Djd7C17s6kO3xq50nrZZJW3Jk7hfrD/D+CVHyCs
o4sUuDSoMOm3MjTk0fSDTVp9i06lYluyLwMnalLE9GvLpsAC1p6EfVJ/mbvwxritfbQzlg2UhWJsb079FXf4Y1w33//35QNt+b0AzrM2MmLflHB8sC1F5OfzwBXs02kYrLwcOTx/EtNWn+GHnJTJz86nnZMUXwwOZMLBZuets/agvE5cfZ/LKk2TnFdK2gSOzR7ahiZvqA0YVnQu6rJPqeKWHLz/uvERtO3P6+asH2dq0GT09WPteT95ccpQl+yMw1NejZ7M67Js6QONXkv0TmJsYsvOTfrz3ewgHwm+Sm19E2wZOfPlsIJ+tPfNQgs66tSw4OXMwszaGsvFkLKuPRWNuYoibvQVBgZ4MbavZGz3Ko0k//CDppzzVggAvBxZuPc+CbefJyi3AydqMpnXtWDCqXbXLXREjA302T+zD9zsv8ceBSKavPQ1AbTtz2tR34gUNf6iiPNXtp8W/l55CZvbWKN/vuMi4xUfYPqnfP+q35neExdN3xjZ+fqWT2gWAEP9VF+NSaPr+Gj4e7M/nw1s97uIIIYRO1Zg5naJ4rtKPOy/h5WRFz6aPJ+AsUijU5pcVKRTM2xyGvp4ePZvVfizlEuJx+G77BfT19LR6i4QQQvxb1Zjb6zVZXHImBy4lsP3sdc5fv8vPr3R6bE8NpmXl4zdhLS90bkADVxvuZOSw+mgMIVcSebNvE/nNXfGfl1dQxLrjMUTcTOPn3eGM6OD90N7HK4QQ/2QSdNYAxyITGfnNXmpZmTBhoB8vP4Sn1qvLzNiArk1c+fNI8W/BKxQKGta2YcGodozr0+SxlUuIRyUtO49nF+7F3MSQAS3ddTIPTwgh/olkTqcQQgghhNA5mdMphBBCCCF0ToJOIYQQQgihcxJ0CiGEEEIInZOgUwghhBBC6JwEnUIIIYQQQuck6BRCCCGEEDonQacQQgghhNA5CTqFEEIIIYTOSdD5mAR+vIEWE9epLV9+6ApN3luDyXOL0R++iLNX7/DlxlD0hy8i/EbKYyipEEIIIcSDk6BTR7JyC/h0zWl2hMVrvM652GRe/G4/Hg6WLH69M6ve6fFIf5N565nrfLrmNHkFRY8sTyGEEELUDPIzmDqSlJ6D09hlvDegGbNHtlH7vCSwMza8H/fP23KO9/8I4eK8ofjWtlUuzy8sIje/EAsTI/T0dFfm8YuP8N2Oi6T99iKWpka6y0gIIYQQNY7h4y7Af01hkYLc/MIq05UONkvcSskGwNbcRGW5kYE+RgYyKC2EEEKIf68aNdKZm1/I/K3n+fNwFJE3UzExMsDH1YbR3Rryak9flXRzt5xjxaErRN1Kx9TIgI4NnZn+TCtaeNZSplt28AovfLePDRN6cehSAquOxRCfnMn4vk34eut5tfw9HCyJ+XY4UDyns6CwiDNfDSEhJZvary2vsvxlR0BvpmQxY91Ztpy5zo27mdhamODvYc+Hg/3p2tgVKL5l/+32ixy4dJO4O5kANHO3590BzRjapp5yW52mBnP48i21PBe/3plRXXyU+X229gxbTseSkJKNg5UpAwLc+eyZAJyszaos/+20HKJupeHuYEltO/Mq0wshhBDiv6PGjHTmFRTR+4ttHAxPoG9zN17o3AATIwPOxSaz8cRVZdCZX1hEv5l/czA8gZGd6vO/Pk1Izcpj0e5wOk4JZv+0AbTyclDZ9vjFR2jiZsc3L7XH1sIYQwM9ujR2JWjOToa18+K1Xo0AMDUyKLds9pYm7JnSnx93XmLV0WjWvNsTe8v7o51/Ho7il93hKuvEJKbTYcombqflMKqLDy3q1SIjp4BjkbfYe/6GMujcHhZPSGQiQ1p74uFoxd3MXJYfvMKw+bv57Y0uvNC5AQALX2rPZ2tPs+HENbZ82Acz4+KmURLkxiZl0H7yJrLyChjb3RdvF2uib6Xx/Y5L7L1wgxMzBmNjblzpMdh48hqv/HyQGSMC+fDJ5hodNyGEEEL8N9SYoHP+1nMcDE/gkyEtmD4sQOWzolKDvd9uv8i+izdZ/U4Pnio1Evhaz0Y0fX8NE5eHsHtyf5X1Ha3N2PJhX5X5lknpOQDUrWWhDAArYmyoT9fGrmw5HQtAex9nXGzvjxwei0xUW+d/vx4mISWbHZP60bNZnQr359Uevrw/oJnK52/3a0qrjzbwxbqzyqCzhWct6thZANDJ10VtTuebS46SmVvAqS+D8Cr1cNPgQE86TglmwbYLTHmqRaX7KYQQQoiaq8ZMFPzzUBR2FiZMCvJX+0y/VLS4/OAV6jlZ0b+lOzn5hcp/psYGDGjpzsHwW2pzNkd19dHpAz5lJWfksj0sjn7+ddUCTlDdHysz1eAxN78QBfBEi7pEJqSSnJFbZX6pWXlsPh3L4EAPatuZq9SLv2ctfFxt2HWu6qf0x3RvSNFfY2SUUwghhKiBasxIZ2RCGv6e9phUcIu7xKX4FLLzCjB/fkmFaZIzc3G1vT8n8VG+1gjgSkIaCgX4e9pXmTYzt4CZ68+yJiSGqFtpFBapTuFNycpTuZVfnss3UilSKFi6P5Kl+yPLTeOjsNF8B4QQQghR49SYoFOhUKBH1cORCoUCP3d7fhzbscI0tSxNVf42KedJdF1SUBw4arI/Q+ftYvf5G7zXvxntGzpjb2mCgb4eS/ZF8MvucIqKqn6OrCS/UV18eKXUA1elVTRfVQghhBACalDQ6eNqQ/iNFHLzCysd7WzgasO1pAxa1nMo97VGmtIkIKyuBi426OnB2Wt3Kk13MyWL7aFxjO/bhJnPBqp89uPOS2rpK5oiUN+5OL+ElCzaNnCqdrmFEEIIUXPVmDmdIzp6k5yRy5cbQ9U+K/3SqOc7NyA1K48pq06Wu52bKVka5WdhaoieHhrNmdSWvaUJffzc2Hb2Onsv3FT7vGR/SgLfgkLVXxj6P3v3HRXF9fYB/EsXFESxxRLdVWARBBFFRVFi7wEURbFjTwRLDPaCsRcCVhSxYgWsJMbeolFssUSj0QQVBZTQUeq8f/iyP9elLMiwGr+fc/Yc986de5+ZnVmevXdm/CsmGft++1tpvbzrP9+P2cRQD90a18HR358h4sZTpfVyBQFxya+LjPtl8hv89jAOzxNU24dERET03/HZjHRO6GqFw1efYF7odVz56yXaN6oJPR0t3H2agKiXqYiY2hkA4N3VEiduPcPSQ7dw+a+X6Nq4Dioa6OLJqxScuP0cFQ10cWxG1yL7K6ejBZu6Jtgf+Q+svqyML4z1UV5PBz3tviyV7Vk13AGtZh9G54U/Y2hbU9hKqiA9IxuXH8bBopYx5vW1Qw1jfbQ0q4YNJ+9DR0sTNvVM5I85aljbGNcev1Jo077B21HMCVsvwblZPehqa6J5g2qQVDPE2hGt4TjnML5eegz9W9WHfYOqAIBHMck4cDUKI9qZY6Zr4Xev85FJREREn6/PJunU09HCiVndsOzwLez+9RFm7L4Kfd23D4f3/MpcXk9HSxNHfDpj7fF72H7uIXzDrgMAalYyQPMG1TC4ranKfW4a0wYTtlzCrD1XkZ6RjbpVKpRa0lm/uhGuLnLGD2E38N
ONp9h69iEqVdBDk3omaGdVU15v38QO+H7HZey+9BgbTt5Hw9qVsG5EKzyOS1FKOnvZ1cXUr22w4/xfOHztCXJyBQSPbQNJNUPUMSmPq4ucsfTg7zh49Qn2/fYYBnraqF25PFya1UOfFpL3QyQiIiKS+6z+RyIiIiIiUo/P5ppOIiIiIlIfJp1EREREJDomnUREREQkOiadRERERCQ6Jp1EREREJDomnUREREQkOiadRERERCQ6Jp1EREREJDomnUREREQkOiadRERERCQ6Jp1EREREJDomnUREREQkOiadRERERCQ6Jp1EREREJDomnUREREQkOiadRERERCQ6Jp1EREREJDomnUREREQkOiadRERERCQ6Jp1EREREJDomnUREREQkOiadRERERCQ6Jp1EREREJDomnUREREQkOiadRERERCQ6Jp1EREREJDomnUREREQkOiadRERERCQ6Jp1EREREJDomnUREREQkOiadRERERCQ6Jp1EREREJDomnUREREQkOiadRERERCQ6Jp1EREREJDomnUREREQkOiadRERERCQ6Jp1EREREJDomnUREREQkOiadRERERCQ6Jp1EREREJDomnUREREQkOiadRERERCQ6Jp1EREREJDomnUREREQkOiadRERERCQ6Jp1EREREJDptdQfwX3TmjxcYtu4sol6mFlpvSFtTbB7btoyiIiIiIlIfjnSKYOjaohNO67qVsfXsQwxbd7aMoipdBw4cgFQqRc+ePUu97YEDB8LT01P+/smTJ/D398fLly9L1N7Vq1chlUpx+/bt0grxo2NqaoqNGzeqOwwiIvp/2dnZpVLnv4RJpwievCo84QSAM7N7fNKJZ1hYGADg7t27+PPPP0u17Tp16qBWrVry90+fPv2gpJOIiKgs3bt3D0uXLkVsbGyBdWJjY7F06VLcv3+/DCNTLyadamJcXveTTTxjYmJw6dIlODk5AfhfAlpaFi1aBF9f31Jts6QyMzPVHQKAt7+Gc3Nz1R0GERGpIDIyEomJiQgMDMw38YyNjUVgYCASExNx5coVNUSoHkw61ehTTTz379+P3NxcTJgwAXZ2djh48CBycnIKXScuLg6mpqbYunWr0rL169fD1NQU//77LwDF6fVjx45h0KBBAIAePXpAKpVCKpUiKipKvq6zszMaN26Mxo0bw83NDefOnSvRdi1cuBD29va4ePEiXFxcYGFhgQULFgAA7t+/j5EjR8LGxgYWFhZwc3NDZGSkUhv379/HmDFj0KRJE1hYWKB9+/ZYt26dQp0tW7agQ4cOMDMzg729PWbOnInk5GT58vT0dEilUmzatAlz586Fvb09zM3NkZSUhNzcXPz4449o0aIFLCws0K9fv1IfaSYiog8zYMAAyGQypKamKiWeeQlnamoqLCwsMGDAADVGWraYdKrZ+4nnpyA8PBwNGjSAtbU1XF1d8fLlS5w/f77QdapVq4ZWrVph//79SssOHDiAtm3bonLlykrLHB0d4efnBwAIDg7GuXPncO7cOdSuXRsA8OzZM7i7u2PNmjUICAiApaUlhg0bhgsXLpRo25KTkzF79mx4e3vj9OnT8PT0xL1799CnTx+8evUKCxcuxLp162BiYoKBAwfizp078nXv3LmD3r17IyoqCjNnzkRwcDA8PT3x4sULeR0/Pz/4+vrCwcEBQUFBGDt2LA4cOIAhQ4YoJe4BAQHQ1tbGzp07cfToUVSoUAGrVq3CqlWr4OLiguDgYHz11VcYOXIkBEEo0fYSEVHp09bWxuDBg5USz/cTzkGDBkFb+/O5p/vz2dKPWF7i6eR7RN2hFOnmzZt49OgRpkyZAuDt6KOvry/Cw8Pl0+0FcXFxwcSJE/H48WNIpVIAwB9//IEHDx5g/Pjx+a6jr6+PKlWqAHibuOYlm3l++OEHhfdt2rTBixcvEBISgtatWxd7+zIzMzF//ny0bNlSXjZkyBBUqlQJISEhMDAwkPfTo0cPrF69GuvXrwfw9rIAIyMjhIWFyeu9205ycjI2bNgAFxcX+eUDbdq0QZUqVeDt7Y2jR4+ie/fu8voymQwzZ86Uv09NTUVQUBD69+8PHx8fhfaXLl1a7G0lIiLx5CWe27Ztw/379xEYGAgAn23CCXCkU2003YMUXpU9t+FW1L/qDqtIYWFh0NTUhLOzMwDA0NAQHTt2xPHjx5GSklLoup06dYKBgYHCaOf+/fthaGiIDh06lCiee/fu4dtvv4WTkxMsLS0hk8lw6tQpPH78uETtaWlpoXnz5vL3WVlZuHTpErp06SJPJAFAU1MT7dq1k0+xZ2Zm4sqVK+jVq5dCvXf9/vvvyMjIwNdff61Q3q1bN+jo6ODy5csK5a1atVJ4/+DBA6SlpSkkpgDQq1ev4m8oERGJ7v0Rz8854QSYdFIxZGZmIiIiAra2tihfvjySk5ORnJyMTp06ISMjAxEREYWur6+vj65du+LgwYMQBAE5OTk4fPgwunXrBj09vWLHEx0djf79+yM5ORlz585FeHg4IiIi0LVrV2RkZJRoGytVqgRNzf+dFklJScjOzsaWLVsgk8kUXnkXgQNvRzFzcnJQvXr1AtvOq1u1alWFci0tLZiYmCApKUmh3MTEROF9XFwcACj1UVifRESkXu8mnp9zwglwel0tQid1gKt9PXwxOgSxSa/VHY7KTp48icTERFy7dg2NGzdWWh4WFgZ3d/dC23BxcUFYWBiuXr2KN2/eIC4uDi4uLiWK59SpU0hLS8P69esVRhfT0tJK1F5+DA0NoaWlhYEDB2LgwIEF1jMyMoKWlhZiYmIKrGNsbAwAePnyJSwsLOTlOTk5iI+Ply8vSLVq1QBAKTnNS2aJiOjjlJd4amhoQEtLS93hqA2TzjLW0boWqhiWU3cYJRIWFgZ9fX1s3LhRYTQQeHtzUWhoKJ48eYIvv/yywDZatGiBGjVqYP/+/Xjz5g1q166NZs2aFdqvrq4uACiNXqalpUFHRwf6+vrysoSEBFy+fFl+HeiH0tPTQ/PmzfHbb79h2rRp8ljyi9He3h6HDh3ChAkT8p1it7GxgZ6eHg4dOoQ2bdrIy3/66SdkZWUpTOvnx8zMDOXLl8f58+dha2srLy/qJi4iIlK/z3V0813cA2VIR0sTywc2x7B153B1kbO6wymW+Ph4nD17Fi4uLnBwcFBaXrVqVYSGhiI8PBwTJkwosJ2860F37tyJ7OxsDB8+HBoaGoX2Xb9+fWhpaWHXrl0QBAE6OjowNzdH69atsXz5cgQEBGDUqFGIiYnB9OnTS/3EnjFjBtzc3ODm5oZBgwahZs2aSEhIkP8PR1OnTgUATJs2Df369YOLiwtGjBiBL774Ak+ePMG9e/cwf/58GBkZYdSoUVi1ahX09fXRvn17PH78GCtXroSNjQ06d+5caBwVKlTAiBEjsHHjRshkMjg6OuLWrVvw9/cvch8SERGpG5POMvRdT2scuf4Ej2KTi678kcl7Fqebm1u+y+vXrw87OzuEh4fD29u70CTIxcVFfsd33g1JhalcuTLmzJmDwMBAhIeHI
zc3F6dPn4aVlRVWrFiBgIAArFu3DjVq1ED//v1hZmaGs2dL75mnFhYWOHjwIPz9/bF48WKkpKTAxMQEjRo1Uphyt7Kywr59++Dn54cffvgBmZmZqFWrFvr06SOvM3HiRBgbGyMkJAR79uyBsbExnJ2d8f3336s05TJ+/Hjk5uZi1qxZSEpKgkwmw5IlS+Dh4VFq20tERCQGDYEP+Ct1mu5BSmV1TMrjxMzusJ0aDh0tTSQED873ms7c3SPKKkwiIiKiMsO718uI35CWmBt6DekZ2eoOhYiIiKjMcaRTBPmNdL7cOEj+bw0NoHIFPfybmoGBq0/j6M1n8mUc6SQiIqL/Il7TWUYaTQmF1v/f8W2kr4O7K/qgww8/4V40H3dDRERE/30c6SQiIiIi0fGaTiIiIiISHZNOIiIiIhIdk04iIiIiEh2TTiIiIiISHZNOIiIiIhIdk04iIiIiEh2TTiIiIiISHZNOIiIiIhIdk04iIiIiEh2TTiIiIiISHZNOIiIiIhIdk04iIiIiEp22ugMoC1FRUeoOgYiIiOijUrdu3TLtT0MQBKFMeyQiIiKizw6n14mIiIhIdEw6iYiIiEh0TDqJiIiISHRMOomIiIhIdEw6iYiIiEh0TDqJiIiISHRMOomIiIhIdEw6iYiIiEh0TDqJiIiISHRMOomIiIhIdEw6iYiIiEh0TDqJiIiISHRMOomIiIhIdEw6iYiIiEh0TDqJiIiISHRMOomIiIhIdEw6iYiIiEh02uoO4L/ozB8vMGzdWUS9TC203pC2ptg8tm0ZRUVEpGzUqFH4999/ERISAj09PXWHQ0T/YRzpFMHQtUUnnNZ1K2Pr2YcYtu5sGUVVug4cOACpVIqePXuqOxQFAwcOhKenZ5H1Vq9eDSsrqzKI6PNmamqKjRs3yt9funQJ69evL1YbcXFxsLKywo0bN+Rlnp6emDJlCgCgf//+mDNnTpF9XL16FVKpFLdv35aX7du3D4cPHy5WPKVNncfitm3bcP/+fQQGBiolnO9/dmXJysoKoaGhAACpVIqff/5ZvmzIkCFYsGCBWuIiKo7s7OxSqfNfwqRTBE9eFZ5wAsCZ2T0+6cQzLCwMAHD37l38+eefao7mf+rUqYNatWqpOwz6f2ZmZqhUqZL8/cWLF7Fu3bpitbFy5Uo0bdoUtra2KtUvqA99fX2YmpqiXLly8rK9e/fiyJEjxYrnv+L+/ftYtWoVNm3aBBMTE6Xl7392Hwtvb29s27YNz549U3coRAW6d+8eli5ditjY2ALrxMbGYunSpbh//34ZRqZeTDrVxLi87iebeMbExODSpUtwcnIC8L8EtCQyMzNLKaq3Fi1aBF9f31Jt83MkCAKysrI+uJ2IiAj06dOnxOu/evUK4eHh8PDw+OBYLC0t8csvv8DU1LRE65f2sapuMpkMkZGRBe6PD/3sikvVY65JkyaQSqXYsmWL+EERlVBkZCQSExMRGBiYb+IZGxuLwMBAJCYm4sqVK2qIUD2YdKrRp5p47t+/H7m5uZgwYQLs7Oxw8OBB5OTkFLneqVOnIJVKcebMGYwaNQrW1tbo1asXACAnJwfr169H+/btYW5ujhYtWmDBggXIyMhQaGP37t3o0qULLC0tYWNjg+7du+PAgQPy5flNr9+7dw/u7u6QyWRo2bIl/P39kZubqxSfKjHkbcOFCxcwduxYWFlZwd7eHrNmzcLr168V2ktISMDs2bPh4OAAc3NzODg4YNKkScjMzER8fDzMzc0RFBSkFMfq1ashk8mQkJBQ4L7s1asXvLy8EBgYCEdHR5ibm6Nbt244f/68Ut1jx47B1dUVDRs2hLW1Nb755hs8f/483/a2bNmCdu3awczMDCdOnMDChQthb2+Pu3fvws3NDRYWFnBycsLRo0cBACEhIXBycoKVlRUGDx6MFy9eKLT77hTt/PnzsWbNGqSkpEAqlUIqlcLBwaHAbQSA0NBQGBgYyH/gFKWwPt6fXu/evTuuXbuG48ePy+tOnDgRAOTbffHiRbi4uMDCwkI+pZuQkIBZs2ahRYsWMDc3R4cOHbBr1y6V4lP1WCxpH5mZmVi0aJH8mLCzs4O7uzvu3Lkjr6PquZbf9Pr58+fRp08fWFhYwNraGiNGjMDDhw8V6jg4OGDhwoVKsUmlUgQHB00bUoMAACAASURBVMvfF3TMqaJnz57Yv3//Zzc1SZ+OAQMGQCaTITU1VSnxzEs4U1NTYWFhgQEDBqgx0rLFG4nULC/xdPI9gq1nH34SNxaFh4ejQYMGsLa2hqurK2bMmIHz58+rnBj4+PjAy8sLs2fPlidq3333HX7++WeMHj0a9vb2ePz4MVasWIHo6GisXbsWAHD69GnMmDEDY8eORZs2bZCVlYUHDx4gJSWlwL6SkpLg4eGBKlWqwM/PD7q6uli/fj2io6OV6qoSQ56pU6di8uTJmDdvHu7duwcvLy9UqFABPj4+AICUlBT06dMHiYmJ+OabbyCTyRAfH4/jx48jKysLJiYm6NKlC3bt2oURI0bI283NzcWePXvQtWvXIqc2T506hZSUFISEhMDAwABr1qyBp6cnDh8+DHNzcwDAnj17MG3aNHz99dcYP348Xr9+DX9/f7i7u+Pnn39G+fLl5e2dPXsW8fHx+PHHH2FiYgJdXV3cuHED6enp8Pb2xqBBgzB69Gjs2LED48ePx4gRI3D16lV89913yMrKwuLFizF58mTs3Lkz33jHjx+PnJwchIWFya/R09Yu/Cvo3LlzaNy4MXR0dBTKx4wZIy/z8vKCoaFhsfsIDg7GqFGjULFiRXmS9O7+SE5OxuzZszFz5kzIZDJkZmYiLS0Nffv2RUpKCsaPH4969erhwoULmDVrFrKysjB48OACt0XVY/FD+lixYgX27NkDHx8fmJmZITk5GTdu3FA4R4pznL/rwoULGD58OFq0aIFVq1bh9evX8PPzg5ubGyIiIkp0WUt+xxwALF++HJaWlgAAf39/pUsr7O3tsXz5cty8eRNNmzYtdr9EYtPW1sbgwYMVrpsePXo0ACgknIMGDSrye/A/RaBSp9FvY5Gv9yWkZgg234epIdriuXHjhiCRSIS1a9cKgiAIycnJgkwmE8aPH1/kuidPnhQkEomwcuVKhfJr164JEolE2LZtm0L5zz//LEgkEuGPP/4QBEEQli9fLjg4OBTah4eHhzB8+HD5ez8/P6FBgwbC8+fP5WVpaWlCkyZNBEtLy2LHkLcNgYGBCvV8fX0VYvP39xekUqlw+/btAmONjIwUJBKJcPHiRXnZ6dOnBYlEIkRGRha6nT179hQaNmwoJCQkyMtyc3OFTp06CePGjRMEQRDS09MFGxsb+fs8L168EGQymRAcHKzQnpWVlZCUlKRQd8GCBYJEIhHOnTsnL4uPjxfq168vtGzZUnj9+rW8fPv27YJEIhFevnwpL2vQoIGwYcMG+fvly5cL1tbWhW7bu9tjYWEhLF68WKX6RfWRt79v3bolL+vTp48watQopbp52/3uZyMIgrB27VqhQYMG8uMhz5w5c4QmTZoI2dnZ
Bcal6rH4IX306dNHmDRpUoHLVT3OBUH5s3NxcRHatm2r0H90dLRgamoqzJw5U17WsmVLYcGCBUp9SyQSYdOmTfL3BR1zqkhLS8v3PCT62GRlZQmbNm0SpkyZIsybN0+YN2+eMGXKFCE4OFjIyspSd3hljtPraqLpHqTwquy5Dbei/lV3WEUKCwuDpqYmnJ2dAQCGhobo2LEjjh8/XuiI47tatWql8P7cuXPQ0NCQt5mnXbt20NTUxNWrVwEA1tbWePHiBb777jv5KF9Rrl+/Djs7O3zxxRfyMgMDA7Rv375EMeRp3ry5wvsvv/wScXFx8ssMzp8/Dysrq0LvSm7atClkMpnCtOnOnTthamqq0uhNo0aNYGxsLH+voaEBR0dH+V3eN2/eRHJyMlxdXRXWq1GjBqysrJS2ydraGkZGRkr96OrqKnxmlStXhomJCVq2bKlwU07etYHvT7GXVHJyMt68eYPKlSuXSnvFpaWlpfQ5nz17FhYWFrCwsFAo79ChAxISEvD48eMC21P1WPyQPmxsbHD06FGsWLECV69eVZp+Lu5xnicjIwO3bt1Ct27doKWlJS+vWbMm7O3tS3xNWkHHXFEMDAxQrlw5xMXFlahforKSN+KZN9X+2Y5w/r/Pb4upxDIzMxEREQFbW1uUL18eycnJAIBOnTrhyJEjiIiIgLu7e5HtvH+nbHx8PARBQLNmzZTq5ubmyq9t7NixI1asWIGdO3fKpykcHR0xe/Zs1KtXL9++4uLi5FPN76pevXqJYshToUIFhffa2trIyclBdnY2tLS0kJiYCKlUmm9M7/Lw8ICvry/i4+ORlZWF06dPY+bMmUWuB0Ah4Xy3LC4uDoIgID4+HgAwbtw4aGhoKNTLzs5WSqjyu4MZAIyMjKCpqfj7VFdXV6n/vKnR968NLKm8dvLaLWuVKlVS2u74+Hj8/fffkMlkCuWCIABAodfhFudYLGkfU6ZMgaGhIQ4fPow1a9agQoUK+PrrrzF16lSUL1++2Md5nuTkZOTm5qJq1apKy6pWrap0XaeqCjrmVFGuXDm8efOmxOsTlZV3p9o1NDQ+24QTYNJZZlqZV8f5eYrPtHRZfhwHr0apKaLiO3nyJBITE3Ht2jU0btxYaXlYWJhKSef7jI2NoaOjg0OHDimMouR5d6TLxcUFLi4uSE1Nxa+//orFixdj9OjR+OWXX/Jtu1q1akhKSlIqT0xMLHEMqqhUqZJKI34uLi5YsmQJ9u3bh4yMDOjq6sLFxUWlPgq6I7JatWrQ0NCQXxO6ZMkSNGrUSKmuvr6+Sv2oS178eT9uPgbGxsawtrbGsmXL8l1es2bNAtctzrFY0j709PTg7e0Nb29vxMTE4OTJk1iwYAE0NDTg6+tb4uM874fHy5cvlZa9fPlS4QeIrq6u0o2FqalFP0auOARBQHJy8kf5SCei/OQlnhoaGvmee58LJp1l6F50Iuyn/+9O69eZRd/x/TEJCwuDvr4+Nm7cqDQCFB4ejtDQUDx58gRffvllsdpt27Yt1qxZgz///FPlh81XqFABnTt3xqNHj7By5UpkZ2fn+8uxSZMm2Lx5M9LS0uQ3ieTm5uLXX3/94BgK4+joiB9//BG3b9/ON+HLY2BgABcXF+zevRuZmZno3r27ytONt2/fRlRUFOrWrQsAePPmDX755Rf5KJatrS0MDQ0RGRmpNJ2qLrq6uiqPhOro6KBOnTp4+vSpaH0Upy7w9jhZt24ddHR0in2cF+dYLGkf76pRowY8PDzwyy+/4MGDB/K2S3Kc6+npwcbGBj/99BMmT54s/6P5/PlzREZGol+/fvK6tWrVQlSU4o/p0n4kTHR0NHJzc1G/fv1SbZdITJ/r6Oa7uAfKUG6ugLSMT/MRH/Hx8Th79ixcXFzyfcxN1apVERoaivDwcEyYMKFYbTdt2hQuLi7w8fHB3bt30axZM2hpaSE6OhonT57EnDlzULduXfzwww/Izs5Gs2bNULVqVTx79gwhISFo2bJlgSfz0KFDsW3bNnz33XeYM2cOdHV18eOPPypdC6ZqDKoaNmwYDhw4gMGDB2PcuHHyRyAdP34cixYtUpieHzhwILZv3w4AxXoeZbVq1TB06FBMnDgR+vr6CAoKQmJiIry9vQG8TWhnzJiBadOmISUlBV26dEHFihURGxuLS5cuwdHRUf7IqrJiZmaGzMxMbN68GU2aNIGenp7SNPK77O3tcevWLdH6MDMzw/79+3HixAlUr14dxsbGqFOnToFtDxs2DIcPH4a7uzs8PT1hZmaG169f4/Hjx7hy5YrCI4Hep+qx+CF9DBw4EM2bN4eFhQUMDQ3x+++/4/Lly/Dy8gLwYcf5xIkTMXToUAwZMgSDBg2SPwlBX19ffrkL8PZxRrNmzcKRI0fQqlUr3Llzp8BR25K6efMmAOVrq4no48akswxJqhni71XuSErPxN7fHmPJwd+RkyuoOyyV5D2L083NLd/l9evXh52dHcLDw+Ht7a10DWFRli1bhkaNGmHv3r3YsmULdHV1Ubt2bTg5OaFKlSoA3o4UhYSEICIiAsnJyahatSo6dOggf7ZifoyNjbFjxw7MmzcPbdu2hZGREXr27Inhw4cr/fFWJQZVVahQAfv27cPKlSsRFBSEhIQEVKlSBQ4ODkrXKJqamsLU1BQ6OjqwsbFRuY+mTZuiTZs2WLlyJV68eIH69esjKCgIZmZm8jp9+/ZF9erVsWHDBvj4+CA7OxvVq1dHixYt1PJfL3bo0AH9+/fH6tWrkZiYiOrVq+PixYsF1u/RowfCw8Px9OnTQpPBkvYxZswYPH78GBMmTEB6ejq+/vpr+Pn5Fdh2+fLlsW/fPgQEBGDr1q2IiYmBkZER6tevX+TIoarH4of0YW9vj+PHj2PTpk3IzMxEnTp1MHnyZIXHchXnOH/3PG7dujU2b94Mf39/TJgwAdra2mjevDl8fHwUHpfk5uaG2NhYLF68GPHx8bCxscGyZctK9QfOqVOnlG7KIqKPn4aQd3U6lRpNd+UHftc2KQ+bupVxPzoJDWoYYfVwB4Rc+Atz911XqJe7e4TSuvTf9uTJE7Rr1w6+vr4qPyS4V69eqFevHgICAkSOTr0EQUC7du3g6uqK8ePHqzucz8br169haWmJpUuXlun/SqSK9PR02NvbY8GCBfj666/VHQ4RFQMfmVRGnsWnIeL6UzyKTcYvvz/D1J2R6NeS1yN9zmJiYnD58mVMmzYNlStXVvkGos+JhoYGJk+ejK1btyI9PV3d4XwW7ty5g4CAAGhoaBRr5L2sbNu2DbVr10aPHj3UHQoRFROn19UkKycXWprFm4Km/5bQ0FD4+fmhXr168mvjSFmPHj3w/PlzREdHl/j/TSfVzZw5E3FxcZgxY8ZHub8NDAywdOnSz/oOYKJPFafXRZDf9HqHRrUQl/wa/8SlwPSLigge0wbHb0fju+2XFepxep2IiIj+i5h0EhEREZHoeE0nEREREYmOSScRERERiY5JJxERERGJjkknEREREYmOSScRERERiY5JJxERERGJjkknEREREYm
OSScRERERiY5JJxERERGJjkknEREREYmOSScRERERiY5JJxERERGJTlvdAZSFqKgodYdARERE9FGpW7dumfanIQiCUKY9EhEREdFnh9PrRERERCQ6Jp1EREREJDomnUREREQkOiadRERERCQ6Jp1EREREJDomnUREREQkOiadRERERCQ6Jp1EREREJDomnUREREQkOiadRERERCQ6Jp1EREREJDomnUREREQkOiadRERERCQ6Jp1EREREJDomnUREREQkOiadRERERCQ6Jp1EREREJDptdQfwX3TmjxcYtu4sol6mFlpvSFtTbB7btoyiIiIiIlIfjnSKYOjaohNO67qVsfXsQwxbd7aMoipdBw4cgFQqRc+ePdUax82bN+Hq6gpLS0tIpVL88ccfyMjIwKRJk2BrawupVIo5c+bgxo0bkEqluHHjRrHaNzU1xcaNG4sd18CBA+Hp6VlonVOnTkEqleLRo0fFbv9DpKenQyqVIiQkpNB6qmyDGEq6zwEgLCwM7dq1g5mZGWxsbEo5MvE8efIE/v7+ePnypdKyD9kfYrl69SqkUilu374tLxPjeDl+/Djs7OyQnp4uL7OyskJoaCgAQCqV4ueff5Yv27ZtG6RSqfxlZmYGJycnrFy5EpmZmQpt59V99/wbN26cwvrm5uZwcnLCkiVLkJaWprC+p6cnpkyZAgDo378/5syZI192/fp1WFlZIS4uTqXt/Bg/Y/pw2dnZpVLnv4QjnSJ48qrwhBMAzszuASffI9h69iEAfHIjnmFhYQCAu3fv4s8//4S5ubla4pg2bRoqVqyILVu2oFy5cpBKpdi1axd++ukn+Pn5oXbt2jAxMUFSUhKkUinKlStXrPbNzMxQqVIlkaL/uNWpUwc6Ojpl3m9J93l8fDymTp0KDw8PODs7Q1dXV4ToxPH06VP4+/ujY8eOqFq1qsKyT+UYLO3jJScnB0uWLMHIkSNhYGBQrHXnz5+PL774Aqmpqfj555+xevVqpKenY+bMmUWua2RkhJUrVwIAMjIycPPmTQQFBeGff/7BunXrVOq/SZMmsLOzg7+/PxYsWFBk/U/lMybV3bt3D/v374enpyeqV6+eb53Y2Fhs2rQJrq6ukMlkZRyhejDpVBPj8rqfbOIZExODS5cuwcnJCWfOnEFYWBimT5+ullj++usvTJ06Fc2aNZOXPXr0CF9++SW6du0qL6tZsyZOnDhR7PYjIiJKJc5P0aJFi9TSb0n3+T///IOcnBz06dMHVlZWHxxHdnY2NDU1oamp3gmhT+UYLO3j5cSJE3jy5An69etX7HVbtGiB+vXrAwB69uyJXr16Yffu3Solnbq6umjXrp38fdeuXZGUlISwsDC8efNG5R+uAwYMgJeXF7777rsiE8pP5TMujo/l/FGXyMhIJCYmIjAwEKNHj1ZKPGNjYxEYGIjU1FRcuXLls0k6P8+j4SORl3h+alPt+/fvR25uLiZMmAA7OzscPHgQOTk5CnXypo4vXLiAsWPHwsrKCvb29pg1axZev35dZB/p6emYN28eWrZsCTMzM3z11VcIDAyEIAgAgNDQUEilUuTk5GDBggUKU2IhISH466+/FMryXu9Prx87dgxubm6wsrJCo0aN4OzsrJCcvj/t9eDBA3h7e6N169aQyWRwcnLC3LlzkZKSUuL9GRcXJ99Htra2mDFjhsJ0IgCsX78ezs7OaNy4MRo3bgw3NzecO3dOoc6H7POEhAS4uLigXbt2ePbsGQDl6dLitH/v3j3069cPMpkMDg4OWLVqFQICAlRKBt/f5wsXLoS9vT0iIyPh6uoKCwsLODk5YcuWLfI6U6ZMgZubGwCgV69ekEqlmD9/vnz5li1b0KFDB5iZmcHe3h4zZ85EcnKyfHneJQebNm3C3LlzYW9vD3NzcyQlJaFXr17w8vLC4cOH0aFDB1hYWKB379548OABMjIyMH/+fDRt2hR2dnaYOXMmMjIyFLanqM/u2LFjGDRoEACgR48e8mM1Kioq3/0BAOfPn0efPn1gYWEBa2trjBgxAg8fPlSokxf3zp078dVXX6Fhw4bo2bMnLl26VORnEB0dDS8vL/l+cHBwwOjRo5WmmN9V0PFy4sQJDB06FJaWlgUe3/nZvXs3Wrdu/cEjgBoaGrC2tkZ6ejoSExNL1IaxsTFycnLk3z+q+Oqrr6Crq4vw8PAi65bkmM9PfHw8zM3NERQUpLRs9erVkMlkSEhIkJcVdV5ERUVBKpXi1KlTCm399ttv8suZgMLPn/yo+l2SmpqKOXPmoGPHjrC0tESLFi0wevRo/P333/m2d/XqVXh7e6NRo0aws7PDjz/+CAC4fPkynJ2d0bBhQ3Tr1g2//fabUkzHjh2Dq6srGjZsCGtra3zzzTd4/vx5ofu7MAMGDIBMJkNqaioCAwMRGxsrX/ZuwmlhYYEBAwaUuJ9PDZNONXs/8fwUhIeHo0GDBrC2toarqytevnyJ8+fP51t36tSp6NSpE06dOoVly5bh0KFDCAgIKLR9QRDg6emJvXv3wtPTE5s2bUL79u2xZMkS+VRVt27d5H+4x48fj3PnzslfPXr0gEQiUSjLb1osJCQEY8aMQZUqVbB8+XKsWbMGnTt3lidd+YmOjkbt2rUxa9YsbN26Fd9++y3Onz+P4cOHq7r7lEyePBl169bFxo0b8e2332L//v2YOnWqQp1nz57B3d0da9asQUBAACwtLTFs2DBcuHBBqb3i7vPo6Gi4ublBEASEhoaidu3ahcZbVPuJiYnw8PBASkqKfHrx8uXL2L17dzH3zP8kJydj3rx5+P7773Hu3DkMGTIEvr6+OHPmDABg+vTp8Pf3BwAEBwfj3LlzGD9+PADAz88Pvr6+cHBwQFBQEMaOHYsDBw5gyJAhSj+WAgICoK2tjZ07d+Lo0aOoUKECAODGjRtYv349xo8fj2XLliE+Ph6jRo3ClClTkJKSgsWLF2PkyJHYs2cP1q9fr9BmUZ+do6Mj/Pz8FGI/d+5cgZ/DhQsXMHz4cOjr62PVqlVYsGABHj9+DDc3N0RHRyvUPXfuHE6dOoXAwECcPHkSpqamGDlyJP79999C9/e4cePw8OFD+Pr6YteuXZg9ezZMTEyUrotUxdSpU9G7d29cvnwZ69atw6lTp5SO7/dlZmbit99+g729vdKy5cuXo3nz5gAAf39/2NraFhlDdHQ0jI2NUbFixSLrCoKA9PR0pKenIyEhAadPn8bevXvRqVMn6Ovry+uNGTMGHh4eAAAvLy/5j548urq6sLGxwenTp4vsMz9FHfP5MTExQZcuXbBr1y6F8tzcXOzZswddu3aVJ/HFOS9UVdD5U5CivktSU1ORkZEBLy8vBAcHY968eXjz5g1cXV0RHx+v1N6UKVNQs2ZN+Pv7w8XFBQEBAVi4cCF8fHzQr18/+Pv7w8DAAKNHj1YYKNizZw/GjBmDunXrYs2aNVi8eDH++usvuLu7F/pDqzDa2toYPHiwUuL5fsI5aNAgaGt/RpPOApU6jX4bi3y9LyE1Q7D5PkwN0RbPjRs3BIlEIqxdu1
YQBEFITk4WZDKZMH78eIV6J0+eFCQSiRAYGKhQ7uvrKzg4OBTax5kzZwSJRCLs3btXoXzGjBlCgwYNhJiYGHmZRCIRNm3apFDPx8dH6NSpk0LZ9evXBYlEIly/fl0QBEFIS0sTGjVqJIwaNarQWBo0aCBs2LCh0Dp3794VJBKJcP/+fXmZh4eHMHz48ELXy9tH06ZNUyjfsWOHIJVKhT///LPQ9UeNGiWMGTNGqb2i9nlaWpogkUiEHTt2CPfv3xdatGghDB48WEhLS1NY7/1tULX9lStXCubm5kJcXJy8LCMjQ2jevLlgaWlZ6DYJgvI+X7BggSCRSITff/9doV63bt2ESZMmyd9funRJkEgkwt27d+VlSUlJgkwmU6gnCIJw6NAhQSKRCEeOHFHYJ3379lWKp2fPnoKVlZXw77//ysuOHj0qSCQShf0vCILwzTffCB06dChyG9//7C5cuKAUe57394eLi4vQtm1bITs7W14WHR0tmJqaCjNnzlSI287OTuFzTUxMzPfceldWVpYglUoLrRMZGSlIJBLh1q1b8rKCjpclS5YorHvkyJEij++875nTp08XWCc/W7duFSQSiXD79m0hLS1NiI2NFTZv3ixIpVJhx44d+db966+/5GVjx44VJBKJ0mvAgAFCcnJysWIRBEFYuHCh0LBhwyLrlfSYz0/eZ3Px4kV52enTpwWJRCJERkYKgqD6efHPP/8IEolEOHnypEK998+1ws6f/HzI34fs7GyhSZMmwpYtW5TaW7p0qULdrl27ClKpVPjjjz/kZQ8ePBAkEolw+PBhQRAEIT09XbCxsRHGjRunsO6LFy8EmUwmBAcHq7RNBcnKyhI2bdokTJkyRZg3b54wb948YcqUKUJwcLCQlZX1QW1/ijjSqSaa7kEKr8qe23ArqvDRh49BWFgYNDU14ezsDAAwNDREx44dcfz48XynmPNGJPJ8+eWXiIuLK/SX9JUrV6ChoaF0Z7yzszNycnJw7dq1D96OGzduIDU1Fe7u7sVaLzs7G5s2bUKvXr1gZ2cHmUwGV1dXAMDjx49LFEu3bt2U3guCoHApwL179/Dtt9/CyckJlpaWkMlkOHXqVL59qrrPr127hn79+qF58+bYtGmTyjdrFNX+jRs3YGdnp3BDzPvXyRWXvr4+rK2tlfotavrr999/R0ZGBr7++muF8m7dukFHRweXL19WKG/VqlW+7dja2ipM85qamgKA0jaZmprixYsXCmXF+eyKkpGRgVu3bqFbt27Q0tKSl9esWRP29va4cuWKQn1ra2uFz7VixYowNjYudL9pa2vDwsICAQEB2LJlCx48eFDsON/l6Oio8L5t27ZKx/f78qYiTUxMStRnr169YGVlhRYtWsDX1xdDhw6Vj0oWxdjYGHv37sXevXsREhKC+fPn4++//8bw4cORlZVVrDgqV66M169fl+jym5Ie802bNoVMJlMY7dy5cydMTU3RtGlTAMU/L1RV0PlTEFW+q44dO4YBAwbAwcEBFhYWsLS0RGJiYr7nT/v27RXem5qaokaNGrCwsJCX1a9fH1paWvLz9ObNm0hOTpZ/j+epUaMGrKyscPXq1WJt0/veH/H8bEc4/x+TTlJZZmYmIiIiYGtri/LlyyM5ORnJycno1KkTMjIy8r0Y/v3pFW1tbeTk5BT6mIikpCQYGhoqXbCfl8SU9Lqsd+Vd11TQXYUFWbx4MVasWAEXFxcEBwfjyJEj2LFjBwAoXcunqvf/sFaqVAlaWlqIiYkB8HZqsH///khOTsbcuXMRHh6OiIgIdO3aNd8+Vd3nZ8+eRWpqKgYMGFCsL7+i2o+Li0OVKlWU1suvTFXly5dXKtPR0Slyn+cdK+/fEa6lpSV/qsG7CkpyjI2NFd7n3Rn//nStnp6eQkzF/eyKkpycjNzcXKXtAd5u4/vnRn7Tm9ra2kX2HRQUhNatW2Pt2rXo0qULHBwcsGnTpmLHCyjvuwoVKigc3/nJm8Yv6RMIfvzxR+zduxfr1q1D06ZNsXnzZhw6dEildbW1tdG0aVM0bdoULVu2hIeHB5YsWYJr167h4MGDxYoj7zvszZs3xd6Gkh7zAODh4YFffvkF8fHxiImJwenTpxWuGyzueaGq4v5IKOq75Pjx4xgzZgzMzc3h5+eHQ4cOISIiArVr1853P7x/Purq6iodf5qamtDS0pKvnzdNP27cOMhkMoXXjRs3SuXvzbuJ5+eccAK8e71M1TEpjzWerdDOsiZeZ+Yg4OgdzA8r3nMj1enkyZNITEzEtWvX0LhxY6XlYWFhxR45zE/FihWRkpKidKdo3jMMS+PRIpUrVwbw9k78hg0bqrze/v37MXToUAwbNkxe9u6zCkvi1atXCu8TEhKQk5ODGjVqAHh7kXxaWhrWr1+vMGpV0muN8nh5eeG3337D8OHDsXnzZoUn5Xm/fAAAIABJREFUAHyIatWqKW0ToLydZSHvD87Lly8VRjtycnIQHx+v9AeptJX2Z2dkZARNTc18n+f58uXLUtueGjVqyO9Gf/DgAfbu3YsFCxbgiy++UBqZL0psbKzCvn/16pXC8Z2fvO1496aW4rC0tJTfvd6mTRt06dIFCxcuRMeOHRWuy1SVmZkZAOD+/fvFWi8vYSnrxyG5uLhgyZIl2LdvHzIyMqCrqwsXFxf5clXPCz09PQDKz5JMTS36sYCl4cCBA7CxsVF4BiqAfK/nLKm8z2bJkiVo1KiR0vKSHC/5yUs8NTQ0FGYpPjcc6SwjmhoaODq9K+5HJ8J84j409gnHsd+ji17xIxIWFgZ9fX3s2LEDO3fuVHj16dMH165dw5MnTz64n+bNm0MQBBw5ckSh/MCBA9DS0oKdnd0H99G4cWNUqFBB6YL7wgiCgDdv3sDIyEih/EMfd/L++keOHIGGhob8Bom0tDTo6OgofPklJCSUeAosj7a2NlavXg0nJycMGzZMaWq2pGxtbXHt2jWFxCgrK6vEN1R8CBsbG+jp6SmNcv3000/IyspSmt4rbap+dnkjekWNYunp6cHGxgY//fSTwhTk8+fPERkZKcr2mJmZYcaMGdDV1S3RVPvhw4cV3h86dEjh+M5PXsL49OnTYvf3Pn19fUydOhVxcXHFOt/fdefOHQD/+7GqqmfPnuHLL78s81EtAwMDuLi4YPfu3dizZw+6d++u8L2l6nlRtWpVaGtrK32vl9Z3RVHS09OVRi/PnDmj0tMPVGVrawtDQ0NERkaifv36Sq+aNWuWWl/a2tqfdcIJcKSzzLi1lCArOxffh/zvZI3+98NGqspSfHw8zp49CxcXFzg4OCgtr1q1KkJDQxEeHo4JEyZ8UF+Ojo5o3rw55syZI38EyLlz57Bz504MHz4c1apV+6D2gbdfyj4+Ppg1axZGjhwJZ2dnlC9fHvfu3UO5cuUURjLzaGhooHXr1ti9ezfat2+PWrVq4ciRIyo9EqUw58+fxw8//AAnJyf88ccf8PPzQ48ePeSjK61bt8by5csREBCAUaNGISYmBtOnTy+VP2RaWlrw9/fHxIkTMXz4cAQFBaFFi
xYf1OawYcOwfft2DBkyBN7e3tDV1cWmTZugoaEBDQ2ND465OIyMjDBq1CisWrUK+vr6aN++PR4/foyVK1fCxsYGnTt3FrV/VT+7vOvMdu3aBUEQoKOjA3Nz83ynlydOnIihQ4diyJAhGDRoEF6/fg1/f3/o6+tj9OjRHxxzdHQ0JkyYgJ49e0IikUBTUxMRERHIzs4u9jV7wNvH6/j6+uKrr77C3bt3lY7v/NSsWRO1atXCrVu3lK61K4kuXbrA0tISGzduxMCBAwudts/MzJQ/Iig7OxuPHz9GUFAQjIyMFEYLVXHz5k3Rf9gUZODAgdi+fTsAKF3Pqup5oaWlhW7duslnQmrXro1jx47hwIEDZbINjo6OWLx4MU6fPg0HBwfcvHkTs2fPLvZ/FlAYAwMDzJgxA9OmTUNKSgq6dOmCihUrIjY2FpcuXYKjoyN69epVav197ph0lpHmDarhUWwyzszpgcb1THDzn3h8E/wr7j5NKHrlj0DeszjffyxInvr168POzg7h4eHw9vb+oL40NDQQFBSE5cuXY/Pmzfj3339Rq1Yt+Pj4YOTIkR/U9rs8PDxQpUoVrF+/Ht999x20tbXRoEED+aN28rNgwQLMnTsXbm5uyMnJQbNmzbBu3boC94sqVqxYga1bt2LMmDHQ1taGi4uLwkOsrayssGLFCgQEBGDdunWoUaMG+vfvDzMzM5w9++HPdtXS0oKfnx8mTZoET09PBAUFoWXLliVuz9jYGDt27MC8efPg7e0NY2Nj9O/fHxYWFmp5CPbEiRNhbGyMkJAQ7NmzB8bGxnB2dsb3338v+qiDqp9d5cqVMWfOHAQGBiI8PBy5ubk4ffo06tatq9Rm69atsXnzZvj7+2PChAnQ1tZG8+bN4ePjg1q1an1wzEZGRpBKpdi2bRtevHgBbW1tmJmZYf369SW6BCPv+B47diy0tbXRu3dvlR7S3rNnT0RERGDOnDkf/GNFQ0MDkydPxvDhw7Fv375CbypKTk7GiBEjALw9N2rUqAFHR0d4e3vjiy++ULnPJ0+e4NGjR5g7d+4HxV5SpqamMDU1hY6OTr7/Jayq58XcuXMxd+5c+Y1U7dq1w9y5c/Htt9+Kvg1DhgzBq1evMG3aNCQlJcHU1BS+vr6l/h8R9O3bF9WrV8eGDRvg4+OD7OxsVK9eHS1atCiV/2iC/kdDEIrxtFtSiaa78oN590xoD1f7enBefgwnbz/HDFdb9GsphcWkfcjJ/d9HkLt7RFmGSlQmcnNz4ezsjBo1amDDhg3qDofKwKlTpzBixAgcP35cPl1eHE+fPkW7du2wc+fOUrveuCz5+/vj4MGDOHnyZJmP8ANvk9527drB19f3s3r4OH3cONJZRl5nZuPig1hEXH97jZJv6HVM/doGDWoY4c/nJbtTkOhj5efnB6lUilq1aiEhIQH79u3DH3/8gRkzZqg7NPpE1KlTB+7u7li3bt0nl3Smp6dj27Zt8PX1LfOEMyYmBlFRUQgICEDlypWLfUkAkZiYdJaRO08TIKlmqFTOcWb6L8rJyYGfnx9iY2OhqamJhg0bIigoSG3Xt9GnacKECdixYwfS09NL9To+sT179gyjR49G9+7dy7zv0NBQ+Pn5oV69evJrfYk+FpxeF0F+0+u1Tcrj3ko39F5xAqfvPsd0l8bo36o+Gk4KRa7A6XUiIiL6b2PSSURERESi43M6iYiIiEh0TDqJiIiISHRMOomIiIhIdEw6iYiIiEh0TDqJiIiISHRMOomIiIhIdEw6iYiIiEh0TDqJiIiISHRMOomIiIhIdEw6iYiIiEh0TDqJiIiISHRMOomIiIhIdNrqDqAsREVFqTsEIiIioo9K3bp1y7Q/DUEQhDLtkYiIiIg+O5xeJyIiIiLRMekkIiIiItEx6SQiIiIi0THpJCIiIiLRMekkIiIiItEx6SQiIiIi0THpJCIiIiLRMekkIiIiItEx6SQiIiIi0THpJCIiIiLRMekkIiIiItEx6SQiIiIi0THpJCIiIiLRMekkIiIiItEx6SQiIiIi0THpJCIiIiLRMekkIiIiItEx6SQiIiIi0THpJCIiIiLRMekkIiIiItEx6SQiIiIi0THpJCIiIiLRMeksZbdv30bfvn1hZWUFqVSK4OBg3LhxA1KpFCEhIWUSQ69evdC9e/ciy+jjk56eDqlUivnz56s7lGL7lGMnok+fqakppFIppFIpHj16JC8fOHCgvFzVv8MFtUUfRlvdAYgtPT0dVlZWKte/c+cODAwMStRXRkYGRo8eDT09PcyZMwcVKlRAw4YN8e+//5aoPaLiOHPmDH7//XeMGzcOOjo6ovTx+vVrbNiwAU2aNIGjo6MofVDZ27FjB7S0tNC/f391h0JE/2H/+aRTV1cXkyZNUii7c+cOjh07hk6dOiklpLq6uiXu6+HDh4iJicH8+fPh5uYmL69Tpw7u3LkDPT29Erf9ocLCwtTWN6nOwMAA9+7dg7Z28U/N06dPY/v27Rg5cqRoSeebN2/g7++PESNGKCWdHxI7qdf27duhp6fHpJM+aSNGjEBOTg4AoFKlSmqOhvLzn//roK2tjW+//VahbM+ePTh27Bi++uor9OvXr9T6evXqFQDAyMhIoVxTU7PEo6elRawk5FOWkZEBbW1taGlpibpOcZXVjxMxtkWdP6yI6PPm4+Oj7hCoCLym8x0JCQmYPXs2HBwcYGZmBgcHB8yaNUul6fEOHTpg+PDhAABvb2/5tSDvvt69luTAgQOQSqU4c+YMfvzxRzg6OsLc3Bzt2rXLd1Ty5MmTGDt2LBwdHSGTydCkSROMGDECd+/eVWnb3r+m09/fP98Y814PHjyQ183MzMTatWvRuXNnyGQy2NjYwNPTU6W+ly1bBqlUmm/dpKQkyGQyjB8/Xl6Wk5ODwMBAdOrUCTKZDLa2thg1ahTu3bunsG7edbJ79uxRanfmzJmQSqXIyMiQl02dOhVSqRRPnjyBl5cX7Ozs0LBhQzx9+rTA2ItaJy4uDrNmzUKrVq1gZmaGFi1aYPr06YiPj1dq69mzZxg3bhysra3RqFEjDB8+HI8fP0bnzp3Rq1cveb2Cros8cuQIevfuDVtbWzRs2BBt2rSBl5cXYmJiAABubm7Yvn07AMivJ5ZKpQgNDVVpW4KDg+Hh4YHmzZvLj/3p06crHPu//vor7OzsAABBQUHyPvJGPAuKXdXPNCYmBlKpFCtWrMCBAwfQvXt3yGQyNG/eHIsWLZKPYBSmT58+aNq0KbKzs5WWnTlzRuk8VPWcX79+PaRSKaKiopTa7dChA1xdXYuM7d3tO378OHr27AkLCws4OTlh79698jrffvstbG1tYWVlhYkTJyItLU2hnaFDh8LBwUGp/ZcvX0IqlWLp0qXysuzsbKxZswadOnWCpaUlrK2t0blzZ8ydOxfA289GKpXi4cOHuHPnjsJ3wLNnz4rcpvfdvXsXXl5e6NSpE5o0aQJTU1NYWVmhc+fOWLRoERISEpTW2bdvH9zc3GBjYwNTU1PY2dmhS5cumDRpEq5fv15kn69fv4af
nx+6dOkCS0tLmJmZwd7eHq6urpg1axZevnypUD86Ohpz585Fu3btYGFhAUtLS3Tu3BlLly7NNz4AOHjwIDw8PNCkSROYmZmhWbNm8PT0xJkzZ5Tqvnst4O3bt7F06VKF7/e8c1IVERER8PT0RNu2beX7p3Hjxujduze2bNmidE5ER0fDx8cHbdq0gbm5ORo2bIhWrVph4MCBWLFihUp9vn/945YtW9CpUyeYm5vDwcEBy5cvR2ZmptJ6xdmvxfnMirNNql6HefjwYfn3S2HbVJDY2Fj88MMP6NChg3xbe/XqheDgYJW+pz5n//mRTlWlpKSgd+/eiIqKgpubGxo1aoQ7d+5g586dOH/+PA4dOqQ0gvmuFStW4OLFi1i2bBnGjx+Pli1bypf99ddfmD17dr7rzZkzB40aNcKSJUugq6uLwMBATJkyBfXq1ZP/gQfejs5mZmbCzc0N1atXx9OnT7Fr1y707dsXhw4dQv369Yu1vc7OzmjcuLFC2Zs3bzB9+nRkZGTA2NgYwNs/WkOHDkVkZCScnZ0xePBgpKSkYPfu3XBzc8OePXvQqFGjAvtxd3fH+vXrsW/fPlhaWiosO3jwIDIzM9G3b1952YQJExAREYHWrVvDw8MDr169wrZt29C7d2/s3LlTKebiGjBgADp37ozVq1crbGdx13n+/Dl69+6N169fw93dHXXr1sWTJ0+wfft2XLp0CYcOHYKhoSGAt4lN3759ER8fj0GDBqF+/fq4fv06+vfvD01NTVStWrXQ/n/55Rd4eXmhZcuWmDx5MsqVK4cXL17g3LlziI2NRY0aNTB37lysWrUKx44dQ3BwMMqVKwcASsdFQdu/bt06ODo6ok2bNjA0NMSdO3cQFhaGa9eu4fDhw9DV1YWVlRUCAwMxevRodO/eHR4eHgCKHt0s7md6+PBhlC9fHt7e3qhRowaOHTuGtWvXomLFihg3blyhfbm7u+P777/HiRMn0KVLF4Vle/fuRbly5eRJ/oee8yV14cIF/B975x0WxdX98e+CgGLBAnbQWdrSFAEVGyI1asACKNiJIETUN8ZYEguxoNG8RElUsCYRRQRURCOKooLYiIJBlEQJoagoIEpnkd37+4N357fDLrC7gCZxPs/jI3PmzO33zJ17z70bEREBT09PuLq6IiYmBmvWrIGqqiqCg4NhYWGBpUuXIiMjA2fOnIGamhq++eYbheLaunUrfv75Z3h4eGDhwoUQCoXIz89HUlISgIZVmIiICKxatQoqKioICgqin22pXUrjyZMnOHfuHENWXV2NJ0+e4MmTJ0hISMDZs2fRpUsXAMCRI0foAbCI169f4/Xr13j8+DGMjIxgYWHRbJwrVqzAhQsXGLKSkhKUlJTg/v37mDFjBp2XtLQ0eHt7o6KiQiLdT548QWxsLCIjI6GjowMAEAqF+OyzzyTy9OrVK1y9ehVXr16Fv78/Vq1aJTVtfn5+9IchAOTm5mLVqlUYOHAgrK2tm80XAFy/fh1Xr15lyMrLy5Geno709HTcvXsXu3fvBtBgv2fMmIHCwkJa9+3btygsLERhYSHu37+PFStWtBinOLt378bLly/p6xcvXmDv3r148uQJ9u3bR8vlLVdZ66w98hQdHY2MjIwW89QUmZmZmD9/vsRAOjMzE5mZmbhy5Qp+/PFHdnWxKcgHSGRkJKEoikRGRtKyb7/9llAURcLDwxm64eHhhKIosnXr1hbDTUxMJBRFkbi4OIY8LS2NUBRFjh49SstOnz5NKIois2bNYuhWVlYSU1NTsmzZMgl5Y3JzcwmPxyNffvklQ+7i4kImTZrUoqwxS5cuJVwul1y8eJGWHTp0iFAUReLj4xm6ZWVlxNraWiL90pg7dy4xNzcntbW1DPnkyZPJ2LFjiUAgIIQQcv36dUJRFFmyZAkRCoW0XlZWFtHT0yOurq60TFSm4nUoYu3atYSiKEZ8q1evJhRFkc2bN7eYXlme8fX1JUOGDCF5eXkMeVpaGtHV1SUhISG0bNu2bYSiKHLhwgWG7nfffUcoiiIuLi60rKqqilAURTZt2kTLPv/8c2Jubk7q6+ubTe+GDRsIRVGkqqpKrrwQIr19xcbGEoqiyJkzZ2hZaWkpoSiKBAUFSehLS7s8dVpYWEgoiiLDhg0jb968YYTt5eVFRowY0UzuG6ipqSFDhw4l3t7eDPmrV6+Ivr4++fzzz2mZPH0+NDSUUBRFcnNzJeK0t7cn06ZNazFtovzxeDxGOEVFRcTAwIBwuVyyd+9exjO+vr5EX1+fUafz588no0aNkgi/qKiIUBRFtm/fTstGjx5NfH19W0ybk5MTox0qyoMHD8iJEydIZmYmefr0KXn58iV58OABmTVrFqEoilAURQ4dOkTre3h4EIqiCJfLJQkJCeTly5fk8ePH5NKlS2TdunXk1KlTzcb39u1boqurSyiKImPHjiUZGRnkxYsX5MGDB+TUqVMkICCAPH78mBBCSF1dHRk3bhydDhcXF3LhwgUSFxdHbGxsaLm7uzsdfkREBC3X09Mju3btIlevXiVff/01LacoiqSkpNDP6Onp0fKhQ4eSuLg48uDBA+Lu7k7L/fz8ZCrPCxcukPPnz5M//viDFBYWksLCQpKSkkJGjRpFh/Xw4UNCCCE3btygZd7e3iQnJ4c8f/6c3Lt3jxw+fJj4+PjIFOfs2bPpcAwNDUlcXBx59eoVOXPmDDEwMKDvid4T8parPHUmb57Eyz47O1tqniiKIl9//TW5cOECCQwMZMjF333Swqqvryfjx4+n5V988QXJyckhDx8+JFOnTqXl4vafhQm7vP4/EhIS0LNnTwlHei8vL2hqauLixYvtEu+0adMY1507d4auri7++usvCbkIoVCIuro69O3bF0ZGRrh//36r0xESEoJz585h5cqVcHJyouWxsbHQ1tbGhAkTwOfz6X9qamqws7NDampqi8sSXl5eKCsrQ0JCAi3LzMzEo0eP4O7uDiWlhmYoKuOAgABwOBxal8fjwcHBAQ8ePGDMGiiC+Kyqos9UVFTgypUrcHJyQp8+fRjlYmxsDIqikJKSQusnJiZCR0cHzs7OjHB8fX3pvDdH165dUVlZicTERAiFQrnT31xeRIi3L4FAAD6fD1tbWygpKbWqfSlSp3Z2dtDQ0GDIhgwZguLiYlRXVzcbX8eOHTFlyhQkJyczwj19+jTq6+sZ+X9ffd7BwQGDBg2ir7W0tMDlcgE0LJ2LM3LkSNTX1yu01A00tJ2srCz8/vvvCqdXHkxNTdG3b1/8/PPPWLJkCWbNmoUlS5Yw4k9PT6f/Fvm6E0JQUFCAkpIS9O/fHw4ODti8ebOEfWyMkpISPdPO5/ORn5+PyspKGBoaYtq0adi9ezf09fUBALdv36bLUVlZGfv374ezszNcXFzw3Xff0WHeu3ePdqMQd3Xy9PTEf/7zH9ja2iIwMBBjxoyh7zW1ZL5kyRK4uLjA1NSUdr8CIGHfm8Le3h4VFRXYuXMnfHx8MGvWLKxduxbl5eW0jqg8O3XqRMsqKirw9OlTCIVCmJubw9vbGwc
OHJApTnE+/vhjuLi4oGfPnnB1dcXEiRPpe+fPnwcgf7nKU2ftkSd7e3sEBgbSbia2trYSeWqK27dvIz8/HwCgqamJb775BhRFwdjYGGvXrqX1RO4yLJKwy+v/o6CgAGZmZhKbKpSVlaGrq4u7d++CEMJ4cbYFffv2lZB16dKFbtji6QsJCUFKSgqKiooY9/r169eqNJw7dw4hISGYPn06/P39Gfeys7NRW1sLIyOjJp9/8+YNevfu3eR9R0dHaGpq4sSJE3BxcQHQ0CmVlJQYg4CnT59CSUkJenp6EmHweDxcvHgR+fn5UstMVgYOHNjqZ3JyciAUCnHy5MkmTwWgKIr+u6CgQKoPXpcuXZotNxGLFi1CSkoK/P390aNHD4wYMQI2Njb4+OOP6SV8RfMi4s6dOwgNDUV6errEEllZWZlccYijSJ021SeAhqXXljbleXl5ITw8HDExMfQmwqioKAwePBgjR46k9d5Xn9fW1paQde/eHb169WK8ZAHQg++mfA1b4quvvsKyZcswadIkaGtrw9raGnZ2dnBwcGiXzXA7d+7EDz/80KyOeHuaPn06kpOTAQBbtmyh5To6OnBwcMCSJUuadYFRUlLC1KlTcfz4cZSUlND+4crKyjAyMsLUqVOxYMECKCkpMXz8tLW1Ge3M3NwcysrKtD9ednY2Bg0axHhm+PDhjLitrKxw48YNAGjSf9DKyor+W/xD6s2bN03mSYRQKMS8efNw+/btZvVE5WlqagoDAwM8fvwY9+7dw/z58wE0DNwsLS2xYMEC2NnZtRivOI3tPo/HQ1xcHIAGVwEACpWrrHXWHnkSrxMAsLS0pH1zRXlqiidPntB/l5SU0IPjxjx//hzV1dXvfQPx3xF20CkDhJB2C7upF5p4nOXl5XB3dwchBP7+/tDX10fnzp3B4XAQGBhI75pXhN9++w0rV66ElZUVtm3bJjUdPB6P4evVmJaOpujQoQPc3d2xb98+FBQUoHfv3oiLi8O4ceMYA+bmyrnxveYGAtI2kYhQZHd142dEaXF3d2/yiJm23MXdv39/XLhwATdv3kRKSgpSU1Oxbt06fPfdd4iIiGjS8Mmarrt372LOnDkwMjLC+vXrMWDAAHTs2BGEEMycObNVs6vy1KmI5upWlr7I4/Fgbm6O6OhoBAQE4P79+3jy5EmTfneKxiOiufYmjaYGe80dNSWenqbKR9oGBhsbGyQnJ+PatWu4desWbt26hejoaAwZMgTHjx+XGOS2hvLycuzdu5e+3rBhA2bMmAF1dXWEhYXRG5zE25Orqys6d+6MqKgoZGRk0P6D+fn5OHz4MLKzs/HTTz81G+/GjRthYGCACxcuICsrC+Xl5RAIBLSPXV1dHfz9/RWy4621/eIfheKrGrKEm5SURA841dXVceDAAVhZWUFFRQXe3t60X66oPFVUVBAREYGDBw/i+vXryM7OBp/PR01NDVJSUnDz5k1ERUW16CMrTuNVLGmrWoqUkax11h55aiuUlZWbHVTW1NSwg04psIPO/6Gjo4OcnBwIBALGS0EgECAnJwcDBgxo8xkPWUlKSkJxcTFCQ0MllmgVnQEBGhyoFy1ahN69eyMsLEyq4/PgwYPx7NkzmJqatsox2tPTE/v27UNMTAy4XC7Ky8sllnp1dHQgFAqRnZ0NHo/HuCfaTS9yRBfNfkibMWg8S9zWDBo0CBwOB8XFxRg2bFiL+gMHDkROTo6EvLKyEkVFRdDU1GwxjA4dOsDGxgY2NjYAGhz3PTw8EBYWRu/gVLR9nj59GkKhED///DPjA6KgoKDVOzHlqdO2xMvLC6tXr8bt27cRFxcHZWVluLm5SaRN1j4v3t7El8br6+vx/Plz9OzZs83z0BQaGhpS2720nfVAwxFurq6u9AYq0QDw3Llz9HnCbWHbnj59SrcXZWVlzJs3jx5o/fbbb00+Z29vD3t7ewANm47S0tKwaNEi1NbWIiUlBbW1tfTGOGl06NAB8+fPp2fBSktLcfr0afpD+dKlS/D392fMthcUFODly5fo06cPnT7xti7agKenp0fvoL979y7jpIl79+7Rf4vcI9oS8SX4oUOH0ptT6+rq8OjRI6nP9OzZE6tWrcKqVatACMHTp08RHByMuLg4CIVCXL58Wa4BWmpqKj799FP6+tdff6X/FvUDRcpV1jprjzzdvXsXfn5+9LV4PYr3bWmI51VTUxMpKSlSPyLr6+vZ84qbgPXp/B+Ojo4oLS1FZGQkQx4ZGYmSkhKJwd77oPGMypkzZ/Ds2TOFwqquroaPjw9qampw8ODBJl+a06dPR0VFBcM3R5zGS/1NoaOjA2tra8TExCAyMhI9e/aEo6MjQ0fkSxoaGsr4en78+DEuXboEMzMzeummf//+UFVVpZfmRKSlpSE1NVWmNClKjx49YGtri6SkJImdpUDDzIP4sUn29vbIz89n+LQCwKFDh2SaRZQ2wDAxMYGqqipjqVLklynL0p04ogFH4/YVGhoqoauurg4OhyNzHPLUaVsyefJkdOnSBT/99BPOnTsHOzs7id3Y8vR5kbuEaHZJxMGDB+We6WwtFEWhtraW0c6FQiH279/P0COEMHz/RIhOCxBvO126dGnSjaKwsBBXrlxp8WNuwIAB9N8CgQDh4eF4+vQp9u/fL9H2RWzevBlBQUH0LFZlZSXq6uoYZdpSH5k7dy7CwsKQmpqK/Px88Plb0iRdAAAgAElEQVR8hu+vaNBjbW1Np1EgEMDPzw8XL17E2bNnGT8gYmlpicGDBwMA40MlMjIS33//PZKSkrB582aG37b4j4G0FeLl+dtvv+HWrVv466+/sHLlSoljoICGZe4FCxYgKioK9+/fx8uXL1FfX884Ok7eVYukpCRs2LABFy9exPr162l3AgCYNGkSAMXKVdY6a488JSYmYtOmTUhISMDGjRsZx16J8tQU1tbWtHvMy5cv6VMmioqK6F36ixcvxvr16xnP3bx5U+rxWh8i7FD8f/j5+eH8+fMIDAzEw4cPYWJigkePHuHEiRPQ0dFp8aiW9mTUqFHQ0NDAhg0bkJubCy0tLaSnpyM+Ph4URaGmpkbuMIOCgvDo0SN4e3vj+fPneP78OeP+8OHDoa6uDm9vb6SkpGDfvn24f/8+bG1t0bVrVzx79gw3btxA165d6fMhW8LLywvLli1DYWEhfHx8JL4Ex44di8mTJ+Ps2bMoKyuDnZ0dfbyOiooKNm7cSOuqqqrC09MTR44cwaJFizBq1CgUFBTg1KlTMDExwYMHD+QuE3nYsmULPDw84OvrCxcXF/pFnpeXh4SEBHh6etL+hP7+/oiLi8OyZcsYRyYlJSWhT58+Lc4y+fj4QE1NDSNGjMCAAQNQVVWF2NhY8Pl8xvmQojRs2rQJTk5OUFFRgbm5uVQfQnGcnJwQERGBuXPnYtasWeBwOLh8+TKKi4sl6khNTQ1GRkZISEgAj8eDlpYW1NXV6ZmqxshTp22Juro6pkyZQp/JKe1HIOTp8yNGjACPx8MPP/yAV69eYdCgQUhPT0dGRgY9q/OumDlzJsLCwv
Dpp59i3rx5UFNTw6VLlyQ2pdXV1WHkyJFwdHSEsbExevfujRcvXuDo0aNQV1dnDKrNzc1x+PBhbN++HUZGRuBwOLC3t4e6ujqSk5Px5ZdfYtWqVRI+3+JoaGjAxcUFZ8+eBdCwhCqqXxMTE6ln9RYWFuLChQs4dOiQ1DAnT57c4hLlo0ePGIOhxogGhCoqKggJCcGCBQtQWVmJjIwMxiwe0OBPLH7248yZM3Hz5k388ssvqK+vx65duyTC9/f3Z2wqaitsbGygo6OD/Px8VFdX00eUqaurg6Ioic1IAoEAycnJEh/iIlRVVTFlyhS50jBkyBAcPXoUR48eZcjt7Ozo9qNIucpaZ+2RJ319ffz0008SbhvieWqKDh064Pvvv8eCBQtQVlaGCxcuSBz9BEBiVeWLL75AaWnpO9vQ93eGHXT+j65duyImJgY7d+5EYmIioqOjoampCU9PTyxfvlxiN+27RFNTEz/99BO2b99OnyM2bNgwREREYMeOHYyD3GVFNEP6448/4scff5S4f+HCBRgYGKBDhw44fPgwwsPDcerUKXz//fcAgN69e9OHFMuKs7MzevbsidLS0iZ3Ue/atQsmJiY4efIkgoKC0KlTJ4wYMQKfffYZjI2NGbqrV69GfX094uPjcf36dZiYmODAgQM4c+ZMuw86+/Xrh7NnzyIsLAyXLl3C+fPn0alTJ/Tt2xfOzs6MXZ49evRAVFQUtm7disjISBBCYGVlhWPHjsHb27vZpUOg4cV37tw5HD9+HK9fv4aGhgZ4PB5+/PFHjB8/ntZzcHCAv78/YmNjkZiYCIFAgB07drQ46LSxscHOnTsRGhqKbdu2oUuXLpgwYQJ27twpdQPU9u3bsWnTJgQHB6OmpgYDBgxoctAJyFenbYmXlxeOHTuGPn36MMpJhDx9nsPhICwsDF9//TViYmKgrKyMMWPGIDIyEnPnzm23PEijb9++OHDgALZv3469e/eie/fumDJlCubNm0e7XwANL0hvb2/cvn0bN27cQGVlJbS0tGBtbY3Fixcz2kVAQABevnyJEydOoKysDIQQJCcnQ11dnT4oXxbf4W3btkFLSwvx8fF49eoVdHV14e/vj+LiYqmDzmnTpkFFRQUPHz5ESUkJqqqq0LFjR3C5XEycOBELFy5sMU5/f3/cvn0bjx8/xuvXr8Hn89GtWzcYGRnBy8uL8aMYFhYWiI+Px/79+5GcnIzCwkJwOBzo6OjA3t4evr6+DBcTJSUlfP/997C3t0d0dDQePXqEyspKdOvWDUOHDsW8efMYu5/bkk6dOuHo0aMICgpCamoq+Hw+hg0bhtWrV+OHH36QGHT27t0bixYtwv3795Gbm4uysjIIBAJoamrC0tISfn5+zW4IlYbofNfQ0FDk5OSgZ8+emDp1Kj777DOGnrzlKmudtUeeRB9qhw4dwl9//dVknppi6NCh9IdSUlISvaO+V69e0NbWxtixYxn2XyAQoLy8HAYGBnKl898Kh7TnLhkWFjEEAgHGjBkDbW1tREdHv+/kvHdqa2thZmaGjz/+GDt37nzfyfnX8eTJEzg7O2Px4sX44osv3ndy/rHMmzcP5eXliI2Nfd9JYXkHzJkzBzdv3gTQ4P4gmmFlUYy0tDS4u7vj0KFDmDBhwvtOznuH9elkeWdcuHABRUVFTe74/jcj7oMk4uDBgxAIBPTPSLK0LUeOHIGSkpLUpXUW2aivr8e9e/fk/tUXFhaWBm7dugULCwt2wPk/2OV1lnZHtATx/fffQ1tbmz6r80Nizpw5MDQ0hKmpKQQCAe7cuYNz587BzMyMsSOWpXW8ffsWFy5cwF9//YXIyEi4uLi06F7A0jQdOnSQuizOwsIiGwEBAQgICHjfyfjbwA46WdqdrVu3IicnB8bGxggKCvogf5PW1tYWZ8+exZkzZ8Dn89GvXz988skn+M9//sMerdGGVFZW4j//+Q86deoEOzs7BAYGvu8ksbCwsLD8D9ank4WFhYWFhYWFpd1hfTpZWFhYWFhYWFjaHXbQycLCwsLCwsLC0u6wg04WFhYWFhYWFpZ2hx10srCwsLCwsLCwtDvsoJOFhYWFhYWFhaXdYQedLCwsLCwsLCws7Q476GRhYWFhYWFhYWl32EEnCwsLCwsLCwtLu8MOOgE4Ozv/Y3+KsLq6GlwuF5s3b25VOFeuXAGXy0VCQkIbpYzl305btb0PCVtbW3h4eLzvZCA+Ph5cLhdXrlx530lhYfnbcPr0aTg7O4PH44HL5eLRo0fvO0n/OthB57+QmpoahISE4Pr16+87KSwsHxxHjx7F8ePH33cyWNoY1q7+u8nKysLKlSvRv39/bN++HXv27IG2tvb7Tta/DvZHn//hqKurIysri/H73bW1tQgJCYGPjw/GjRsnUzi2trbIysr6IH8XnYWlLQkPD4eamhq8vLzed1JY2hBF7CrLP4ebN29CKBRi3bp10NXVfd/J+dfCznT+C1BTU4OysnKrwlBSUoKamhqUlD6cJlFVVfW+k0AjEAhQU1PzvpPBwsLC8o+Bz+dDIBC0SVjFxcUAgG7durVJeCLaMo3/Bj6cEQaAFy9eYOnSpRg6dCjMzMywYMECPHnypEn9Bw8ewM/PD5aWljA0NIS9vT12794t0YBEflo3b96Eh4cHTExMMGzYMKxcuRKlpaUS4b5+/RobNmzA6NGjYWBggNGjR2P9+vUSuvX19dizZw+cnJxgYmKCIUOGwNnZGV9//TWt09iv7saNG7C0tAQAHDx4EFwuF1wut8Uvc2k+nbGxseByubh27Rp27dqFcePGwdDQEHZ2djh58mSz4QFARUUFTExM4OfnJ/X+N998Ay6Xy6iD33//Hf7+/rCwsACPx4OjoyNCQ0MlyjwsLAxcLhd5eXkS4To4OGD69On09YsXL8DlchEcHIxjx45h4sSJ4PF4WLt2bbPpl7dei4qKsH79eowZMwYGBgawtrbGV199hVevXjH0ROV6+fJlbNu2DWPHjgWPx8OZM2cAAOfOnYObmxuGDRsGY2Nj2NjYYNmyZXjx4gUjHFnLas2aNeByuSgsLMQXX3wBS0tL8Hg8eHh44P79+wzdN2/eYMeOHXB1daXDdXBwwJ49e1plOIuKirBu3TqMGzcOPB4PlpaWmDZtGiIiImgd8Xo6cuQIHB0dYWhoiDFjxiAkJEQi/j/++ANfffUVHBwcYGpqClNTU7i5uSE+Pr7JNAQGBsLGxgaGhoYYPnw45s+fj9u3bzP0ZO33jREIBHR7zszMpPsel8vF06dPGbq5ublYuHAhzMzMMGTIECxevJh+6TVOsyxtqjmioqLoshw/fjzCwsIgFAql6spqmwCgrq4OYWFhmDhxIoyMjDB06FBMnTqVUaeK9tNLly7BxcUFRkZGsLW1RVRUFK2zZMkSDBs2DKampli+fLnUj8e6ujrs3buX9s8bOnQoFi5ciIcPHzL0ZLVxstjVBw8ewMfHByNHjoShoSGsra0xf/583LlzR2pZiyOyNRkZGZgzZw5MTU2bbRey5k+k21I9yROmLO8lQDY7Jk8fF
tmx/Px8LFu2DJaWljA2Nsbvv/8u93tGnOLiYnC5XOzfvx8AMHLkSHC5XMZKhby2tnEaCwoKpMb9IfLBLK+Xl5djxowZePHiBebOnQs9PT3cvXsXs2bNQocOHaClpcXQv3btGvz8/DBw4EB88skn6NGjB9LS0rBz505kZWVhz549DP2//voLK1aswNq1azFkyBA8fPgQgYGBePDgAc6cOQM1NTUADQMxNzc35OXlwcPDA2ZmZsjMzERERASuX7+OuLg4+ktr69at+Pnnn+Hh4YGFCxdCKBQiPz8fSUlJTebT1NQU+/btg5+fHyZPnozZs2cDAB2/IgQGBsLMzAzbt2+Hqqoq9u3bh5UrV2Lw4MG0IZZG165dMWnSJMTGxqK4uJhRxgKBAKdPn4aFhQX09fUBNBhtT09PdOjQAXPmzEHv3r1x5coVfPvtt3jw4AH27t2rcB4A4NSpU1BVVcXy5csxcOBA1NfXt/iMrPX6/PlzuLm5oaamBp6enhg0aBDy8/MRHh6OW7duIS4uDl27dmWEHRgYCAMDA2zcuBHdunVD9+7dcfHiRSxbtgyjRo3CihUr0LFjRxQWFiI5ORkvX75E3759FS6rOXPmYNKkSTh48CDevHmDoKAgfPLJJ7h+/To6d+4MACgoKMCpU6cwceJEuLu7QygUIjk5GcHBwcjPz8f27dsVKntvb288ffoUc+fOBUVRqKysxOPHj+k+KE5MTAy0tLSwZcsW9OnTB5cuXcKOHTtQWFiIb775htZLTk7G/fv34ezsjIEDB6KsrAyxsbEICAjAf//7X8aApqCgAO7u7igtLYWbmxtMTU1RVVWF9PR03Lp1C9bW1gDk7/fiKCkpISIiAqtWrYKKigqCgoLoe+Jtv6SkBJ6enhg3bhyWL1+O33//HSdPnkRlZSWOHDlC6ynSphpz+PBhbNmyBaamplizZg34fD4iIiKkzubIY5vevn2LuXPn4tdff8X48ePh5uYGVVVV/P7777h06ZJEncpDSkoKIiIi4OnpCVdXV8TExGDNmjVQVVVFcHAwLCwssHTpUmRkZNB9ULxd1NfXY8GCBfj1118xdepUzJs3DxUVFYiMjISHhwdOnDgBMzMzRpwt2biW7OqzZ88we/ZsaGpqYuHChdDU1MSrV6+Qnp6OR48eYeTIkS3mOz8/H59++imWLFmC1atXIysrC5s3b8aKFSsY7UKe/MlaT/KEKct7SVY7Jk8fFjFr1iw4Oztj9+7d4PP5GDBggFzvmcZ0794dEREROHbsGH755ReEhoZCQ0ODbuuK2NrGaezevXuL9f/BQD4QduzYQSiKInFxcQz5f//7X0JRFHFxcaFltbW1ZPjw4cTV1ZXw+XyGflhYGKEoity5c4eWjR8/nlAURc6fP8/QTUxMJBRFkf3799Oyb7/9llAURcLDwxm64eHhhKIosnXrVlo2evRo4uvr22y+qqqqCEVRZNOmTbSstLSUUBRFgoKCmn1WWlovXrxIy06fPk0oiiKzZs1i6FZWVhJTU1OybNmyFsO9d+8eoSiKhIWFMeQJCQmEoigSFRVFy9zd3Ymenh7JysqiZUKhkCxZsoRQFEWuXLlCy0NDQwlFUSQ3N1ciTnt7ezJt2jT6urCwkFAURYyMjMjLly9bTLMIeerV19eXDBkyhOTl5TF009LSiK6uLgkJCaFlonJ1cXEhQqGQof/5558Tc3NzUl9f32za5Cmr1atXE4qiGGkghJDbt28TiqLIsWPHaFltba3UuNesWUN0dXXJ8+fPaZm0tieNvLw8QlEUOXToULN6onoyMDAghYWFjHtBQUGEoiiSkZFByyorKyXC4PP5xMnJiUyYMIEhX7BgAaEoiqSkpEg8IxAICCHy9/umcHJyYtgTcURtKiYmhiFfv349oSiKZGdn0zJ52pQ0ysrKiLGxMZk0aRKpra2l5SUlJcTc3JxQFEUSExNpuTy2SdT/goODJeIVlae4njz9lMfjMfSLioqIgYEB4XK5ZO/evYwwfH19ib6+PqmqqqJlhw4dIhRFkfj4eInysLa2ZtgzeWxcc3Y1KiqKUBRFMjMzJe7JgqhdNG5fu3btkmgX8uRP1nqSJ0xZ3kuy2jF5+rDIjm3evFniGXneM00hsjFFRUUMuSK2VloaWRr4YJbXL126hIEDB+Ljjz9myBctWiThx5iSkoKSkhLMmTMHhBDw+Xz6n+hopZSUFMYz3bp1g7OzM0M2YcIEaGpqMpasExIS0LNnT4lNBl5eXtDU1MTFixdpWdeuXZGVlYXff/9d8Yy3AdOmTWNcd+7cGbq6uvjrr79afNbCwgIGBgaIjo5myKOioqCuro7JkycDAF69eoV79+7BwcEBPB6P1uNwOFiyZAkAMMpGEWxtbdG7d2+5npGlXisqKnDlyhU4OTmhT58+jPZibGwMiqIk2gsAuLu7g8PhMGRdu3ZFZWUlEhMTm1wCVbSsGtfjkCFDADQs9Ypo7B/89u1b8Pl8ODo6QigUIiMjQ2qamqNLly7gcDhISUmRulTYmPHjx9MzISJExwyJ50s0Oyuirq4OhBBMmDABubm5ePPmDYAGl4Hk5GTY2tpizJgxEvGJ+r8i/V4RevXqJTGDI1qmFS1DK9qmxLl+/Tpqamrg7e3NWOno1asX3N3dJfTlsU1xcXHQ0NCg25s4rfULd3BwwKBBg+hrLS0tcLlcAMCCBQsYuiNHjkR9fT3DfSE2Nhba2tqYMGECo9zU1NRgZ2eH1NRU1NXVMcJpjY0DQM84x8fHo7a2Vua8itO/f3+MGDGCIRs6dCgAMNwT5MmfrPUkT5iyvJdksWOA7H1YnBkzZkjIZH3PyIuitlZaGlka+GCW1/Pz8zFq1CipL/nGA5Hs7GwAwOrVq7F69Wqp4TX2qRo4cKCEseVwOBg4cCDy8/NpWUFBAczMzCQ2/igrK0NXVxd3794FIQQcDgdfffUVli1bhkmTJkFbWxvW1taws7ODg4NDqzcOyUPjAQDQMJAQz1dzeHp6YtOmTbh79y6srKxQVFSEa9euwd3dHerq6gBA+7xIWwLR09ODsrJyq/1iFDn+QpZ6zcnJgVAoxMmTJ5v0daUoSqb0LFq0CCkpKfD390ePHj0wYsQI2NjY4OOPP6ZfbIqWVb9+/RjXnTp1grKysoRhj4yMRGRkJP744w/w+XzGvfLycqn5a46ePXtixYoV2LlzJ0aNGgUjIyOMHj0azs7OsLCwkNDX0dFpUiaer+rqauzduxfx8fHIz8+X8K8qLy9H9+7dkZeXB0IIjIyMmk2nIv1eEbS1tSXsUI8ePQA0+FQCircpcURlpaenJ3FPWtuRxzbl5ubC2NgYqqqqzaZBEaT1i+7du6NXr17o1KkTQ66hoQHg/8sNaKjH2traZuv7zZs3DLvfWhvn6OgIe3t77N27F4cOHYKFhQXGjh2LyZMnS23P0mjcP0VpABTPn6z1JE+YsryXZLFjgOx9WJyBAwdKTZ8s7xl5UdTWNpVGlg9o0AlAwtA3BSEEQIOfj2g2qDG9evViXDf+cm5J3ly8ImxsbJCcnIxr167h
1q1buHXrFqKjozFkyBAcP35cwgC3F02VW+P0NsW0adOwY8cOREVFwcrKCqdOnYJAIMDMmTPlDksWmvLVVOQFKUu9itLu7u7e5DE50nxqpaWnf//+uHDhAm7evImUlBSkpqZi3bp1+O677xAREQF9fX2Fy0qWegwLC8OOHTswefJk+Pr6QktLCyoqKnj06BHWr1/f7KxFcyxevBhTpkxBYmIiUlNTER0djQMHDmDOnDnYtGkTQ7fxQBdomHGVFuaNGzfg6+sLS0tLdO/eHUpKSoiOjkZkZCSdL9H/LfV/Rfq9IjT3wdg4zfK2KWlhScu3vG2osb5o8NkamuqnTZWP+LFwjRFPHyEEPB6P4VPbGNEgX0RrbZyysjIOHDiAzMxMXLt2Db/++itCQkKwa9cu7NixA1OmTJEpnKZQNH+y1pM8YcryXpLFjgGy92Fxmmr3srxn5EVRW9uaPRT/dj6YQaeOjg6ys7MlOmFFRQWKioqgqalJy0QzCDU1NRg2bJhM4T99+hTV1dWMLyo+n4/8/HwYGhoy0pGTkwOBQMAwrgKBADk5ORgwYAAjfd26dYOrqyu9vCcaFJw7d+5v8csmsqChoYGJEyfi/Pnz2LBhA6KiomBgYABzc3NaRzQbIG2H4Z9//gmBQMCYARF9/b5584axFFdfX4/nz5+jZ8+ebZJ2Wep10KBB4HA4KC4ulrm9NEeHDh1gY2MDGxsbAEBaWho8PDwQFhaG4OBguctKHqKjo2FoaIgffvhBItzWMmDAAMybNw/z5s3D27dvERAQgKNHj8LHx4cxGyQtX48fPwbw/7NgRUVFSE5Oxvz587Fq1SqG7rFjxxjXgwcPBofDafHXRRTp99Jo7WAMaJs2Jd5ORMu0IqTVpzy2iaIo/Pnnn6irq2v2Y+5d9VNxBg8ejGfPnsHU1PSdnzss2oENNGwYEw2EWjvoFEee/MlaT/KWmSzvpZbsmDx9WBZkec/IS3va2g+VD8an08HBAU+fPsX58+cZ8oMHD0rM3owbNw69evXCgQMHUFhYKBFWdXU1KisrGbLa2lrGDkOgoeNUVlbC0dGRljk6OqK0tBSRkZEM3cjISJSUlND+g4QQqUuZog5UVlbWZF7V1dXB4XCk+sO8Lzw9PVFdXY0NGzYgNzdXwuelV69esLS0xOXLl+kBBtBQDqIdw+K+laIBQuOd/AcPHpRpV7qsyFKvPXr0gK2tLZKSknD16lWJMIRCoczLstLqzMTEBKqqqnSdy1tW8sDhcCAUChlf+Hw+H4cOHVIoPKChvzSuExUVFZiYmACQXLJPTU1Feno6fU0IoY8zcXJyotMJSM6W5eXlSfTx7t27w8bGBklJSbh165ZE+kR5VaTfS6NLly7N9k9ZaIs2NXbsWHTs2BE///wzY2a+tLQUMTExEvqy2iYAcHV1xZs3bxAaGioRjnjbeVf9VJzp06ejoqIC3333ndT7RUVFCoXbnF2VJtPU1KR3ZLcl8uRP1nqSNUxZ30uy2DF5+rCstPSekZf2tLUfKh/MTKefnx/i4uKwfPlypKen00cmJSUlSfh0qqurIzg4GH5+fnB2dsaMGTOgq6uL8vJyZGdn4+LFizh8+DCsrKzoZ7S1tREWFoa8vDwMHToUmZmZOH78OPT19TFv3jxGOs6fP4/AwEA8fPgQJiYmePToEU6cOAEdHR0sXrwYQMPy7ciRI+Ho6AhjY2P07t0bL168wNGjR6Gurt5sQ1dTU4ORkRESEhLA4/GgpaUFdXV12Nvbt3Gpys7w4cOhp6eH2NhYqKioSDjuA8CGDRvg6emJGTNmMI6mSE5OhrOzMyZMmEDrjhgxAjweDz/88ANevXqFQYMGIT09HRkZGejTp0+bpVvWet2yZQs8PDzg6+sLFxcX2gjn5eUhISEBnp6eUp35G+Pj4wM1NTWMGDECAwYMQFVVFWJjY8Hn8xmbT+QpK3lwdnZGaGgoFi1aBEdHR7x+/RoxMTEK+0QBwK+//ooVK1Zg4sSJ4HK56NatG/744w+Eh4eDx+NJ+JEZGxtj/vz5WLBgAX1kUnJyMtzd3ellby0tLVhYWCAyMhIqKiowNjZGXl4ejh49Cj09PWRmZjLC3LhxI9zc3DBv3jy4u7vD1NQUNTU1tC1Yvny5Qv1eGubm5jh8+DC2b98OIyMjcDgc2Nvby12GrW1TGhoa+Pzzz7F161Z4eHhg2rRpqK2txfHjx9G/f3+JgYGstgkAPvnkEyQmJiIkJAS//fYbxowZA1VVVTx+/BjPnj3D4cOHAby7fiqOt7c3UlJSsG/fPty/fx+2trbo2rUrnj17hhs3bqBr164IDw+XO9zm7Or+/ftx+fJl2NvbQ1tbG8rKyrhx4wbu3LmDuXPnvrf8yVpPsoYp63tJFjsmbx+WBVneM/LSXrb2Q+WDGXRqaGggKioKQUFBOHHiBAghsLKyQkREhFTDbWNjgzNnziA0NBTnzp1DaWkpNDQ0MGjQIPj5+Uk4Fvfu3Rt79+7F1q1bERcXBxUVFUydOhVfffUVOnbsSOt17doVMTEx2LlzJxITExEdHQ1NTU14enpi+fLltGN8hw4d4O3tjdu3b+PGjRuorKyElpYWrK2tsXjx4han9Ldv345NmzYhODgYNTU1GDBgwHsddAINX6FbtmyBk5OThE8VAJiZmSEmJga7du3CsWPHUF1dDW1tbaxcuRKLFi1i6HI4HISFheHrr79GTEwMlJWVMWbMGERGRrapkZe1Xvv164ezZ88iLCwMly5dwvnz59GpUyf07dsXzs7OmDhxokzxzZw5E+fOncPx48fx+vVraGhogMfj4ccff8T48eNpPXnKSh4+++wzKCkpITY2FsnJyejXrx/c3NwwatQohWcN9PX18dFHHyE1NRVnzpxBfX09+vXrB29vbyxatEjCh2/ChAkwMDDA3r17kZOTg169emHZsmVYunQpQ2/Pnj3Ytm0bzp49S38IbNmyBQUFBRIvLB0dHYGUumIAACAASURBVJw9exY//PADrl69ipMnT0JDQwOmpqYYPXo0rSdvv5dGQEAAXr58iRMnTqCsrAyEECQnJ8s96GyLNuXj44MuXbrg4MGD2LZtG/r27QsvLy9oa2tLlKestglo8Ec+evQo9u/fj7Nnz+Lbb79Fx44dQVEUw4fuXfVTcTp06IDDhw8jPDwcp06dwvfffw+goS+bm5vDzc1N4bCbsqu2trYoKCjAL7/8gpKSEqioqGDQoEHYtGlTm/8cqjz5k7WeZA1T1veSrHZMnj4sKy29Z+SlvWzthwqHtOUOjg8UW1tbaGlpSRzXwMLk6NGj2LBhA44cOYKxY8e+7+S0CFuv75YXL15g9OjRCAgIwIoVK953clhYWP6B/NPeMx8aH4xPJ8v7RSgU4ujRo9DR0ZF6ViILCwsLC0trYN8zf38+mOV1lvfDixcvkJqaiqSkJDx+/Bjbtm1rk929LCwsLCwsAPue+SfBDjpZ2pX09HR89tln6NGjBxYtWsT+UgMLCwsLS5vCvmf+ObA+nSwsLCwsLCwsLO0O69PJwsLCwsLCwsLS7rC
DThYWFhYWFhYWlnaHHXSysLCwsLCwsLC0O+ygk4WFhYWFhYWFpd1hB50sLCwsLCwsLCztDjvoZGFhYWFhYWFhaXfYQScLCwsLCwsLC0u7ww46WVhYWFhYWFhY2h120CmF2NhYcLlc3Lhx453G6+rqismTJyv8/JUrV8DlcpGQkNCGqWJ5HzRVlyUlJVi6dCksLS3B5XLh5+cHAODxeFi2bNn7SOp7o76+Hjt27MC4ceOgr6+PoUOHNqsvEAiwY8cOjB07Frq6urT+ggULMGLEiHeR5L99H62urgaXy8XmzZvbRZ/ln09r6/xDbTMfar4bw/4MZhtw9OhRKCsrw8vL630nheUfxJMnT3D+/Hm4urqCoiiZngkMDMS1a9ewfPlyDBgwAH369GnnVP4/NTU12L9/PywsLDBu3Lh3Fm9THDt2DGFhYViwYAEsLS2hpqbWrH5ERIRc+v9W5K3Hv1u9v0sUyfu7KK/CwkLs27cPN2/exLNnz6CiooI+ffpg6NChcHV1xdixY9sl3vfBvzmvH2LfYgedbUB4eDjU1NRaPeg8efJkq563tbVFVlYWVFRUWhUOy7shOzsbISEhMDMzkxh0NlWXycnJsLe3h4+PD0OelpaGDh3atzvX1tYiJCQEPj4+fwsDmZKSAk1NTWzYsEEm/eTkZKn6+/btg1AobI8k/i1prh7V1dWRlZXFaEt/t3p/lyiS9/Yur+zsbLi5uaGmpgbOzs6YOnUqBAIBcnNzceXKFbx9+7ZdB2LS2kh78b7z2pawfasBdtD5N6K1g0UlJaUPdvZGFqqqqtC5c+f3nQzw+fwWDba0uqypqUFVVRW6desmoa+urt6mafwnUFxcLLUs5NVn+wwTtjz+3uzZswcVFRUICwuDk5MT455AIMCzZ8/aPQ3vqo38HfLaWkT2XllZme1b+MB8Ol+/fo0NGzZg9OjRMDAwwOjRo7F+/XqUlpZK1a+vr0dISAjGjh0LQ0NDTJw4Eb/88gt9XyAQgMvl4smTJ8jMzASXy6X/PX36FACQmJiITz/9FOPGjQOPx4OFhQV8fHzw8OFDifik+XTa2trCw8MDGRkZmDNnDkxNTTFkyBAsXrwYxcXFDF1p/mIi/9Rr165h165dGDduHAwNDWFnZyd1ZrWmpgZBQUEYOXIkjIyMMGXKFFy9ehWBgYHgcrmorq5utozd3d1hZWWF+vp6iXvXrl0Dl8vFsWPHGGW4b98+ODk5gcfjYdiwYVi0aBGysrIYz6anp4PL5eLEiRMS4a5btw5cLhd8Pl+i3O7cuQMvLy+YmprC0dGx2bQDQFFREQIDA2FjYwNDQ0MMHz4c8+fPx+3btxl6Dx48gJ+fHywtLWFoaAh7e3vs3r0bAoGAobdmzRpwuVzk5+dj2bJlsLS0hLGxMQIDAxEQEAAA8PHxodvNypUrAUjW5bp162BiYgKgYZlYvK2J/knz6czMzMTixYthZWUFQ0NDjBkzBsuXL2cY68OHD2P27NkYOXIk3S+++uorRr+4ceMGLC0tAQAHDx6k42z8dS5ruTSFLO3h7Nmz4HK5yMjIQE5ODp2WkJAQqWE2pS/+r7FPpzz97s2bN9ixYwdcXV1hYWEBHo8HBwcH7NmzR+Z8N0VRURHWr1+PMWPGwMDAANbW1vjqq6/w6tUrhp6onRUWFuKLL76ApaUleDwePDw8cP/+fVqvpXps7Hcma71L49y5c/Dw8ICpqSmMjY0xbdo0xMfHM3RevHgBLpeL4OBgXLp0CS4uLjAyMoKtrS2ioqJonSVLlmDYsGEwNTXF8uXLUVVVJRFfXV0d9u7dC2dnZ/B4PAwdOhQLFy6UsLWy2kRF8i7LM7LavKbIz88Hh8OBnZ2dxD1lZWXo6OgwZPLGd/78eXh6emLIkCEwNjaGg4MDNm/eTLflpnwTZbEj8iJvXgH5bJAs9pHH42H58uUSz4rsSlJSEi1ryt4XFBTI1bcqKipgYmJC++w35ptvvqHHHv80PpiZzoqKCri5uSEvLw8eHh4wMzNDZmYmIiIicP36dcTFxUnMguzYsQN8Ph/z588Hh8NBdHQ0li5dirdv32Lq1KlQUlJCREQEVq1aBRUVFQQFBdHPamlpAQBOnDiBuro6eHh4oE+fPigoKMDx48cxY8YMxMXFQVdXt8W05+fn49NPP8WSJUuwevVqZGVlYfPmzVixYgWOHDkiU/4DAwNhZmaG7du3Q1VVFfv27cPKlSsxePBguuEDQEBAAK5du4ZJkyZh9OjRePr0KZYtWya1c0vD09MTq1atwuXLl/HRRx8x7kVFRaFjx45wdXWlZZ999hl++eUXjB07FrNnz0ZJSQmOHDkCNzc3REREwNzcXKZ4pZGbm4vFixcjICAAX3zxBV6/ft2sfkFBAdzd3VFaWgo3NzeYmpqiqqoK6enpuHXrFqytrQE0DJ79/PwwcOBAfPLJJ+jRowfS0tKwc+dOZGVlYc+ePRJhz5o1C87Ozti9ezf4fD60tLSgpaWFkJAQrFy5EsOGDQPw/+2mMQsXLoSDgwM++eQT2NvbY+HChYz78+bNk3jm8uXLWLx4Mbp06YKZM2dCR0cHxcXFSEpKwp9//okBAwYAAEJDQzFu3DjY2Niga9euyMzMxMmTJ3Hv3j2cPXsWqqqqMDU1xb59++Dn54fJkydj9uzZAJgzHoqUS2NkaQ+jR49GREQE1q5di9raWgQHBwMABg4cKDXMpvRFfPPNN3j+/LnEc7L2u4KCApw6dQoTJ06Eu7s7hEIhkpOTERwcjPz8fGzfvr3FfEvj+fPn9NKip6cnBg0ahPz8fISHh+PWrVuIi4tD165dGc/MmTMHkyZNwsGDB/HmzRsEBQXhk08+wfXr19G5c2eZ6lEcefVFBAcHY8+ePRg9ejSWL1+ODh064MKFCwgICMDGjRsxd+5chn5KSgoiIiLg6ekJV1dXxMTEYM2aNVBVVUVwcDAsLCywdOlSZGRk4MyZM1BTU8M333xDP19fX48FCxbg119/xdSpUzFv3jxUVFQgMjISHh4eOHHiBMzMzBhxtmQTFcm7LM+01uYNGjQI6enpiIuLw/Tp05vVlTe+7du3Y9++fdDX14ePjw80NTWRn5+P+Ph4rFq1CsrKyk3GI4sdkRd58yqPDZLVPipCY3vfvXt3CZ3m2krXrl0xadIkxMbGori4mPFeEAgEOH36NCwsLKCvr69wGt8b5APh22+/JRRFkfDwcIY8PDycUBRFtm7dSstOnz5NKIoio0aNImVlZbS8oqKC2NjYEAsLC1JbW0vLnZyciIuLi9R4KysrJWS5ubmEx+ORL7/8kiF3cXEhkyZNYsjGjx9PKIoid+7cYch37dpFKIoi2dnZtCwxMZFQFEUuXrwokZdZs2ZJpMvU1JQsW7aMll29epVQFEW+/vprhu7NmzcJRVGEoihSVVUlNZ8iampqyNChQ4m3tzdD/urVK6Kvr08+//xzWnb9+nVCURRZsmQJEQqFtDwrK4vo6ekRV1dXWpaWlkYoiiKRkZESca5du5ZQFMWoE1G5JSQkNJtecRYsWEAoiiIpKS
kS9wQCASGEkNraWjJ8+HDi6upK+Hw+QycsLEyirlavXk0oiiKbN2+WCPP8+fOEoiiSmJgocU9aXVZVVRGKosjatWsl9A0NDcnSpUsZuhYWFsTKyoq8ePGiyfwQIr2NxsbGEoqiyJkzZ2hZaWkpoSiKBAUFSejLWy7SkKc9ENLQX+zt7ZsNUxb9+fPnk+HDhzNk8vS72tpaUl9fLxHumjVriK6uLnn+/Dktk1avTeHr60uGDBlC8vLyGPK0tDSiq6tLQkJCaJmonYnLCCHk9u3bhKIocuzYMVrWXD2K2timTZsU1n/w4AGhKIoEBgZK6Ht7exNTU1PajhQWFhKKogiPxyO5ubm0XlFRETEwMCBcLpfs3btXolz09fUZtujQoUOEoigSHx/P0C0rKyPW1tYM+yePTWwu703R3DPytnFpPHnyhJiYmBCKooiTkxNZt24diYyMJH/++Wer4ktPTycURZHZs2dL9GHxZ6XVOSGy25Gmnm9tXuWxQfLYR0NDQ/LZZ59J6MTFxRGKosi1a9doWXP2Xt6+de/ePUJRFAkLC2PIExISCEVRJCoqSuKZfwIfzPJ6QkICevbsKbHZx8vLC5qamrh48aLEM7NmzWLMfnbp0gWzZ8/G69ev8euvv8oUr7gPoVAoRF1dHfr27QsjIyPGsldz9O/fX2L5T3TcS15enkxhTJs2TSJdurq6+Ouvv2hZYmIiAMDX15ehO2rUKIlZgqbo2LEjpkyZguTkZLx48YKWnz59GvX19ZgxYwYtE5V5QEAAOBwOLRctTz548IARhrz06NFDpiV1oGGJNDk5Gba2thgzZozEfSWlhq6SkpKCkpISzJkzB4QQ8Pl8+p9oBjclJUXiefF8vwuuX7+O169fw8fHR+oOd1F+AGYbFQgE4PP5sLW1hZKSksxtVNFyEae924O8yNrv1NTUGDNAb9++BZ/Ph6OjI4RCITIyMuSOu6KiAleuXIGTkxP69OnDKE9jY2NQFCW1PBv38yFDhgBomPV/V5w5cwYA4O3tzUg3n8/HlClTUFVVJdGuHBwcMGjQIPpaS0sLXC4XQMORVuKMHDkS9fX1tAsT0LBkrq2tjQkTJjDiU1NTg52dHVJTU1FXV8cIRxab2Na0RRvX09NDfHw85s+fDz6fj2PHjuHLL7+Eg4MDZsyYwUi/PPHFxcUBAFauXCkxKyn+bFO0hR1pTV7lsUHy2EdFaAt7b2FhAQMDA0RHRzPkUVFRUFdXb9Xxiu+TD2Z5vaCgAGZmZhLLA8rKytDV1cXdu3dBCGF0Lj09PYlwRNPZ+fn5MscbEhKClJQUFBUVMe7169dPpjCk6XXp0gUAWlwyFtG3b1+pYYjno6CgAB07dkT//v0ldHV1dfHgwQOZ4vLy8kJ4eDhiYmKwZMkSAA0dZfDgwRg5ciSt9/TpUygpKUktZx6Ph4sXLyI/P19q2mWhqeVWaeTl5YEQAiMjo2b1srOzAQCrV6/G6tWrpeo09reTNy1tgWiQYWxs3KLunTt3EBoaivT0dFRUVDDulZWVyRSfouUiTnu3B3mRp99FRkYiMjISf/zxB8O3GADKy8vljjsnJwdCoRAnT55s8lQLacdsNU5zp06doKysjDdv3sidBkX5888/AQATJkxoUqdxW9DW1pbQ6d69O3r16oVOnTox5BoaGgCYdZCdnY3a2tpm+++bN2/Qu3dv+loWm9jWtFUb19bWRmBgIAIDA/HmzRvcu3cPMTExuHjxInx8fPDLL7+gY8eOcsWXm5sLDocDHo+nUN7awo5IQ9a8ymOD5LGPitBW9t7T0xObNm3C3bt3YWVlhaKiIly7dg3u7u7/2M2jH8ygszkIIVLl0r7umtKVRnl5Odzd3UEIgb+/P/T19dG5c2dwOBwEBgaipKRE4TTLm56mvlTFn2886FYkHqDBmJmbmyM6OhoBAQG4f/8+njx5glWrVskcZuN7zX1pS9u0BEAuHyJRfC190Yv0AgMD6VmkxvTq1UtC9q53Lcqan7t372LOnDkwMjLC+vXrMWDAAHTs2BGEEMycOVPmo4QULRdpYch7730gnp6wsDDs2LEDkydPhq+vL7S0tKCiooJHjx5h/fr1Ch3HJArf3d29yaPYpLWptui/rYUQAiUlJRw/frzJUxoGDx7MuG7KV7C5Ux4a2y4ej8fwq29Mjx49GNfvo6zao413794d9vb2sLe3h7+/PxISEpCamgobGxu54hPZf1lmNRvTVnakJWTJqyw2SFb72JxOc5sE28reT5s2DTt27EBUVBSsrKxw6tQpCAQCzJw5s03Cfx98MINOHR0d5OTkQCAQMAycQCBATk4OBgwYING4njx5AmdnZ4ZM9BUvvrGmqUaZlJSE4uJihIaGSoQj6wzlu0RbWxvXr19HYWGhxIxJTk6OXGF5eXlh9erVuH37NuLi4qCsrAw3NzeGjo6ODoRCIbKzsyW+rh8/fkzrAKAdsaXN2LTFzMTgwYPB4XDw6NGjZvVEs0s1NTX05h9FUcS4y4oonY8ePWr2HLvTp09DKBTi559/ZryUCwoK5Np53RblIk97+DsRHR0NQ0ND/PDDDwy5yFYowqBBg8DhcFBcXNzqdvauGTx4MJKSkqCiotKqjYDyxvns2TOYmpr+rc8pbu82bmxsjISEBHrJXJ74KIpCUlISfv/99yYHbU3RVnZEHhrnVR4bJKt9BBrePdJmamV1bWsNGhoamDhxIs6fP48NGzYgKioKBgYG76xftQcfjE+no6MjSktLERkZyZBHRkaipKREYlAIAMePH0dlZSV9XV1djWPHjqFHjx4YPnw4Le/SpUuzyweNZ+LOnDnztzxfzN7eHkDD8Q3i3LlzR+aldRGTJ09Gly5d8NNPP+HcuXOws7OT2JktOnctNDSU8dX9+PFjXLp0CWZmZvQyU//+/aGqqork5GRGGGlpaUhNTZUrbdLo3r07bGxskJSUhFu3bkncF6Vv3Lhx6NWrFw4cOIDCwkIJverqakabaQ6RD1R7LH2OGzcOPXr0wMGDByWO+AEkv/Qbt9HQ0FCJZ9TV1cHhcKSmty3KRZ728HeCw+FAKBQy0szn83Ho0CGFw+zRowdsbW2RlJSEq1evStwXCoUtuis0RXP12Bb6Il/Jbdu2SbgaAJBwM2oLpk+fjoqKCnz33XdS7ysap7x5b+mZtmjjiYmJUo+Mqq2txaVLlwD8v2uYPPG5uLgAaDh54O3bt4ywW5qFlceOyIM8eZXHBslqH4GGAWpaWhrDfpWVlUmMJRRBlvbl6emJ6upqbNiwAbm5ue98f0Bb88HMdPr5+eH8+fMIDAzEw4cPYWJigkePHuHEiRPQ0dHB4sWLJZ7p3r07pk2bhpkzZ4LD4SAqKgr5+fn49ttvGdPn5ubmOHz4MLZv3w4jIyNwOBzY29tj1KhR0NDQoBuLlpYW0tPTER8fD4qiUFNT8y6LoEVsbW1hY2ODH3/8EcXFxRg1ahSePn2K8PBwGBkZyXyOHNDQmaZMmUKfySltOWDs2LGYPHkyzp49i7KyMtjZ2dHHeaioqGDjxo20rqqqKjw9P
XHkyBEsWrQIo0aNoo+qMTExkXtQLI2NGzfCzc0N8+bNg7u7O0xNTVFTU4P09HTo6elh+fLlUFdXR3BwMPz8/ODs7IwZM2ZAV1cX5eXlyM7OxsWLF3H48GFYWVm1GJ+JiQk6dOiA/fv3o66uDp07d4a2tnabfMWqq6tj69atWLJkCT766CP6SJBXr17h2rVrWLp0KWxsbODk5ISIiAjMnTsXs2bNAofDweXLl1FcXCyxtKmmpgYjIyMkJCSAx+NBS0sL6urqsLe3b5Nykac9/J1wdnZGaGgoFi1aBEdHR7x+/RoxMTGt9rnasmULPDw84OvrCxcXF7pd5OXlIeH/2rv3oKjKPwzgD5caQVARAU3BWQSEUFJJEMxVV2SDGXYMRCms0S5ug8rgTElDpU1jERqzUshCNBA3sxCCwQsgkJIGNuANZyAvNYoGlOKFW8Tl/P5w2F/bLsiiK9E+nxlHPOd7zr7v7nLm8bzvOae0FGFhYao507oY6nN8FPUeHh6IiorCnj17IJVK8cILL2Dq1KloaWnB+fPnceLECTQ0NOjc7qFs2LABJ06cQEpKCs6ePYtly5bB0tISN27cwMmTJ2FpaYmsrCyd96tr3x+0zaP4jicnJyMyMhJisRhubm4YP348WlpaUFxcjOvXr0MqlWLBggUAdPudmj9/Pl5//XV8+eWXkMlkCAwMhI2NDRobG3Ho0CGUlJQMOmysy3FEF7r0VZdj0HCPj8D929FFRERg7dq1WLVqFTo6OpCbm4sZM2agpaVlxH0Dhvf9WrhwIZycnFBQUIAnnnhC4wK4scZgQqelpSUOHDgAhUKB8vJy5ObmYsqUKQgLC8PWrVtVk9P/btu2bThz5gzS09Nx8+ZNiEQiJCQkqP5HOGDTpk1oaWnBN998g7t370IQBFRWVmLGjBn46quvVPc+A+7/Yu/btw+7du1SDW/8WxgZGSEpKQnx8fEoKipCaWkpnJ2doVAoUFxcjPr6ep3mqrz44ovIycmBnZ0dli5dqrVmz549cHd3R15eHj766COYmZnBy8sLUVFRGpO8o6Oj0dvbiyNHjuCHH36Au7s7UlNTUVhY+EhCp4ODA4qKivD555/j+++/R15eHiZOnIg5c+bA19dXVScWi1FYWAilUomDBw+itbUVEydOxMyZMyGXy4d977TJkycjPj4eiYmJ2LFjB3p6ehASEvLIhk6kUim+/fZbKJVK7N+/Hx0dHbCxsYGXl5fq7IBYLIZCoYBSqURsbCwsLCywfPlyKBQKtT4PiIuLw4cffoj4+Hh0dXVh+vTpqgPko3hfdPk+/FtERUXB2NgYBQUFqKysxLRp0xASEgIfH5+HOisxbdo0FBUVITk5GUePHsXhw4dhZmaGqVOnQiqVIiAgYMT7HupzfBT1kZGRmDt3LtLT05Geno6uri5YW1tj9uzZ2LFjx4jbPRhTU1OkpaUhKysL+fn5+OyzzwAAtra2mDdvnsbUHl3o2vcHbfOw3/F3330XxcXFqK6uRk1NDe7cuQMLCws4OztDLpcjLCxMrV6X14uJiYG7uzuysrKQkpICQRDw1FNPwc/Pb8jwqOtxZLh07asux6DhHB8B4Pnnn8d7772H9PR07N69G/b29ti8eTMsLCxQW1s74r4NGM73KywsDDt37oS/v7/G3OSxxkj4t83Qp3+l1atX47fffsOPP/447G0G5sRGRETgrbfe0mPriIiI/puys7Oxfft2ZGZmjplnzQ/GYOZ00vBom4NVU1OD06dP6/xlz8zMhLGx8Zi+0o6IiGi09Pf3Izs7Gw4ODlrvIT3WGMzwOg3Pp59+isbGRnh7e8PS0hINDQ3IycnBhAkTsGXLlgdu39PTg+LiYvz666/Yv38/goKCtN6Dj4iIiLRrbm7GTz/9hOPHj+PixYuIjY3V6x1PHhcOr5Oa0tJSpKam4sqVK2hra8OkSZPg4+ODqKgo1RNChnL79m14enrCzMwMzz33HOLi4rQ+d5aIiIi0O3LkCDZt2gQrKyuEhoYiOjqaoZOIiIiIaDg4p5OIiIiI9I6hk4iIiIj0jqGTiIiIiPSOoZOIiIiI9I6hk4iIiIj0jqGTiIiIiPSOoZOIiIiI9I6hk4iIiIj0zuBC57Vr1zB79myUlJSMdlMemebmZjg6OiI+Pn60mzIsgiAgKCgI27ZtG+2mEBER0WNicKEzNjYWLi4u8Pf3H+2mGCwjIyNs3boVeXl5uHDhwmg3h4iIiB4DgwqdP//8M0pKSrB+/fr/xDNMxzKJRAIHBwckJiaOdlOIiIjoMTCo0JmdnQ0zMzMEBASMdlMIQHBwMMrLy9Hc3DzaTSEiIiI9M5jQKQgCDh8+DC8vL5ibm6ut6+3txd69e+Hv7w93d3d4eHhAKpXigw8+0NhPXV0d5HI5PD09MXv2bKxYsQKJiYno6+t7qFpt0tLSEB4eDm9vb7i4uMDX1xcxMTFobW0ddJuioiIEBATA1dUVixcvRkJCgtbXa2howJtvvokFCxbA1dUVK1euhFKpVKuNi4uDo6MjGhoaNLZva2uDm5sbIiIiRtznZcuWoa+vD6WlpcN6P4iIiGjsMh3tBjwuly9fxu3bt/HMM89orPv444+RkZGB0NBQvPbaa+jv78e1a9dw/Phxtbpjx45BLpdjxowZePXVV2FlZYXTp09DoVCgvr4ee/fuHVHtYJRKJZYsWQKxWAxLS0tcuHABeXl5qK2tRVFREZ588km1+oqKCmRkZGD9+vWws7NDWVkZEhIS0NTUhE8++URVV1dXh7CwMJiammLdunWwtbVFRUUFdu/ejbq6OiQlJQG4fyYyJSUF+fn5iImJUXutQ4cOobu7G8HBwSPus5ubG8aNG4fq6mq88sorD3w/iIiIaAwTDMShQ4cEkUgk5OXlaazz9fUV3njjjSG3//PPP4WFCxcKMplM6O7uVluXnJwsiEQi4dSpUzrXDqW9vV1jWUFBgSASiYTCwkLVsqamJkEkEgmOjo7CuXPnVMv7+/uFiIgIQSQSCefPn1ctX716teDk5CTU19er1W7evFkQiURCRUWFarlMJhO8vb2F3t5etXaEhoYKnp6eQk9Pz0P1ecWKFYKfn98D3wsiIiIa2wxmeH1gSHrSpEka6ywtLVFfX691GHnAiRMncPPmTaxbtw6CIKC7u1v1RyaTqWp0rR3K+PHjVT/39fWhu7sby5Ytg7GxMc6ePatR7+PjAw8PD9W/jYyMIJfLAUB1i6hbt26htrYWfn5+cHV1Vavd5trfBwAABRlJREFUvHmzWi0AhISE4Pfff1drb2NjI2prayGTyWBqavpQfbayssKtW7ce+F4QERHR2GYww+sDV6sLgqCxLiYmBpGRkQgMDIS9vT0WLVoEiUQCPz8/mJiYALg/PA8A0dHRiI6O1voaA+FJl9qhnDp1CkqlEmfOnEFbW5vaurt372rUOzk5DbqssbFR7W9nZ2ettSYmJqoaAAgKCsLOnTuRn5+PpUuXAgDy8/MhCILa0PpI+ywIAu8kQEREZAAMJnROnjwZgPawJhaLUVlZiWPHjqGqqgpV
VVXIzc2Fh4cHvv76a5iZmanC6o4dO9TOJv6dtbU1AOhUO5iamhqsW7cObm5ueP/99zF9+nSMGzcOgiBg7dq16O/v19hmOOFNW+geipWVFSQSCY4ePYqOjg6MHz8eBQUFcHZ2xpw5czT2q2uf79y5o/psiIiI6L/LYEKni4sLAODq1ata10+YMAEymUw1FJycnIxdu3bh4MGDCA0NhUgkAgB0dXVh/vz5Q76WLrWD+e6779Df34+MjAxYWVmpljc2Ng569fulS5c0ll25cgUAYG9vDwBwcHAYsravr09VOyA4OBilpaU4fPgwHB0dcfXqVY2zmSPpc29vL27cuAGJRDKseiIiIhq7DGZO56xZszBlyhScO3dObbkgCLh3755G/bx58wD8/8zokiVLYG1tjdTUVDQ1NWnUd3Z2or29XefawQyctezt7VVbrlQqB92mqqpK4wk/X3zxBQConsBkbW0NT09PlJWV4eLFi6o6QRBUV5dLpVK1fUgkElhZWSE/Px/5+fkwNjbGqlWr1GpG0uf6+np0d3fD29tbbfn169e1TikgIiKisctgznQCQGBgIHJzc9HZ2am6V+dff/0Fb29vrFy5Ek8//TRsbW3R3NyM7OxsmJubqwKYubk54uPjIZfLIZVKsWbNGsyaNQv37t3D5cuXUVJSgrS0NDz77LM61Q7G398f+/btw8svv4yXXnoJRkZGKCsrwx9//KG6eOefXF1dER4ejvXr18PW1hbl5eU4duwYVq9erTbkvX37doSFhWHNmjVqt0yqrKyEVCrF8uXL1fZramoKmUyGzMxM1NXVYfHixbCzs1OrGUmfKyoqYGJiovFI0tTUVGRlZSE9PV01j5SIiIjGNoMKneHh4cjMzMSRI0cQEhIC4H6g2rBhA6qrq3Hy5Em0t7fDxsYGixYtQkREhNpQs1gsRmFhIZRKJQ4ePIjW1lZMnDgRM2fOhFwuV7s4R5dabcRiMRQKBZRKJWJjY2FhYYHly5dDoVDA19dX6zYSiQQuLi5ISkrCL7/8Amtra0RGRmLLli1qdXPnzsWBAwewZ88e5OTkoLOzE/b29nj77bexceNGrfsOCQlBRkYGOjs71S4g+mebdelzQUEBVqxYgalTpw75XhAREdHYZyToemXJGBcREYFr166hqKiIV02PovLycmzcuBGFhYVqFyQRERHRf5PBzOkc8M477+DSpUt89OIoS0hIQHBwMAMnERGRgTC4M51ERERE9PgZ3JlOIiIiInr8GDqJiIiISO8YOomIiIhI7xg6iYiIiEjvGDqJiIiISO8YOomIiIhI7xg6iYiIiEjvGDqJiIiISO8YOomIiIhI7xg6iYiIiEjvGDqJiIiISO8YOomIiIhI7xg6iYiIiEjvGDqJiIiISO8YOomIiIhI7xg6iYiIiEjvGDqJiIiISO8YOomIiIhI7xg6iYiIiEjv/gcIyU6UARQbDAAAAABJRU5ErkJggg==)",
"_____no_output_____"
],
[
"They also explain what they mean by a \"vital record\":",
"_____no_output_____"
],
[
"![Screenshot from 2022-04-04 09-51-45.png](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAV0AAAKWCAYAAADqT0pdAAAABHNCSVQICAgIfAhkiAAAABl0RVh0U29mdHdhcmUAZ25vbWUtc2NyZWVuc2hvdO8Dvz4AAAArdEVYdENyZWF0aW9uIFRpbWUATW9uIDA0IEFwciAyMDIyIDA5OjUxOjQ5IENFU1Quk9j+AAAgAElEQVR4nOzdd1hUZ97/8Tcdht6rVAVpooC9N9RoLLElJiabrBsTk2w2ye/ZbPZ5NmVb1mx2UzZ1s0nWGBM19oIFLCigooJgQRGQDgJD721+fyAjA0yhOCK5X9e114aZU+5zzpzv3Oee4/noyGQyGYIgCIJW6N7vBgiCIPyciKIrCIKgRaLoCoIgaJG+tKLqfrdBEARhyLC1slD5vujpCoIgaJEouoIgCFokiq4gCIIWiaIrCIKgRaLoCoIgaJEouoIgCFokiq4gCIIWiaIrCIKgRaLoCoIgaJEouoIgCFokiq4gCIIWiaIrCIKgRaLoCoIgaJEouoIgCFokiq4gCIIWiaIrCIKgRaLoCoIgaJEouoIgCFokiq4gCIIWiaIrCIKgRVotuhu/2szFq9flf1dW1/DBf7fy0p//wct/+adGy8gtKua5tzZSU1ff49998fmPu9h6MKrP8/eka7siY+LZ+NVmlfMcO3OBP376db/WOxDLUObDTVvZHRVzT5bdF33Z1ntxrNW1YyA+ow+6trY2/vDRv0nLyr3fTbnvtFZ0L169TmNTM6EBI+WvHYk9R2trK39+eT1/e+0FbTVFK0xNjJk4OggDfT2trtfZwZaQkSP6tYzsgiKee2sjdQ2NA9Sqe6Mv2+rn5YGXm8s9apGgjK6uLgumTWTX0RP3uyn3nb62VnTszAUmhwajo3P3tZKycjzdnLE0N9NWM7TGxtKCp5Yt1Pp6A3y8CPDx0vp6tUkmA5msrU/bOmtC2D1qlaBOWOBItkVGkZmbj/cw1/vdnPtGK0VXWlFJZm4+zyxfJH/ttY0fU1tXz+W0DKLjzzM5dBTL583i1Xc/5I31T+Hh4iSf9rd//4TlETMZHxKocj0lZRW8+fGX/P65pxnm5CB/PeZ8EgdOxPK3115AT099514mg+j4BE5dSKKiqgY7ayvmThrLpNBR8mmupd9i++FjlJZX4ObowPypE/hi627efW0D1hbm5BYV85fPv+X913+NmcTkblsSkjh0+gy19fUEDvfmicXzFd7v6srNDPYfj6WguBRzMwnhgf48PGsKBvo9H7pjZy4Ql5jMmy/8EmgfEnBxsKe5pYVLqWno6OgwaUwwS2ZPV/gC7CCtqOTdLzcB8Oq7HwIQHuTPupWLAWiTtbEr6iRxF5PR09PrtqyGxkZ2Hj3JpdQ0mppbcHNyYHnEDJUnWWV1Ddsio7mWcQuZTIaflwerFszBztoSgPScPN7/egvrVy9l3/FYSssreGntSnILixW2tam5ma0Ho7h49QYG+npMDR+NtKISA3191i5ZALQPL1hbmPPowrkAbHjnPR59aC6Xrt8kM7cAM4kJy+ZMJyzo7hXZwZh4zl++hrSiEjOJhJCRw1k2ZwZGhgZKt0mVxqZmfjhwhJtZuVTX1mFjZcHMcWHMGB8qn0aT46Zue+saGjU6nzTZvqNx54iOP09DYxNBI7zxcnPh1IUk/vTyekCzc8bI0AA/Lw8SUq6JonuvpWfnITExxs7aSv7aP17/NR9v3o6rgz3L580E6PflrL2NFb6eHsQnprD6oTny1+MTU5g4Olijggtw4GQsCSlXWbVgDs72duQWFrF57yGMjY0IDfCjvKqaz3/cxbSxY5gxbgy3pWVsizymdrkFxaWk3Ejn12tX0tjUzOa9h/luTyQb1izvcfrrmdl8tX0vK+fPxs/Lg8qaGrYejKKpuVleNDRx+sIl1i5ZwGMLIygqlfLef77Hw8WJMQF+3aa1tbLkjfVP8e6Xm/jnG79BYmyk8H7cxWSmho/h1afXUFBcwqbdkQrL+uyHnejr6bNhzXLMJBISr13no++28dYL67Cxsui2PpkMvti6G5lMxstPrkZPT48dh4/x6ZYd/GHDM+jq3v1mOHbmAi89sRIrC3NaW1vJLSxWWNauoydJzczm+ccewdrSnKi4BFJupBMWOLLrahUcjj3Ls6uW4uHiTELKVb7dfQAfd1esLMwB0NfTZc2iedhaWSCtqGJbZBS7o0/y6EOaH4POWtvacLS1YfaEcEwlJmTlFfL9/sNYmpsqHBN1x62v29uVuu1LvHaDfcdP89jCCPy8PLiSlsG+E6cxNTGWL0PdOdPB09WZC1eud2vDz4lWxnSlFZVYmmlnCGFqWAgJKVdpaWkFoKC4hOyCIiaFBms0f3NzC1FxCTy2KIJgXx/srC0ZE+DHrIljib2QDLSfDDZWFqyYNwt7G2uCRvgwZ9JYtctubW3lF48sxMXBHi83F55YPI+UG+kUS8t7nP7QqXhmjg9jSlgI9jZWDHd3Y9WCOcQlpiCTyTTcIzDS24NxowLQ1dXBxcGOwBFepGZmazx/Z66ODiybOx1XR3vGBgcQOPzusm5m55KZV8Czq5fi5eaCvY0V86ZMaC9ml6/1uLyb2Tlk5RfwzPKH8XJzwd3ZkV+uWExxWRkpaekK0y6dOx0bKwt0dXUwMFDsLzQ1NxN7MZlH5k5npLcHjrY2rFkUgbGR4pdGT2aOD8PT1RkdHRgfEojE2Jj0nDz5+/OmTMDXcxi2Vpb4eg5jecRMzl9O7e2uk5MYG/HQ9Em4uzhha2VJWNBIpo8dQ0KXZao6bv3Z3q7Ubd/xMxeYODqYyaGjsLO2ZMb4UPx9POXva3LOdLAyN0NaUdnrNg4lWunpNre0aO0HpdH+vvwYGUXy9ZuEBY0kLjEFH3c3nOxsNZq/qFRKU3MzH3+3vdt79jbtPfWiEqn8JO3g5eqsdtl2NlaYm0rkf3u6uqCrq0tRqRQHW+tu02cX3ObGrRwOnz7b7b2K6hqs7/TE1HGyV9x2c4mEiuoajebtyr3TZSqAlaU5ldW1AOQUFNHS0spv/vpBt/k69l1XhcVSzE1NFbbf0twMO2srCotLGd3phzIXB3ul7Sotr6SltVXhRzJdXV2FYSZlun42zCQmVNXUyf9OuZHOoVNnKCyR0tB492qssam5z0MMR2PPEZeUQllFFc0tLUB7L1ChXSqOW3+2tyt121dUKmXSGMVOi6erMzkFRYBm50wHAwN9mluae93GoUQrRddMYkJtfYPa6XR7GGOE9ttNNKWvr8eEkEDiklIY7e9LQso1Hpk7Q+P5O3qQb77wjNKTXPM+pkZrVNmWRx+aqzDW1xe6Ot0vaHrTU+5MX0/xy1MHHfmy2mQyzCQmvP/6r3u1TJ2eBpd7eF31F3ffj4puDx+8jm26XVrGl9t2s2rBHEID/DCTmJCRk8/732yhtRefy87iE1M4EneOdSsW4+XmjLGREQdj4km6dkOxXSqPm/rt1eR80mT71K1Jk
3OmQ219A2YSicpphjqtDC8Mc3akrLKKhsYmldMZGxlhoK9PTd3dXkZtfUOv72+cEhZCakYWMecTaW5pIbQXY1xO9rYYGOiTfD1d6TTO9rZk5RfSuW7dyi9Uu+zSsgqqa+9uW1Z+AW1tMqW9cHdnR5Jv3NS47QOlo7DKellU3J2dqKmrJyM3X+N5nB1sqaqpoaSsQv5aZXUNpeUVONtrdnUCYGdthb6eHrfyCuSvtbW1kVtUrGIu9bLyCzE3NWX62DGYm0rQ0dEhp/B2v5aZkZtP4HAv/H085cMBub1cpibbq8n5pMn2OdvZdvt8Z3X6W5NzpkNBcSnuzk5qpxvKtFJ0vdxcMDYyJCM3T+20I709OH0hmdbWNhqbmtkWGdVjT0QVZ3s7vN1c2HX0JOFB/r26BDQ0MGDe5PEcOnWGE+cucltaRl5RMTHnkzh+9iIAU8NHU1ZRxY4jxykpK+fKzUyi488DoKqlenp6/HfXQQqKS7mVV8D3+44Q7OvT49ACwMIZk7memc3Wg1HkFRVzW1pG0rUbbIuM1nh7+sLawhxdXR1S0jKorq1T+2XZwdfTHV9Pd77ZsZ9LqWmUlldyK6+AfcdPc13JGLKvpzueri58s3M/WfmF5BTe5usd+3CwsSHYd7jGbTY0MGBKWAi7o2K4cSubYmk5Pxw4SkNjo9KetCac7G2prq0l+86ldP7tEqLjE/q8PGgfzriVV0jtneJ38ep1rqRl9GoZmm6vuvNJk+2bNTGcM5cuE5+YQml5JScTEknNyKLj067JOdMhPTuXoBHe8r8bm5o5eS5R4Ut3qNPK8IKBvj4TQoJISLlG4HBvldM+tjCCTXsi+e3f/4WpxISF0ycrPWFVmRw6iozcfCZ3umVFU4tmTsHc1JSTCYnsPHICYyND3JwciJg8HmgvSs89toyfDh0nJiERNycHHp45hW927ld6KxeAi4MdQb7efPzdNmrrGwgY7sUTi+crnX6ktwcvP7maAydjee8/36Orq4ODrQ0T1Nw6118SE2OWzZ3BnugYvttzkLDAu7eMqaKjAy88vpx9x0+zLTKaqtpaLExN8XZ3ZUJIkNL5nnt0GVsjo/ngv1sBGb6e7jy5dGGvv2wfiZhBY3Mzn/2wC319PaaFj2aktyf6/fg9wcPFiUfmzuCLH3cB7fdfPzR9Et/vO9znZc4cH0ZhSSlvf/If9PX18R7mwpxJY7lyM7NXy9Fke9WdT5psX2iAH6XlFew5dkp+y9jsCeFcvHp3OETdOQPtBb2kvIJxnT6/dfX1bI2M4rnHHlE67j/U6JSWVw7sEKUS5VXV/Omzb/jDhmc0/gGoPw6ejOPi1Ru8+cIz93xdAGcuXeGnw8f4x+sv93j/q6B9MpmMNz/+NzPHhzFrQvj9bs49p83t3bQnkvr6Bp577JFezWNpZsrSOdPlr11Oy2DT7oP85ZXnMDI0vBdN1TrbHm6N7Exr/yLN2sKctUsWUFZReU+LbmNTE4UlUk4mJLJk9rR7tp7Yi8l4ujpjbiohp/A2e4+dYtKYYFFw76P82yXclpbh5eZCU1Mz0WfOU1VT1+v7Vh8U2trelpZWTl24RLCvN3p6eqTcSOdc8lWee3SZxstoa5PhYGPNzC4/CqfdymHu5HFDpuBqQms9XW35Zud+Eq/dYIy/H08/sqjXl6ia+unwMc5fTqWuvgFrSwvGBfvz0PTJGv8DDGHg5RYVs2n3QYql5ejr6TLM2ZEV82YxzNnxfjftntDW9ra0tPLx5u3kFt2mpbUVR1sb5k+ZQHiw/4CuZ6hQ19MdckVXEAThflJXdEW3TBAEQYtE0RUEQdAiUXQFQRC0SBRdQRAELRJFVxAEQYtE0RUEQdAiUXRpf8j6Hz/9mg3v/J1Pt+zQeD5Nwib7KiYhid/94zOef3sjUXGa/Vv/V979kMQ7T6q612GIImxREPpGa/8ibTDbGhlFwHAvXn16jdJnJ7z8l3/y9PKHFZ7veq/U1NWz7VAU61YuYaSXB4YGfXtm6710v4I3BeFBJ3q6tAdk+nq6YyYx6fNDqQeStKKStjYZAT5eSEyM+/XAlnulI3jz5/TPNwVhIGitpxufmMLh2LNI7zx7Yc7Ecb0O4hvo0MOC4hL++Ok3QHu2F8DTjyzqFoD5+w8+p7GpWf4kJgszU977nxfl759Lvsr+E6epqWvA38eTtYvnI+mUH9WbcMmT5xLZGhkFIE9g6Ai7VLcP1cnIzWfXkRNkFxZhYmREWOBIlkfMxMBAnys3M/lq+14+eONldHV1KZaW8+bH/2ba2DGsWRQBwN5jp8jKK+Tlp1Z3C96MjInncloGY4P9iT5znoaGRvy8u+8LdQGHgjDUaaXoXrmZwff7D7M8YiZBvj6k3cphW2Q0phJjxgYHyKdTF8Q30KGHLg72fPHO67z4p/dZv3oZwb4+Pbb/r688r3R4oahUyoUrqaxbuYTmlhY27T7IvhOn5aF+vQ2XnDE+FA9XJzZ+tZmP/vcVeU9S032oTFVNLf/avJ3QAD+eWDKfssoqvt97mNa2Nh5/eB4jPNxobmkhu6AILzcX0rJyMJOYcOPW3ccApt3KIUjJPoL2B3F7ujrz9ovraGlp5aPvtinsC00CDs8lX+XbXQe6pSgLwlChleGFo7HnCA/yZ/bEsTja2jA1fDRTwkI4EntOYTpVQXz3OvSwr9raZPxyxcN4ujozwmMYM8eHcT3jbqEaqHBJTfehMqfOJyExMeaJxfNxtrcjcLg3KxfMJi4xmdq6eowMDXF3duTGrRwA0rJymDE+jLKKKiqra2hqbiaroBBfz2FK12FkaMCKebMwNDBAYmLMpNBRCvtCXcAhgLmpBC83F/R0xciXMDRppadbWCLt1hsb7u52p/AgHz5QFcR3r0MP+8rB1lohgdXKwpyq2lr53wMVLqnpPlQ1v/cwV3Q7FbPh7m60tckokpbhI3HF18udtFs5zJ86gbSsXGZNCOdGZvadXq8EPV09PF1dlK7DztpK4Slr5qYShX2hLuAQIGC4FwHDvVRvjCA8wAbV3QuqgvjudehhX3ULatTRUejBDlS45EDQURIm1PGqr6c7MQmJFJaU0tDYhLuzE75e7ty4lYO5qQSfYa4qH12p20PvVGFf9Kv1gjA0aOUaztnelvQcxXy09Jw8nOxsNH7o9/0MPYT2fLPeBjXCwIVL9ncfOtvbcisvn7a2u6UvPScPXV0dHG1tAO6M67ZyNPYcw93d0NXVwdezveim3crB18u9f9ugJuBQEH4OtFJ0I6ZM4PzlVI6fvUCxtJzTFy4RezGZiCnj1c98x/0MPQSws7IkNTObqppajeLkOwxUuGR/9+G0sWOoqatny/7DFJVIuZZ+i58OHWNyaAimd36w6hjXPZdyFb87BdZ7mAvlVVXcyi9QOZ6rCXUBhwDXMm7x96+3UN/Q2K91CcJgpZXhhaAR3jzx8DwOx55l59ETWFuYs3zeTI1+de9wv0MPl82dwdbIKOISkzE1MVG4ZUyVgQqX7O8+tDAz5aW1q9h55AR//uJbjA0NCQ/yZ3nETIXp/Lw8yMov
lPdqDfT18XZzISu/SOV4riY0CTisrqkjIyeP1j5cVQjCg0AkRwj3VV8CDgVhMBPJEcKg0dLSyvGzFykpK6essoqTCYmcS77KpNBR97tpgqA1g+ruBWHou5Saxv4Tp+UBh888sohRfr0bXxeEB5kYXhAEQRhAYnhBEARhEBFFVxAEQYtE0RUEQdAiUXQFQRC0SBRdQRAELRJFVxAEQYtE0RUEQdAiUXQFQRC0SBRdQRAELRJFVxAEQYtE0RUEQdAiUXQFQRC0SBRdQRAELRJFVxAEQYtE0RUEQdAiUXQFQRC0SBRdQRAELRJFVxAEQYtE0RUEQdAiUXQFQRC0SBRdQRAELRJFVxAEQYt+1kX32JkL/PHTr+93M+TSs/P446dfs+Gdv/Pplh33uzkAbHjnPa6mZyp9P7eomOfe2khNXf09Wf791tP23czO5Z1PvmbDO+/x6ZYdpGZm8dxbG+9jK4UHif79bsBAamlt5Xfvf0pzSyt/e20DJsZG97tJvbI1MoqA4V68+vQaDPQfjENjamLMxNFBGOjrqZwuKi6BcylX+b/nn9ZSy3rv0y07sLexZtWC2fLXetq+bZHRBI7w4rVn2o+TtKKScaMCBqwd2QVFvPvlJv75xm+QPGCfYUG9IdXTTU69iZ21Fd5uLiRcvna/m9NrJWXl+Hq6YyYxwcjQoE/LaGltHeBWqWZjacFTyxZiZGjY4/syGbS1tWm1Tb3V2qq8fT1tX9fj5OJgxzPLH9ZGU4Uh4MHoTmkoLjGF8SGBSIyNOXb2AtPHjun1MuITUzgcexZpRSXWFubMmTiOGeND5e9veOc9Hn1oLpeu3yQztwAziQnL5kwnLGikfJqSsgq+33eIjNx8rMzNWTZnOj8dOc6yOdMZHxLYbZ0FxSX88dNvAPjsh50APP3IIsaHBJKRm8+uIyfILizCxMiIsMCRLI+YiYFB+6H7cNNWnOxsaWxqIiUtA1cHe159+jE2vPMeaxcv4OLVG9zIysbGwoLHF8/H3tqSzfsOk56dh72NFU8ufQgPFyeV+6S8spqPv9vOzexcLMxMWTJ7mrxnl1tUzF8+/5b3X/81ZhIT0nPyeP/rLaxfvZR9x2MpLa9g1oQwjsSeA5Bfhq9ZFMG0O8enpraeL7bu5lr6LSzMTLvtz65kMjh+9jwx55Moq6jCTGLCuFGBPBIxA4CGxkZ2Hj3JpdQ0mppbcHNyYHnEDLyHuQIQGRPP5bQMQkaOICYhkcqaWsaPCuByWgYAx89eAODtl9bR3NIq376qmppux6mzL955Xf7fV9MzOXAijryiYgwN9PFyc+XZ1UswNDAgKTWNo7HnKCyRoqeni88wV1YtmIOdtSXSikre/XITAK+++yEA4UH+rFu5GJkMouMTOHUhiYqqGuysrZg7aSyTQkepPH7C4DIoi+7ltAwaG5sAMDeT4OfloXaesooq0rJyeHr5IgwNDPjhwBFyi4oZ5uSg8Xqv3Mzg+/2HWR4xkyBfH9Ju5bAtMhpTiTFjg+9ePh6OPcuzq5bi4eJMQspVvt19AB93V6wszJHJ4IutuzCTSHjj2adobWtje2Q0tSrGPF0c7Pnindd58U/vs371MoJ9fQCoqqnlX5u3ExrgxxNL5lNWWcX3ew/T2tbG4w/Pk89/+uIl1iyax+OL59Paqae770Qsi2dNZdnc6RyNO8fXO/bhaGvD1PAQlkfM5MDJOL7deYC3XlyHjo7y/bL32CmWR8xkzcMRXLhynW93HcDRzkZlsT525gIvPbESKwtzWltbMZNIlA4v7ImOYdHMKSyeNYWElGsK+7Mn+0+c5vjZi6yYNxM/L3eqa+vIKbgtf/+zH3air6fPhjXLMZNISLx2nY++28ZbL6zDxsoCgJzCIlwd7XnrxV8COhgbGVJTV99teCG3qFjtcUrNzOKjTdvk06VmZPHplh1ETB7P2iULkMlkpGZk0dYmA6C5uYX5Uyfg4mBHY1Mzh06d4Yutu/jf536BrZUlb6x/qsfhhQMnY0lIucqqBXNwtrcjt7CIzXsPYWxsRGiAn/IDKAwqg254ISM3Hwdba8KD/QkP9sfQwICC4lK188UlpeDv44m5qQQjQwNG+/sSl5jcq3UfjT1HeJA/syeOvVOcRjMlLETeS+swc3wYnq7O6Ogg71mn5+QBcONWNgXFpfzikYW4Otrj7uzIE4vn09zS0qu2AJw6n4TExJgnFs/H2d6OwOHerFwwm7jEZIUiPsJjGJNDR6Gvp6dwGTw5dBQTRwfh6mjPktnTqKyuIWiEN2ODA3B1tGfxrCkUlUqpqK5W2Y5xowKZMDoIO2sr5k+dgL+3J8fOnFc5z9K507GxskBXV0feK1dmUugoJoeOwsWhvZ2GBgby/dlVU3MzUfEJLJ09jSlhIdjbWOM9zFV+NXIzO5fMvAKeXb0ULzcX7G2smDdlQvsXZKchJ309fVYtmI2xkRHGRj0PjfRVZEw8ISN9WTpnOi4Odrg62jNn0lj5esaNCiBk5Ajsbaxxc3LgqWUPUVBcQmGJ8s95c3MLUXEJPLYogmBfH+ysLRkT4MesiWOJvdC7z7lwfw26nm55ZRU+dy4DAbzcXLhwJRUXBzul88hkMs4kXZZfXgJMCAniq5/2tl+Ka/ijVGGJVKFHCzDc3Y24xBRkMuS9QSc7W4VpzCQmVNXUAVBUKsXKwhzrTr00J3tbhRP7r19uIq+ovWdma2XJn15er7Q93sNc0dW9+9043N2NtjYZRdIyfCTt+8nFwb7H+d2dHeX/bWlmBsCwHl6rrq1TaG9X3m4uin+7u5J8/abS6VW1qSede8w6OjpYmpnJ92dXRaVlNDe3MNK756ufnIIiWlpa+c1fP+j2nr2NlcJ/Gxr0bdxcndyi2yybO0Pp+wXFpeyJjuFWXgE1dXXI2jvAlFVWKd1vRaVSmpqb+fi77d3e67xdwuA36Iqunp4ejU1N8h5bVU0tEmNjlfNcy7hFWWUV3+zczzc798tfb2uTkXQtbUB/WQbQ1e1+LS7rOHNkPc8j6/T6+tVL5T1fPV3Vv/rr0PN1f+dXlX2p6OndLdYdXxidX+t4USZT0mhlNJhe3d0MnSm06U6zlLZJzbrbZDLMJCa8//qv1bTv3n30VTVRJoNPvv+JgOFe/O7ZJ7EyN0dXV4cX//Q+LSp+0OvYH2++8EyvvtCEwWfQFd1RvsOJT0rByd6WtrY2pBVVTBoTrHKeuIspjPH35eFZUxReP372InGJyRoXXWd7W9Jz8pgaPlr+WnpOHk52NirHPDtzsreloqqa8qpqee+xqERKY1OTfBpbK0uN23M2+QptbTJ5oU/PyUNXVwdHWxvNGjQAMvMKFH7YyswtwNneVsUc3enp6cnHNPvDyd4WAwN9rmdm49RDG9ydnaipqycjN1/hiknjNsr6f6eFu7Mj1zOze/wht6K6mrLKKuZNGS//HOTfLlG4g0Jfr/0LS9bpro+O7U6+ni6K7gNu0I3p6unpMjV8NA421rg42KstuDV19aTcSGfimGBcHOwV/jc5dBRpWTmUlFVotO6IKRM4fzmV42cvUCw
t5/SFS8ReTCZiyniN2+/n5YGLgx3/3XWQ/Nsl5Bbe5vt9hzHQ10dH08p9x7SxY6ipq2fL/sMUlUi5ln6Lnw4dY3JoCKYSk14tqz8SUq5yLvkqpeUVHIk9S2rmLWZNCO/VMtp/ma8gt/A2NXX1fRrjBjA0MGDOxLHsOXaKuMQUSsoqyMovJCYhCQBfT3d8Pd35Zsd+LqWmUVpeya28AvYdP831zGy1bczKK0RaUUlNXX2fvyQWTJvIpdQ09kTHUFhSSmFJKcfPXqChsQkLU1MkJsbyOyVq6xvYfiha4bNhbdHe+01Jy6C6to6GxiYMDQyYN3k8h06d4cS5i9yWlpFXVEzM+SSOn73Yp3YK98eg6+l2sDQ302i6s5euoK+vR4CPV7f3vNxcsLawIC4xmaVzpqtdVtAIb554eB6HY8+y8+gJrC3MWT5vZrdxXlV0dOC5R5exee9h3v1yE1YW7beM/XDgSK8uuQEszEx5ae0qdh45wZ+/+BZjQ0PCg/xZHjGzV8vpryWzpxhWiE8AACAASURBVHHm0mU27zuEhakpv1i2CE9X514tI2iENyEjR/DBpq3U1Tco3DLWW4tnTcPE2IhDp87ww4EjmJuaMn5U+614OjrwwuPL2Xf8NNsio6mqrcXC1BRvd1cmhASpXO6s8eFk5Rfyzidf09TczNsvretT+wKGe7FhzXIOnIglOv48RoYGeA9zZUpYCHp6uvxq1RK2Rx7jaOw5jI2NWDR9Mln5hfL5JSbGLJs7gz3RMXy35yBhge23jC2aOQVzU1NOJiSy88gJjI0McXNyIGKy5p0C4f7TKS2v7P81n6BSWUUVv//gc/7v+adx68UtbIIgPHhs79yWqMyg7ek+yFJupGNsZISTnQ1llVX8dPg47s6OouAKgiCK7r1Q39DI9kPRVFTVYCoxwc/LnZXzZ6ufURCEIU8MLwiCIAwgdcMLg+7uBUEQhKFMFF1BEAQtEkVXEARBi0TRFQRB0CJRdAVBELRIFF1BEAQtEkVXEARBi0TRVWGwJNUOtpTg9Jw8nntro8qH1vQnaVmT5d9vPW3fyYREXn//U557ayNRcQnsPXaKf377w31qYd9FxsSz8avNWl3n5z/uYuvBKK2u834ZUv8i7UFKA375L//k6eUPM3rkCLXTPogpwc4OtoRosG09JfAONj0dq67bV1NXz/ZDx1i3cjEjvTwwNDAgJS19QB+U/iAkKveVn5cHpiaqn5s9VDwYZ7CGOtKATYyMSLh8rU/BlPdaS2ur/HmpmiopK2fxrKmY9eNxjn1Zb38E+Hj1+OS3Dq2tbd0eXj7YqNpnXbdPWlFJW1sbAT5e8pQQkVumuVkTwu53E7RmSBXd3qYBF0vL2RYZTWZePq2tbdhZW/JIxAyCRvjIp1GXVNuXtN7Sigoam5r54sddQPsjHN/7nxe7tU9VSrC61OKe1rt49lTe/3oLrz79GNsPHeN2aRmers6sW7mY29IytkVGU1pewQiPYTy1bCHmphKV++9mdi47j5zgtrQMVwd7Hl88Xx4RdOzMBeISk3nzhV8CvUvg7ZBTUMT2Q8coKCnF1cGetUsW4Oqo/AHeDY1N7I4+yaXUm9TW12NrZcnC6ZPlD7EvKpWy88gJ0rJyMdDXw9fTnZULZssfNt+bY9V5+06eS2RrZPulcdeYIF/PYbz69BpAfYrxwZh4zl++hrSiEjOJhJCRw1k2ZwZGhgacSbrMzqMngO6JyurSj3tyNT2TvdGnKCyVYqCnh7O9Hb9csVge3AlwLvkq+0+cpqauAX8fT9Yuno/kTm+0pbWVvcdOcS75KnX1Dbg62vNIxAx5iOzf/v0dYwJ8mTdlAgDf7NxPQso13vufF7EwM6WpuZlX3v2QV3/xGD7ubnz+4y6sLcx5dOFc+bFwcbCnuaWFS6lp6OjoMGlMMEtmT5cHCvQ2dXuwGJRFV1tpwN/ticTKwpzfPfskBnr6FJSUYmyoGFKoKqm2r2m9RoaGGg0vKEuf1TS1uOt6O5Jtd0fFsHzeTCTGxmzee4ivtu+lTSZj1YI5GBros2l3JPuOn1bYhp7sOnqS1Q/NwcLMlAMn4vjk+5/482/WK72k1jSBtyOU8qfDx3l41lSszM3YHRXDNzv384cNz/S4bJkMPtnyE9W1daxdsgAnOxtuS8toam4fF66qqeUf3/zQrch9tmUHb6x/Sp5D15djNWN8KB6uTmz8ajMf/e8r8qipvcdOkdEpYFNdirG+ni5rFs3D1soCaUUV2yKj2B19kkcfmsvEMcHU1NX3OLygSfpxZ41NTXyxdTcPz5jC2GB/mlpayMorVEhHKSqVcuFKKutWLqG5pYVNuw+y78RpHn2ovSjuO3aas5eu8MTi+Tja2RCTkMS/vv+Jt1/8FXbWlvh6uXPjVo686KZl5WImMSHtVg7hwf5k5OSjp6uHp6tLt/Z1OH3hEmuXLOCxhREUlUp57z/f4+HixJgAvz6lbg8Wg+76TptpwNLKKkZ4DsPR1gYbKwuCRngz3MNNYRpVSbX9TevtK01Ti5Wtd9ncGfh7e+Lh4sSciWNJz8lj1YLZ+Hm54+XmwszxYVzPzFLbjsWzpuLr6Y6TnS1rlyygpbWVhJRrSqfvbQLvsrkzCBrhjZuTA0vnTCP/dglVNbU9TpuWlUN6dh7rVy8laIQ3dtZWBA73Zoy/L9B+AtvbWLNy/iyc7e1wtrfjF0sXUlgqVXiA+EAfqw7qUowB5k2ZgK/nMGytLPH1HMbyiJmcv5yqcrmaph93Vl1bR3NzC6P8hmNtaYGjrQ3jQwKxtrxboNvaZPxyxcN4ujozwmNY+2cioz15o7mlhePnLrBk9jRCRo7Ayc6WVQvm4GBjI79i8fV0JyMnj7a2Noql5dQ3NDI1fDQ3snKA9uPlM8xV5RDTSG8Pxo0KQFdXBxcHOwJHeJF6J/1jIFO3tW3Q9XS1mQY8Z2I42yKjOH/5Gv4+XozxH9Etf0pVUm1/03o7q29o5LWNH91t/+hgnlyyoMdpNU0t1igl+E5CxzCnzq+ZUl3bcxpvZ16dUoKNDA1wc3SgsESqdPreJvB6uNxtk9WdIYDq2joszEy7TZtbeBtzU4nSbc4pKCIzN19+ad5ZSVmF/FL8XuWPqUsxhvbnMB86dYbCEikNjY3y1xubmjEyVHL1oGH6cWe2VlaM9vflr19uIsDHk5HenoQG+insVwdba4yN7v4QbWVhTlVt+xdeaXkFLS2tjPAYJn9fRwd83F3lMfIjPNxobmklK7+QguJSRni4MdLbkx/2HwEg7VYOQb53h/F60jUDz1wioaK6BtAsdXuwGnRFV5tpwLMnjiVk5AhSbmSQmnGLgyfjWDV/tkLvQ11SbX/SejszNjKUj38CmBj1/84LTVKCUZIS3OuEYEBpFLKa9iij1+lHrI4MsTYl7ZKpWXebTMaYAD/Wr146oG3UmJr9ebu0jC+37WbVgjmEBvhhJjEhIyef97/ZQmub8rBMTdOPO+uIlMrKL+TKzUzOJl9hd3QMv3lqtfyLtOsPiDo9fSZ6+Oh3nA9Ghoa4Ozty41YOhS
Wl+Hq54z3MBWllJcXScrIKChU6ST3R1eneC+5N6vZgNeiKrrbTgO2srZg1IYxZE8LYHRVDXFKKQtFVpT9pvXp6egpprzo6OjjZaZawOxCpxQPhVl4Bo/yGA+29sbzbJYwb1bsfMAYugdeJ6to6CopLeuyturs4EZOQSENjo0IPTtM2ylQUPk2oSzHOyi/E3NRU4cffnMLbCtP0lKjcn/RjT1dnPF2dWTRjMhu/2syFK9cVrl6UsbO2Ql9fj4zsPPnnXCaDjJx8/H3u9uR9vdxJu5VDYamUWRPCMdDXx8vVhUOn4tWO56qjSer2YDXoxnS1mQb844GjpGXlUF5ZRV5RMWlZOb2KFu9PWq+dlSWpmdlU1dRSW9+g8TphYFKLB8K+46dJy8rldmkZm/ceQk9Xt1chnjBwCby+nu74uLvx5bY9XLmZSWl5JamZWVxKTQNg+tgx6Ojo8PmPu0jPyaO0vJLrmdls2hNJnZr9359j1UFdirGTvS3VtbVkFxQB7bHs0fEJiu3oIVG5L+nHBcUlHDgZR3ZBEZXVNVzPzKa4rFzjz76Bvj6zJoSz59gpUm6kU1QqZfuhaIrLyhRSon093bmRlU1DYxPuzu3DdL5e7pxLuap2PFedgUzd1rZB19PtoI004IamJjbtjqSypgYTIyOCRnizohexOv1J6102dwZbI6OIS0zG1MSkx1vGlBmI1OKBsGzudH48cJSSsnJcHO158YmVSscelRmoBF4dHXjx8RXsijrJpt0HqW9sxM7KikUzJgPtx+q369ayO+okn/2wk+bmFmwsLfD38ZTf3qd8O/t+rDpTlWLs4eLEI3NnyG9Ns7G04KHpk/h+32H5/MoSlXubfmxkaEhWXiGnzidRV9+AlYUZs8aHMTk0pBfbMhVZm4zv9x2W3zL20hMrsbWylE8zwsMN0GGEh5v8StDPy52DJ+Pw9XLv7e5TMJCp29om4noEQRgSBkvqtkgDFgRhSHpQU7dF0RUE4YH0oKZui+EFQRCEASTSgAVBEAYRUXQFQRC0SBRdQRAELRJFVxAEQYtE0RUEQdAiUXQFQRC0SBRdYcD0JYxSJpOxee8hXv3bRzz31kb5swceBK+8+yGJ125oZV27ok7y48GjWlmXcG+Jovsz9fJf/sml6zd7Nc/Hm7fz8ebtSt/XNIyys+Qb6SRdu8H//PJx/vnGbxSe6ztYRMUl8OfPv+32+rjgAIVnDdwr5ZVVnDqfxEPTJt3zdQn3nvgXaf2g7bDH+x3mWFFVTVjgSKXvqwuj7EmJtBw7ayuc7ZU/pF4dmQxksjaFh8lrw2OLIrSynpjzSfh5umv8EKie3K99JHT3wBfdkrIK3vz4S37/3NMKeWgx55M4cCKWv732Anp6umoDJLcfOkZJWTkvPL5CvoxT55OIik/gTy+vB3oOLnz16ce6takjhDFohDfxSZepra9npLcnTyyeL0/0LSguYdfRGLILCmlqbsHZ3pZlc6cr5MFteOc9VsybTfL1NLLyi5g7aSyLZk7hys0M9h+PpaC4FHMzCeGB/jw8a4r8Adwb3nmPRx+ay6XrN8nMLcBMYqIQqPn7Dz7vMWxRVVBndW0dldU1zJ4YjjLKwijHBvsTfeY8DQ2N+HnfDTj8ctseku5cnj/31kbsbaz408vr1YYepufk8f7XW1i/ein7jsdSWl7BS2tXEhkTzzAnR1rbWjl76Qq6urosmjGFKWEh/HTkGAkp1zA2NGTxrKlM7PTI0L4GQr7y7oesXbJAnvpbWV3DtshormXcQiaT4eflwaoFc7CzttRofyhz/nIqC6cr9nL7uo98Pbs/3UvV9nde1otPrGB3VAzSiiqc7GzkQaGanoNCuwe+6NrbWOHr6UF8YgqrH5ojfz0+MYWJo4PR09PVOEBSE12DC5XJLijEzcmBP/76WZpbWvhy626+2xPJhjXLgfbk2jEBvjwSMR0DfX0uXLnOp1t28s5L6xSyqo7GnmXdqiX4DHOjqbmJ65nZfLV9Lyvnz8bPy4PKmhq2HoyiqblZnqQKcDj2LM+uWtqelZVyVSFQ86+vPN9j2KKqoE49XV1e+cVjvX4AeG7hbTxdnXn7xXW0tLTy0Xfb5AGH61cv5fDpsyReu8Hv1z8ln0dd6GGHY2cu8NITK7GyMJcfi7jEZGaMD+O1Zx7nemYW2w5FkZKWzgiPYfy/Zx4n5UY63+8/jJ+3BzZ39nNfAyE7aw9K3I1MJuPlJ1ejp6fHjsPH+HTLDv6w4Rn5ow1V7Y+elFdVI62oxMPVWeH1/uyjrlRtf2fR8ef59dpVmJtK+PFglDwoVJNzULhrSOyNqWEhJKRcpaWl/UNVUFxCdkERk0LbezOaBkhqQtPgQn09fVbMm4meni7GRoasXDCLlBvpFEvLAfAe5ioPvLS3sWbBtIm4OtqTfD1dYTnTxo5huLsbOjrtz0E9dCqemePD7oQbWjHc3Y1VC+bcyUe7+xiNmePD8HR1RkcHeSx9eqdk2p6oCuqUmBj36elNRoYGrJg3C0MDAyQmxkwKHSUPOOyJJqGHHZbOnY6NlQW6ujryKxY3J0cWz5qKq6M9syeOxdbKEl0dHfn+nT91IhJjY25m5cqX05dAyK5uZueQlV/AM8sfxsvNBXdnR365YjHFZWWkpN09pr3dH9KKSgCF/LL+7qOuNN3+R+bOwNLcDF1dXWZPCFcIClV3Dgp3PfA9XYDR/r78GBlF8vWbhAWNJC4xBR93N3n8jaYBkprQNLjQztpKoSg729uhp6dLUakUB1tr6hoa2RMdw+W0DCqrq+WJCZ3D/gBcHBXXl11wmxu3cjh8+my3dVZU18ijS7pG/5hJTOSBmspoEtTZW3bWVgo9HXNTiTzgsCeahB526DmWR/GHOEszM4Z1CuLU0WkvYJ2DN/sSCNlVYbEUc1NTHGyt767b3Aw7aysKi0vlVxS93R/NdyLkO2e39XcfdaXp9jt2+kyZmUqAu0Gh6s5B4a4hUXT19fWYEBJIXFIKo/19SUi5xiNzZyhMoy5AsqeIj55CEDUNLmxtU7yUk8lkClE02yKjKJaW8+LjK+QpuR/8dystXS4Buz4FXyaT8ehDc9XmuHVcznadVxVNgjp7q6cfbjQKvVQRetihp4QAPd0ur+nooKfbNVz0bshiXwMhe2yykpiYzq/3dn90/AZQV9/QPem2j/uos95sf0+fqY5zRJNzUGg3JIYXAKaEhZCakUXM+USaW1oI7fQru7O9Lbfy8hWKXtcASXNTCTVdhhqKpWV9bk9peYX80gsgM7cAmUwm/+bPzM1ncugoXB3tMTQwoLW1rVsvpSfuzo4k3+jdrV49URa22BHU+cLjK5g7aRxxSSn9XldvdA497NAReujsMPC9ps6BkOamEnR0dDQKhOzK2cGWqpoahTy+yuoaSssrepW712259nbo6+tRUFIif20g95Em268pVeegcNeQKbrO9nZ4u7mw6+hJwoP8FS6LNAmQHOntQXZBIbfyCgBIz87jbPLVPrdHX0+f/+46SEFxCbfyCvjhwBGCfX3kl59OdrakZmTR2tpGW
1sbu6JOdCv6PVk4YzLXM7PZejCKvKJibkvLSLp2g22R0b1qX09hi/0N6hwImoYeDpS+BkJ25evpjqerC9/s3E9WfiE5hbf5esc+HGxsCPYd3uf26evr4evhzs2suwV2IPeRJtuvKVXnoHDXkBhe6DA5dBQZd3qQnWkSIOnp6szyiFn8e9seWlpb8XF3Y/aEcM4mX+lTW1wc7Aj28+Hj77ZTW99AwHAvnlg8X/7+6ofmsnnvId7452cYGugTHuSv0T2uI709ePnJ1Rw4Gct7//keXV0dHGxtmBDSu+jznsIW+xvUOVA0CT0cKP0JhOzquUeXsTUymg/+uxWQ4evpzpNLF/Z4Wd4bU8eO5qdDx1g6ZzodIxUDtY802f7eUHYOCncNqeSIgyfjuHj1Bm++8Mx9bUfH/Ziv/2rtfW2HMDTIZDL++sV/mT9tosp/nDIYDJZz8H76WSRHNDY1kZVfyMmERGZNCLvfzRGEAaWjo8PapQ/R2tq7H/a0SZyDmhsSwwtb9h8h8doNxvj7MWmMuKwRhh53Z0fcnQffcyk6iHNQc0NqeEEQBOF++1kMLwiCIDwoRNEVBEHQIlF0BUEQtEgUXUEQBC0SRVcQBEGLRNEVBEHQoiFbdGMvJvOHj768380YdHKLinnurY0aPefhftHk2G145z2upmdqqUUPPm2FaJ5JuswH//3xnq/nQTZki+7PTXZBEc+9tZG6hkaV05maGDNxdJDaR/4JP2+fbtnB9kPHej3f+JAgpBVVXLmZcQ9aNTSIovszY2NpwVPLFqpMvRCEvtLV1WFCSCDHz1y8300ZtIbEPwOG9iyoo3HnqG9oJGiEN55uzt2miU9M4XDsWaQVlVhbmDNn4jiFB3Q3NDaxO/okl1JvUltfj62VJQunT2bcqAAuXb/Jtzv389H/viqfvqhUytv/+g/vvrYBawtz+YNuJoeO4mBMPHX19YQH+bNm0TxOX7zEkdNnaWxuZkJIICvmzZY/fUomg+j4BE5dSKKiqgY7ayvmThrLpDtPalIXDCitqOTdLzcB8Oq7HwIQHuTPupWLu+2D3KJi/vL5t7z/+q8xk5ioDKNsam5m+6FjpNxIp66hAQszU6aFj2H+1AkA/Pnzbxk/KpC5k8fJl/+fn/ZhZGjA2iUL7uzTRnYePcml1DSamltwc3JgecQMvIfdTevQ5Nj1pLyymo+/287N7FwszExZMnsa40YFIJPJ+L8Pv2TWhHCFIM2iEilvf/If/rDhGVwduycqaBIW2pOr6ZkcOBFHXlExhgb6eLm58uzqJRgaGGgcIKns2AIqj0NdQyOvvvshb6x/Cg8XJ3mbfvv3T1geMZPxPTx9rrGpmR8OHOFmVi7VtXXYWFkwc1yY/FzYtPsgl9Pae6od8T9vv7QOJztbikql7DxygrSsXAz09fD1dGflgtnyxBKAUSNHcDAmjuraOszvJEwIdw2Jopt8/Sa7ok6wesEc/H28uJSaxsGYeMxNTeTTXLmZwff7D7M8YiZBvj6k3cphW2Q0phJjxgYHIJPBJ1t+orq2jrVLFuBkZ8NtaRlNzd2fnapKYYmUy2kZPP/oMipravl6x34qqmowNjbk+TXLKaus4psd+/Fyc2FscAAAB07GkpBylVUL5uBsb0duYRGb9x7C2NhInjQLyoMBba0seWP9U7z75Sb++cZvkBhrHh6pKozy8OmzZObm8+LjK7AwM0VaWUV5ZVWv9sdnP+xEX0+fDWuWYyaRkHjtOh99t423XliHjZWFRsdOmb3HTrE8YiZrHo7gwpXrfLvrAI52Nni4ODEpdBTxSSkKRTcuKQVPV+ceCy5oHhbaWWpGFp9u2UHE5PGsXbIAmUxGakaW/KHnmgZIKju2MDDHobPWtjYcbW2YPSEcU4kJWXmFfL//MJbmpowJ8OOpZQupqavH3saaVQvuPtqzqqaWf3zzA+NGBfJIxAygPUn4sy07eGP9U/JUDDdHe/T09EjPzmVMp8+v0G5IDC9Ex59n3KhApo0dg72NFXMnj8PXUzFr7GjsOcKD/Jk9cSyOtjZMDR/NlLAQjsSeAyAtK4f07DzWr15K0Ahv7KytCBzuzRh/3162RsYzyxfh7uJEsK8PoQG+ZOUX8ItlC3F3dmT0yBEE+/pwPbM9jLC5uYWouAQeWxRBsK8PdtaWjAnwY9bEscReSFZYsqpgwL5SFUYprajExcEedxcnrCzM8RnmSniQv8bLvpmdS2ZeAc+uXoqXmwv2NlbMmzKhPaH48jVAs2OnzLhRgUwYHYSdtRXzp07A39uTY2fOAzB5TDCFJaXyh3O3tbVxLvmqyue8ahoW2llkTDwhI31ZOmc6Lg52uDraM2fSWIyNDHsVIKnq2Pb3OHQlMTbioemTcHdxwtbKkrCgkUwfO4YENWGcpy9cwt7GmpXzZ+Fsb4ezvR2/WLqQwlIpWfmF8ul0dXUxNzWl9E6opqBoSPR0C0tKu11GDXd3U4i/KSyRynuWnadpT9Ftj8Y2N5X0O4jRwcZaYbzU0twMJztbhWw1S3NTSsvbP5BFpVKampv5+Lvt3ZZlb2Ol8LeqYMC+UhVGOTVsNJ9s+Ym3P/kPgcO9CfL1xt/bU+Nl5xQU0dLSym/++kG39zq2TZNjp4y3m4vi3+6uJF9vjzKysjAncLg38YkpeLg4cTktg4bGJsYGKy9WmoaFdpZbdJtlSrLAehMgqerY9vc49ORo7DniklIoq6iSJ2F4uqoe1skpKCIzN5/n3trY7b2SsgqFISNDfX15qKagaEgUXWjPsVL1tzoyVD9sTben4MoecrN6Wm/n9Nd2d4MRO/7/zReeUVvwVQUD9pWqMMrhHm785ZXnuZKWwfXMLL7cupsAHy+eXb20fSt63Cd3n/naJpNhJjHh/dd/rbIN/T12cl32xZSwEDbtPsiK+bOIT0whLNAPYyPlQy+ahoWqWGXPNAiQVHVsVR0HZaEUbSpCNeMTUzgSd451Kxbj5eaMsZERB2PiSVJzS1mbTMaYAD/W3zn+qtTW14vxXCWGxPCCs70dBbdLFF7Lv13cZRpb0nPyFF5Lz8nDyc4GHR1wd3aiuraOgmLF5XQwN5XQ2NSskI/Vn+DKDk72thgY6Ku8hNWE/p1C1VPYpDqqwihNTYwZHxLIU8sW8qtVS0i8dkMe091TmOftTvvE3dmJmrp6MnLzla5bk2OnTOadPDv537kFCpluwb4+GBgYcOp8EpdvZjI5NET18voQFuru7CgfKupqIAMklR0HYyMjDPT1qam7GylfW9+g8j7sjNx8Aod74e/jKf8Syu0pjFOm+Flyd3HiZlaOQkx7T6pqaqmpq2fYIH7+7/00JIru7EljiUtMlqeY3szO5eJVxW/tiCkTOH85leNnL1AsLef0hUvEXkwmYsp4oD1Y0MfdjS+37eHKzUxKyytJzcziUmoaAK6O9pibSjiZkAhAWWUVkTHx/W67oYEB8yaP59CpM5w4d5Hb0jLyioqJOZ/E8bOa33ZjbWGOrq4OKWkZVNfW0dDYpNF8qsIoD58+y6XUNErKKigtryDpWhpWFuYYGbaf
qP7enly8ep3q2jpkMhnHz16ksEQqX7avpzu+nu58s2M/l1LTKC2v5FZeAfuOn5YXKk2OnTIJKVc5l3yV0vIKjsSeJTXzlkIwo66uLpPGBLM7KgZ7ayv5WLUyfQkLXTBtIpdS09gTHUNhSSmFJaUcP3uBhsamAQuQVHccRnp7cPpCMq2tbTQ2NbMtMkplLpuTnS238gqpvbNtF69e50qa4n21dtaWZOUVIq2opKaunrY2GdPHjkFHR4fPf9xFek4epeWVXM/MZtOeSOruhJtCe2fG3FSCu/Pduylu3Mrh/J1x/J+7ITG8MHrkCEqmT+KT739CV0cHZwc75kwMl/9YA+3hgk88PI/DsWfZefQE1hbmLJ83Uz7Oq6MDLz6+gl1RJ9m0+yD1jY3YWVmxaMZkoL04rlu5mG2R0Rw+fRZ7aytmTxzLt7sO9Lv9i2ZOwdzUlJMJiew8cgJjI0PcnByImDxe42VITIxZNncGe6Jj+G7PQcICe75lrCtVYZT6enrsOx5LaXkFenq6eLo68+LjK+ThiDPGh1JUKuWdT/6DgYEBY4P8GT1yhHzZOjrwwuPL2Xf8NNsio6mqrcXC1BRvd1cmhAQBmh07ZZbMnsaZS5fZvO8QFqam/GLZom7jkpNDR3Ho1BkmhQarXV5fwkIDhnuxYc1yDpyIJTr+PEaGBngPc2VKWHuveiACJNUdh8cWRrBpTyS//fu/MJWYsHD6ZKW9b4CZ48MoLCnl7U/+g76+Pt7DXJgzaSxXbt79F36zxoeTlV/IO598TVNzs/yWsd+uW8vu48bffQAAIABJREFUqJN89sNOmptbsLG0wN/HEwODu6XkfMo1JoeOUij851KuklNQ1O13lZ8jkRwhDGk3s3P54L9b+dtrG/r1g6OgmZKyCv727028/dKvFMZ03/1yE6GBfsybMuE+tk471CVHDImeriB01dLSSlllFfuPnyY8aKQouFpSXlXFU8sWKhTchsZGyquqmTEuVMWcPx+ipysMSbEXk9my/wjuzo5sWLMcS3Oz+90k4WdCXU9XFF1BEIQBJIIpBUEQBhFRdAVBELRIFF1BEAQtEkVXEARBi0TRFQRB0CJRdAVBELToZ190YxKS+N0/PuP5tzcSFZdwz9bTNRDyQQiIHAi/+8dnnEu+qvT9yJh4Nn61+Z63o6/ruZqeyR8//Ub+NLi+OHbmAn/89Gulfw9GItj13hlS/yKtpbWV373/Kc0trfzttQ2YqElQqKmrZ9uhKNatXMJILw8MDQzuWdtEIOSDRyaDnUdOsHDG5B4fY9lXzg62hHR6RsW9EhWXwLmUq/zf80/f83UJmhtSRTc59SZ21laYGBmRcPka08eOUTm9tKKStjYZAT5eGBvd26DGjkDIe6mltVX+iEdtam1t6+GZwQ++1MxbVNXUMnpkb9NDVAvw8VL7IB1Vhur+/rkYUkU3LjGF8SGBSIyNOXb2gsqie/JcIlsjowDkyQbvvraB+oYGteGEH27ayjAnR1rbWjl76Qq6urosmjGFKWEh/HTkGAkp1zA2NGTxrKlMHNP+dKuugZCdlZRV8ObHX/L7555mmJOD/PWY80kcOBHL3157oceT7MNNW3Gys6WxqYmUtAxcHex59enH1IYHymRw/Ox5Ys4nUVZRhZnERCH3qrK6hm2R0VzLuIVMJsPPy4NVC+bIM706AjhDRo4gJiGRyppaPnvrf5BWVLJ57yHSc/KwMjdn6ZxpGh+7mIQkDp0+Q219PYHDvXli8Xz5flIX7qhJm7sqLa/go++24e/jxWML5/bYkz1/OZXAEd5KC1zn4MXehDAeO3OBuMRk3nzhl5y+cIl9x0+z8f+9IM8YA/h6x36ampp4fs1ypftbVejnmaTL7Dx6AkCe9LBmUQTTlJwTmoSDXrmZwf7jsRQUl2JuJiE80J+HZ01RSEUR1BuUe+tyWgaNd54Ha24mUZvGClBWUUVaVg5PL1+EoYEBPxw4Qm5RsUIR62zG+FA8XJ3Y+NVmPvrfV+QRO+WVVRqFE8YlJjNjfBivPfM41zOz2HYoipS0dEZ4DOP/PfM4KTfS+X7/Yfy8PbBREmrYwd7GCl9PD+ITU1j90Bz56/GJKUwcHayyV3P64iXWLJrH44vn09raqlF44P4Tpzl+9iIr5s3Ez8ud6to6cgran2crk8EXW3cjk8l4+cnV6OnpsePwMT7dsoM/bHhG/ri+nMIiXB3teevFX9KehAFfbt2NoYEBv/vVk7S0tvLjgaPyZ7aqUlBcSsqNdH69diWNTc1s3nuY7/ZEsmHNckB9uKOmbe6QV1TMx5u3Mzl0FEtmK/9iuJmVq/S5t1HxCdTW1bN0znQOnTpDa1ub/DGgvREWOJJtkdGkZmYRONwbaE/rTb5+k6eWPSSfruv+BtWhnxPHBFNTV6/R8IIm4aDXM7P5avteVs6fjZ+XB5U1NWw9GEVTczOPLpzb6+3+ORt01ygZufk42FoTHuxPeLA/hgYGFBSrz8uKS0rB38cTc1MJRoYGjPb3JS7x/7N33uFRXWf+/8yMeu+9C1RAAgkQILoppppqbONeYyeON3EcO5vsb+N4k83GsTfeuMQNO8aGADammt47CIGEBBIS6r33NqMpvz9Gc9GojgQIgc/neXjQ3HLmPeW+9z3vvXO+l/s9ryumihP6eXmydPZ0fD3dmRMfh6uTI3KZTDp+wfR4bKysuJ5XaNL3Th8/loSUq6jVemmYkopK8kvK+l0HdmSgP1PHjcFMocDSwqJf8UBVezsHzySwfM4Mpo0fi7uLMyH+vpL89vX8AvKKS3h21QME+/kQ4O3Jcw8upaKmhpTMG21gpjDjoYVzsLK0xMrSgsy8AgrLynl65RL8vDwI8vXmsaULjJQ2ekOj0fD0ysX4eLgT7OfD40vnk5KRRUV1rUnijqbaDJCVX8TfvtrI/GmT+3S4Op1+oXpH++6rk209eIxzyVeYOXEc3+49xKW0DGZMiOm3nj1hY23F6JEhJKTcWD84OT0TuVzOmPAR0rau7W2K6KepmCIOuvfEGe6bNL5jzDgxIsCPhxbO7dAYFMu3DIRhF+nW1jcQ2kngLtjPh8Qr6fh4uPV6jk6n42xSqhTZAUweG8Xn3+1g1f33DWj6Y6o4YYCPsRSJo52dkTyJTAYOdrY0NrdgCjGRYWzcc5DL164zPiqC05dSCA3ww8utb1mXrrpq/YkHmnUIBkaE9Dx7KK2oxt7WFg9X5xt1s7fDzdmJ0ooqaZFyg4aYdF5lFY52dkbT+QBvT5MeTrq5OBlNzYN8fZDL5ZRVVaPRavoVdzTV5oqaWv7+9WYeXDC733y/VqtXj+g6dnYcPsG17Dxee/Yx9h4/Q25RKa898+hNPROYNHY067btQdXejoW5OQkpaYwbHW703V3b2xTRT1MxRRw0v6ScjNwC9p081+38usYmKXUl6J9h53QVCgVKlUqa7jc0NWNjZdXnOWnZudTUN/Dl97v48vtd0natVkdSWiYTx5i+Wr2p4oQKeZcHVjIZCrm8yyaZyVGAmZmCyWNHczophZjIMBJS0ljZi8psZ7o
6hf7EAws6JMn7orcn9Z2393QjM02U01S6tFs/4o6m2OxoZ4e1lSUJl68yaczoPh2lQiHHytKS5k4yNKC/+Z66eJm8ohJGBvlz7vIV8kvKCA8OMKFOPRMdFopcLuPytetEhASRnpPHL5582OiYnvrZFNFPU+lPHFSn0/HIonnSjEgweIZdemFM2AgSUtK4nl9IRm4+V67nMGpE3096T19MITYyjP/302eM/k0bP3bAKYbBiBPeKqaNH0t6dh7HL1yiXa1m3OiIAZfRn3igQQizNzkXbw9XGpqaqKypk7bVNzZRVVtnJPrY7Tx3N+oaG420smrqG2ht61vEEKCqps5oRpBXXIJWq8PLzdUkcUdTbba0MOffnlgNMhkfrP8OpapvHbkAbw9Ku6S2DPI867bvQSaT8eIjK/hiy07Ss/P6rWdvmJuZETsqnISUNBKvpONoZ9ttet/dtv5FPxUKRY+K1V0xRRw0wNuTyxnX+y1L0D/DzukqFHKmT4jBw8UZHw93psT2ndNsamklJSOL+NhofDzcjf5NHTeGzLwCo4uxPwYjTnir8HZ3I8TPh60HjjEhKhJLi4G/N9yfeKCFuTlz4+PYfvgEpy+lUFlTR15xKccTkgC9mGSQrw9ffr+LvOJSCkrL+WLLTjxcXIgOG9Hr94YFBeDj7saWA0fRaLS0q9V8t/dwnwKJBhQKBV9t3U1JRRW5RSWs37mf6LBQPFydTRJ3HIjNlhYWvPL4atDpeP+bvh3vqBEhXM/vnpMP9vPh1acfoaa+gREBfvzyqYeprqvvt559MWnMKK5m5XLiQjJxY0b1+16wKaKfbs6OVNfVUVhaTlNLa6/5dVPEQRfP0uuubdp9kKKyCsqra0hKy2DznkM3Ve8fI8MuvWDA1JX+zyVfwcxM0eN7j8F+Pjg7OHD60mWWz51pUnmDESe8lUwdN4bsjmh7MDjY2fYrHrh09gysrSzZe+Is//phP/a2tkwacyOn99IjK9i05xDvfbUJ0BEWFMCTyxf36UBlMnjxkRV8s2Mfr//1fWxtbJg9eTy5xaX92uzj4UZUWAjvf72Z5tY2Ro0I5vGlC6T9pog7DsRmK0sLXnniId7/5ls++OY7XnlitZTO6syU2Gh+OHqKyppa3F2cjfZ5ublK+XbDTf5mCAsKwNHOltLKKpMERU0R/YwaGcLYiJG8t24TLa1tvb4yZoo4aERIIL948mF+OHaKv65dj1wuw8PVhcldcsGC/hHKEcOM3cdOc/FqBr9/+dk7bYoA2LznEFqdljWL77/TpgjuEoRyxF2CUqUir7iUYwmXmD15/J02R9DBkvum4ezgIF6LEtwyhm164cfGhl37uZSWQWxkOFNiB5daENx6bK2tWDD93pcNFwwdIr0gEAgEtxCRXhAIBIJhhHC6AoFAMIQIpysQCARDiHC6AoFAMIQIpysQCARDiHC6AoFAMITcM073TglM3gnRQa1Wy3/+/TMyTVyrVyAQDB/uiR9H/NgEJuVyOQtnxLP1wFH+/SdP3jE7BALBwLknIt3OApM21laY3UaHaBCY7GmBlKFk/OgISiuryOljaT+BQDD8uOsj3ZsRmPzZW3/liaULuXg1g4y8fFwcHHhs6QLcnR35Zuc+svKLcHdx4snliwj08QL6FpjsjYEK+pkieGhpYU54cCAJKWmEdFLaEAgEw5u73unerMDkzqOnWDp7OivmzeTA6fN8sWUnnq4uTJ8wllX338cPx07zz+9/4M2fP08/S5z2yGAE/S6kplHX2MTKebM4k5SCStXOA7OndzsuyNebxCvXBm6UQCC4Y9wT6YWeMFVgcuq4McTHROHr6c6yOTOob2wiamQIcdGj8PV0Z+nsaZRVVVPX2DgoOwYj6Dd5bBQ5BcWs27ab+JhormblsmHX/m4qAE72dje9eLZAIBha7vpItzdMFpjsJCbpaKdfON2/h22NzS2DEt8bjKCfjbUVv3jqYT7+11a2HTzGr55Zw4frt7Bx9wEee2C+dJy5uRnt6vYB2yQQCO4c96zTNVlgspNwoiF9YCSm2LFxsOupDlbQr7i8kuKKSiaNHU1hWQVlVdXdFGybW9uws+me7xUIBMOXeza9cCcFJjszGEG/nMJi/vGv73ly+UKcHR34dNM2nl31AOOjjIUqSyqqCPD2upXmCgSC28w963TvpMBkZwYj6FdVW8/PHl3F6BEhVNfW8crjq4kICex2XFZ+IVEjQ6TPSlU7x85fGpAQp0AgGFru2fTCnRaYNDAYQb+JY0ZJf8f3ooZcXF5JZW0dEzuV09LayqY9B3lpzUrcXZxuXSUEAsEtQyhH3KWs274HRztbI5Xj1Mxs1m3bzX+/+tId//GGQPBjpT/liHs20r2X0Wp1eLg4c1+Xh3OZuQXMmzpROFyBYBgjIl2BQCC4hQiNNIFAIBhGCKcrEAgEQ4hwugKBQDCECKcrEAgEQ4hwugKBQDCECKcrEAgEQ4hwugKBQDCECKcrEAgEQ4j4RdoQ8vHGrTg72EuKEfWNTXz5/S5yCkuQy2W899tf8rO33uFXz6whLCjgDls7cHQ6Het37iMpPZOW1jZ+++JTkszRcKNrXwgEQ8U95XTVGg3//u5HtKs1/OW1n2FtZXlH7MgvKeN/Pl3H3377S2w62RAeHIittZX0ef+p82g0Wv70yxexMDdHJpMxccwoHGxtb5ktH23YgruLMw8tnHPLyuyNyxlZJKVl8Przj+Nob4/VMP45cte+EAiGinvK6V5Ov46bsxPWlpYkpKZ1W/R7KNBotL3umz15vNHnyppagvy8cbS3k7Y9u+qB22bb7aayuhY3Zye83d3utCnodKDTaZHLjTNoGo0WhULerS8EgqHinlp74f2vvyU6PBQbKysOn0vkdy8+1e85V7Ny+OHoaYrKKrAwNyPYz5efPLwMC3NzdDo4dCaBE4lJ1DU04ebsxLwpcUwZN0Y6/2dv/ZUH58/h8rVM8orLmDpuDEfOJRp9x4SoSJ5fvdRoSvva2+/T3Mv6vp3TC+XVNWw9cIzMvAK0Wi0+Hu489sB8/Lw8KKmo7FPxeN223ZxNvmJU9h9eeR4vN1fKqqr5fv9RMvMKMTdTEBYUwOqFc/qUJMouLGbr/qPkl5ZhbWnJ+NERrLr/PszNzfh083aS0jKkY91dnPjjL17sVkZWQRHvfrGBXz2zhm/3Hqa8qoYgX2+eX72U8uoaNu85RFVtHSMD/XlqxWJJCTkpPZMDp85TWlmNQiEn1N+XhxbOxc3Z0ajcFx9ezs4jp6iqreOVJ1aTlV9EamY2YyNGcjzhEvVNzfzjzde7pRd2Hz/DhdQ0quvqsbOxYWzECFbMnYWlhblk+4HT5zl05gJtShVRI0MI9vPhRGKSUT0HqvwsuPe4K1cZS83MRqlUAWBvZ2Mkm94bNXUNZOYV8MyqJViYm/OvH/ZTWFaBv5dHr+ekZ+fx0YYt3D91Ek8sW4hOpyM9O0/SU/vh2CkSUq7y0MK5eLu7UVhaxjc79mJlZcm4UeFSOQdOneP5h5YR6u+Hql3FpLGje0wvdOZ/f/NvvP/Nt/
h6uLNq/n0AaLVafvbWO9IxDU3NvPvFBoJ8vfnFkw9jY2VFXnEpWq0+mm5TqvpUPH5qxWKaWlq7pRcampr53y//xcQxo1l5/yxA73T+sWELv33xqW7RoeGcD775lnGjwnl82QJq6htYv2MfGq2Wxx6Yz4sPL2ffyXNcSssw6Wa37eBxVs2/DxsrK77ZsZfPv92BVqfjoYVzsTA3Y922Pew8clLShGtvV7Ng+mR8PNxQqtrZe+Isn2zayn+89DSyTjLNh88m8srjq3FysEej0ZCVX0RBaRm+nu68+fPngJ4lnc0Uch5dMh9XJweq6xrYvOcg2w4d45FFeqd8KS2DnUdOsmbx/YQHB3IlM5udR08apSgGo/ws+PEx7N5eyC4sxsPVmQnRkUyIjsTC3JySiv5ldk4npRAZGoS9rQ2WFubERIZx+tLlPs/Zc/wMYyPCWD53Jj4ebvh6ujN3ShxWlha0t6s5eDqBNUvuJzosFDdnR2JHhTM7Po5TicblzoiLZUSAHzIZt3RZxeMXklAoFPzk4eUE+Xrj4erMxDGjCOh4OGWq4nFXTiYm4+7izOoFs/F2d8Pb3Y2nly+mtKqavOLSHs85cSEJG2srHl+6AG93N0aPCGH1wjmcvnS514i9L1bMm0VkSBCBPl7MjY8jq6CIhxbOITw4gGA/H+6bNJ5rOXnS8RPHjGJsxEjcXZzx8/LgqRWLKKmo7CbBtHzeTFycHJDLZZib62MKM4UZDy2cg5WlJVaWPffP/GmTCQvyx9XJkbAgf1bdfx8XUtOl/UfOJhIfE83UcWNwc3Zk1qRxRIYGGZUxGOVnwY+PYRfp1tY3EOrvK30O9vMh8Uo6Ph695wl1Oh1nk1KlqA30Muaff7dDP/3tZWpXWFbOinmzetxXVlWNqr2d97/+ttu+rqoMPp7ufdRo8BSWlhMa4Nur/aYqHneloKSMnMJiXnrz7W77KmvqCOnU/gZKK6sJ8fc1ioJHBPih1eooq64h1Kb7OX1hpMLckdP29+q8zZbG5hbpc0lFFdsPHSe3qISmlhYMPqymvgEfjxvt3/lvAwZh0r5Iychi74mzlFZW06ZUStuVqnYsLcwpq6pmShcVjyBfbwpKyqTPg1F+Fvz4GHZOV6FQoFSppIixoakZG6u+nzKnZedSU9/Al9/v4svvd0nbtVodSWmZRvI3nekr+DBEJr9/+dkeL+TOmJsp+tw/WPqLjkxVPO6KVqcjdlQ4Lz68fED2yHqZmve8tW+MFZd72iaT6q/TwYfrv2PUiGD+/SdP4mRvj1wu4+d/fBd1lweXPfVFf/nU8qoaPt28jYcWzmXcqHDsbKzJLijm3S83oOlI5ZgSpw5W+Vnw42LYOd0xYSM4k5SCl7srWq2W6rqGbhFGV05fTCE2MowHZk8z2n7k3EVOX7rcq9MN8PbkWk5+j285eLm7Ym5uxuVrWf063a6YKfQXvk7b+5sMphDg48XpSym0q9U9Oo6cwmLmT5uMb0ekbVA89u0UeSsUCrQ6YzsCfLw4nnCJNqUSK0vTXqvzdnfl3OUraLU65HK9l8wqKEIul+Hp6jLYKppEXWMjNfUNzJ82CVcn/YOz4vLKPt8UGQh5xaXY29oajYOC0nKjY7zdXMktLjV6iNo1FWNQfhZOV9AXwy6nq1DImT4hBg8XZ3w83Pt1uE0traRkZBEfG42Ph7vRv6njxpCZV9CrOu7CGfEkp2ey/dBxSiurKK2s4si5RNqUKizMzZk/dRJ7T5zl6PmLlFfXUFRWwfELSRw5d7FPm5wd9JFYSmY2jc0ttHU8FBwoMybE0K5W89nm7eQVl1JZU0filXTJIZiieOzm7EheUSnVdfU0tbSi1eqYGReLTCbj441bySoooqq2nms5+azbvoeW1raebYmLpamllQ279lFWWU1aVi7f7T3M1HFjsbWxHlT9TMXB1hYbaytSM7MBaG5t49u9h4weoN0MXu6uNDY3k9+RKigur+TQmQSjY2bHT+BscipnLqVQVVvPsYRLpGfn0TnOH4zys+DHx7CLdA10fne1L84lX8HMTNGj0m+wnw/ODg6cvnTZSMDRwKgRwfzs0VX8cPQUh85cwNLCnBB/X6aNHwvAkvumYW9ry7GES3y//yhWlhb4eXlw/9RJfdpkY23Finmz2H7oOF9v38340fpXxgaKo70drz/7GN8fOMp7X20CdPh4uPP40gWAaYrHsydNIK+4lLc+/AJVe7v0ytgbzz/BtoPH+Me/vqe9XY2LowORoUHSw6euONjZ8soTD/H9/qP86ZN/YmVhwYSoSFbdf9+A6zVQFAo5Lzy0jG/3HObAqfNYWVmyZObUXh/6DZRAHy9WzpvFJxu3AuDi6MCimVNYv3OfdMy4UeFU1dax/fAJ6ZWxOZMncPHqjdfkBqP8LPjxcU+9pysQDCXrtu+htbWNl9asvNOmCIYRQiNNILgFqNUajpy7SGVNLTX1DRxLuMT5y1eNcrwCgSkM2/SCQDDcSE7PZNfRk6g1GjxdXXh25RLGhI+402YJ7jJEekEgEAhuISK9IBAIBMMI4XQFAoFgCBFOVyAQCIYQ4XQFAoFgCBFOVyAQCIYQ4XQFAoFgCBl27+lu2n2QYwmXcHFy4M+v/vROm3PTDKQ+TS2t/Prt9wF4avki4vtZd+LHxO0YF3dirF1Ky+CzzdsB+O9XX5IW8Dl/+Sp7jp+hqq4OjUbL8rkzWTB98pDYdDP0VJ/e6mjKuT8GhtTp1jU08tu/fSwt2ffI4nnMmvjjW5EpMTWdtVt2AvDX13+Og92tE6IcDBt27edkYjKebi689coLd9SWW01DUzNvvPMhAM8/uJQJ0ZF32KLu1Dc2sW77HrRaLX5eHtKC+oJ7kyF1uucuXzVaI/ZMUuo973QfWjiHVfPv63Ut2rsRtUYjLV95N3Mn+iY2MowP/vM1QK9oAVBeXStJMD25bKGkDHI30FN9BH0zpK10NikV0K/qlF9SRkFJGcXllf3e1bVaHYfOJHAmKZWq2jrMzMwI9vVm0awpkkpC56nKTx5ezpFzieQXl+Hi5MCD82cTHRYqlZeSkcX3+49SXV9PkK83i2ZOkRQi+oqGfvfex9TUNfDA7OksnjmFkooq/uujLwD4869+ioujA9/tO8zhs4n4eLjz+5ef5du9h42msN/uPWwkXGmIwuJjolg1f7a0Xdnezvqd+0i8ko6VhQUz4mJZNHNKn+30ycatFJVX0tjcglqjxtHejtjIMJbOnt6rjNCfPv4nRWUVgH4xb4OaxFPLFxEdPkJKd6y6/z5yi0q4mpXDhKhI5k+bzIZd+ymvrqGppQUZMjxcnZk+PsZoPdn/+ugLSiqqmDhmFK5Ojpy+lIJarSYqLJRHl8yX5HNSM7PZc/wMZVXVqNUa7O1s8Pf04LGlC3qdCSRfu87+k+eoqq2jpa0NczNz/L08WDRrCpEhQSRfuy6tHAawdstO1m7ZadQXXdMLpoy1zmmgFfNmUlRWSUpGFtaW/fdTUnqm0ZT6/OWr7DxyUtr/50/XAfCLpx4mMiSo2/mdUyKLZkxh38mz1DU0E
ejjxaMPzDe6lpKvXefgqfMUllWg0+nwcnNh5sRx0ip6prR7WWU1Ww8eI7eohJa2NmytrfF2d2XJfdMYGejfrT5dUwSlldX8c+sP5BWX4uzgwMp5M4ntpC/YE6mZ2ew/dY7C0nK0Wh1+Xh4smjnF6Bq+mxmyB2nZBUWUV9cAsGbJ/ZKK65kOR9wX3+zYw9aDxyirqpb0r9Jz8njvq41cy8nvdvza73ai0WixsDCnvKqGL7bspLljndiSiio+2bSV8uoa5DI59Y1NfLppu0l1CO9Q6M0uKDL6HyArv8jo//DggB7LcHVyxN3FWfoc4O1JsJ8Pbp22AWw/dIJzyVdQqzXUNTax88jJjvVbe+dKVg421lZEhAQSFhhAQ2Mzh88m8q8fDvR6jq+nu6S4a25mRrCfD8F+Pth1bDOw88hJUjKzsLOxQSaT0dDcTF5xKW5OjkSNDMXX053i8ko27TnI6Usp3b4n8Uo6py5exsXRgZY2JQkpaew/pZe10ffBNnKLSvBxd2NUaBDWlpakZGbR0tbz+r4AJeWVVNfV4+PhTnTYCOxtrbmeX8hH67dQVlmNrbWVkSyQu4szwX4+BHSSBerKQMfajsMnKCgtw8HO1uR+6oyzgz1e7q7SZ19Pd4L9fLDuZ3H52vpGth06jp+XJ5aWFmQXFvPB+u9ob1cD+gDnk41byS4sxsrSAntbGwrLKli/cx8/HDsNmNbun327g5SMLGysrIgOG4GrkyPZhcWUmqBbCPD5tzuoqWtALpNTWVPLZ9/uoLDjJt8TCSlpfLRhC1n5Rfh6ehDs50NuUQkfbdhipDZ9NzNkka7BuXq5uRLk682kMaPZffwMCSlXWTlvlrFUSyfKq2skGfE58RNYvWAOrW1K/vjxl9TUNbDryEkiQozVgudPm8SyOTPIyi/i3S830KZUkVdcwugRIRw4fR6tVoeFuTlv/vw5XJ0c2XrwGAdOne+3DmHBgZxNvkJOYQk6nY6sgiIszM1RazRkFxQxNmKkNKDCenG6c+In4GhnK+V0f/74aimS67wAubODPa8/9xjtajX/8d4nqDUaruULFrkyAAAgAElEQVTkdxND7MxfXnsZGysrGpubUbWrOZmYzIHT57l0NYOnli/qUeX3mZVLpJyui5MDv3nhCWlfZ3ucOuxxsLNFrdag0Wp5941XJAes0Wj4evterucXkpiaztQuq29ZWVjw+5efw97Whve+2kRGbj7XcvJZNgcqa+tQazSYm5nx4iMrpPaob2zCuhc1ZYCZcbEsnBGPUqWiubUNVXs7b3/+DW1KFUnpmSycEc/PH18tzSaWzZ7eZ053MGMt1N+PV59eg0aj4dd/fR+lqr3ffupMfGw0To72/H3dZgCeXrmkTwVrAzqdjl8++TD+3p7kl5TxP5+uo66hkfMpV5k2fiw7OqLnYD8fXnvmURQKBZ9u3kZyeib7T55jbnxcv+2u00F5dTUAK+6fRUzESACUKhVKVbtJ9ZsxIYZV8++jtqGRtz5cS5tSxcHT53l21QM9Hr/t0HH9eXGxPLrkfgDW79zHqYuX2XHkZL9R8t3AkDhdVXs7F69eA2ByTBQAk8ZGsfv4GRqbW7hyPZuxHR3alfziG8J/cdF62R1rK0uiR4Zy/EKStNp/ZyZE6S8sQzQN0NCkFzksLq8EIDTAV5oKTR4bZZLTNUSvbUolxeWVZOUXEeLvQ0ubkqyCInKLStBqtchkENaPOGR/xMdGYdMh7+3kYE9VbR0Nzc29Hq/V6th19CTnkq90uyDa1WqaW9ukiHYwTImNli5KMzMFLU1tfLV1N9dyb0jWG6hrbOp2flhwoPT97i5OZOTm09Ckr4+vhz7abmxu4Y13PsTR3g4/Tw9iIkcybXxMrzaVVdewYed+Sioqu+2ra2wccB0HM9ZiR4Ujl8uQy81wsLOjsqa2z366VdjZWOPfEcUH+nhhY2VJS5uSkopK6hubqGvQ1z82MgyzDt24uKhIktMzaVerKamo7LfdZTIIDw4kLSuXTzZuxdrKEh93NyJDg5gTH2eSnYbr3dnBnrCgAFIysqRrsCv1jU3U1jcAevXpExeSjPaXVVb3Kl11NzEk1ielZUqSNXtPnJGmlQbOJKX26nQHgyFPaBTZdTzAMzwyGczDExdHB9ycHamqrefi1WtU19UTHxNFS5uSo+cvkpqplz738XC/aQkbe5sbDlKaBfQhVHkyMZnjCUnIZHoZIj8vDzJyC6SBe7MS4F2VPDbvOURadi62NtYsnjkVJwc7jpxLJCu/qJsmG4B1J+lzg8yOrkPu0drKkt+++BQnLiSRW1RKaWUVV7NyuJqVg06nY0YPGnY6nT6H3djcgr+XB7MnT8DS0oINO/fR3NrW7UZwu+hcL4N2XJ+Kp7cITRf9va6fJfoY5qa0+4sPL+dk4mWu5xVQUllNdmEx2YXFlFXVDFgNZSBjcNSIYCN16HuJIXG6Z5Ju5Ph6mpakdmiJ9RSJBfreeJJ7ITWNIF9vWtuUpF7X62UFDvBJr6+nOwWl5WQXFlNb34CzowPnOqaUphAWFEBVbSrHEi4BEBroR1ubkiPnEjl1UV/P8ODAvorAwuKGHLip07T+KCrXpzXcnJ1YNmcGAJm5BSada5AnV/VhS9dr1/B9MREjmT15PDod/HD01ACt1tOmVKHT6Ywkld5Zu57swmJyi0t7dLqNzc2SRPvcKROZNHY09Y1NtHaSTwew6CQ/pGzvu61v9Vi7nbS2KUnJyGJM+AiS0zOlceTj4Y6jvR1ODvbUNTSSlJbJ7EkTUCgUXLiSDuhz9z4e7ia1e1VtPXOnxDF3ij6y3XH4BHtPnCW3qMQkO89fvsrK+2dR29DI9Y7nHb09OO9st7lCwfK5MzHI4NU2NJJbVCJFuRt3H6CmroGw4ADmTZk48Aa8g9x2p1tdV09mnv7if3D+bKnzQN+Qv/vbP9BqtZy/fNVonwFPVxfiY6I4m3yFw2cTSc3MprmllebWNuRyGQ/Mnj4ge+ZNncT5lKsoVSre/GAtdrbWNLf0/rCmK+HBgZxJSqW1TYlcLiPEz0ca8EqVPprvLZ8r1cnthnruu19uwMXRgfnTJjHiJlISgT5enAQqa+rY+MMBtDqd0c2uL7w67KltaOTNDz7HxsqKF1YvM7o59PR95VU1XLyagaO9HXnFpZRVVQ/K9rrGRv7wwVp8PNxxdXJAo9GS03FRd34Q1hl7W1ucHR2orW9g74mzVNbqRTu7BlNWlpY42ttR39jEd/sOczIxmVEjglnaw7i51WPtdmJuZsZn327Hyd6emvp6AJzs7STl62Wzp7Nu+x5yi0r43XsfY2amoKZOP3WfP30yVpYWlFVV99vuf/96M2ZmCjxdXbAwNyej40beW7905fiFJBKvptPU3IqqvR2ZTMbcPpzk8jkz+Grbbi5nZPHWh2vxdHOhpq6e4ooqRo8IZlxHTvd6XiElFVV95vyHK7f97YWzSanShRATGWa0z9nBnkAfb6DvtxieWLaIlfNm4eXmSnVdPRqtloiQQH751JpuDzb6w8fDjZceWYmnqwtanRZn
e3ueXrFI2t+bMKOBzg7Vz9MDSwsLHOxs8XDVv30gkyG9WtQbnq4uLJszAyd7OxqamsgtKpHym4Nl6rgxzJs6EXtbG85dvkJDUzNLZk3r/0T0Oe0J0ZHYWFtRXlVDblEJKnXfUeHqBXOIiQxDp9Nx4kIS3u6ujB89uB8e2FpbMz4qgnZ1Oxm5BWQVFEltNGvi+B7Pkcngp4+sIMjXm+q6es4lpzI3Pg5nR/tuxz6+dAGebi60q9XkFZdSUV3bqy23cqzdTuztbHhh9TIUCjlymZwQf19eeeIhadYSHxvNS2tWEurvS5tSRUNTM/5eHjy+dAFLZk0FTGv3uOhRWFtakldcytXrOVhbWTJt/FgeX7bQJDtfeGgZLo4OaHVa3F2ceP7BpX067MkxUbz82IOEBflT29DI1awcmtvaGBs+gukTes/v3038KJUjyqtr8HS9EW3uPn6GXUdOIpPBn3754/k5ouDu4177mfy9SH/KEXf3Y8BB8vZnX+Pi5IirkyO19Q0UlJYDMG18jHC4AoHgtvKjdLpjI0ZyLbeAsspqFAo5gT5exMdGM7OHBzYCgUBwK/lRphcEAoHgdiGEKQUCgWAYIZyuQCAQDCHC6QoEAsEQIpyuQCAQDCHC6QoEAsEQIpyuQCAQDCHC6QoEAsEQIpyuQCAQDCHC6QoEAsEQIpyuQCAQDCHC6QoEAsEQIpyuQCAQDCHC6QoEAsEQIpyuQCAQDCHC6QoEAsEQIpyuQCAQDCHC6QoEAsEQIpyuQCAQDCHC6QoEAsEQIpyuQCAQDCHC6QoEAsEQIpyuQCAQDCFmd/LL84pL+XbvYYrKKlC1tzNqRDD/9sRDd9KkW8o3O/Zy+lIKnm4uvPXKC3faHMEg+a+PvqCkoor4mCieWrH4TptjMqaOv7u1fncrQ+J09508x/ZDxwH4/cvP4uPhDsDX2/dQUlGFk70dMZEj8fPyGApzBHeADbv2czIxWdyABD967likq9PpKK2sBmDOlDjmTZl4p0wxGa1WB4BcLrvDlgiGkt+99DQ6nQ65TGTjBDfPHXG6WQVFvPvFBunz9/uP8v3+oyycEc+yOTOMjq2qrec///4pOp2Ol9asJCZiJADX8wv53y//BcD/++kz+Hl50NzSys4jJ0nJyKK+qRlbaysiQ4NYNmcGrk6OAKz9bieJV9Lx9/LgP376DAAnLiTxrx8OAPD3/3gVSwsLNu0+yLGES7g4ObB8zkx2HT1JVW09f/7VT3F2sO9Wp9Y2JRt3H+DytetYWlgwc+I4dD3UXavVcehMAmeSUqmqrcPMzIxgX28WzZrCyEB/6bjy6hp+OHqajNx8mltbsbexYWzkSNYsvh+AN975kIamZubET2D1gjlGdQvw8eJ3Lz4FwJsffE55VQ2TY6JwdXTgaMIlFHI5C6ZPZkrsGDbtOUhyeib2tjY8cN90Jo0dLdlQWlnFziOnyMwroE2pxNXJkenjY5g7JQ6ZTH/jMUxNJ44ZhauTI6cvpaBWq4kKC+XRJfOxsrTgTx//k6KyCn29qmp46c23AXhq+SLiY6O7tdHv3vuYmroGHpg9ncUzp1BSUcV/ffQFAH/+1U9xcXTgu32HOXw2ER8Pd37/8rNUVNeyYdd+yqtraGppQYYMD1dnpo+PYdakcVLZqZnZ7Dl+hrKqatRqDfZ2Nvh7evDY0gU42Nn20GPw50++6jb97q+csspqth48Rm5RCS1tbdhaW+Pt7sqS+6YxMtDfaMy999tfYm1lSVlVNX/4YC0ALz/2INFhoSb3g6njrz/2HD/DsYRLtClVjIkYwaNL5mNjZcm67Xs4m5SKn5cH/6/jugF476uNZOQWEBMxkpfWrOyxzN3HTnPxagZ1DQ20qdqxtbZiRKA/K+fNwt3FCaDb9bb7+Glq6xsJ8PHkiaUL8XRz4WzyFdZt241CIecvr72Mva2NVP6uo6ewt7XhL6+9jEIxvG+Od8Q6a0tLgv18pM8uTg4E+/ng4ujQ7Vg3Z0dGjwgB4MylFGn7pasZAPh5eeDn5YGqvZ13vtzA8QtJ1Dc14+nqQptSRUJKGm9//g0NTc3GBctMi1YbGpv559YfaG9XY2Vp0etx63fuIyElDaWqHYVCwf6T57h45Vq3477ZsYetB49RVlWNi5MDcrmM9Jw83vtqI9dy8gG9Y/rLZ19zITWNxuYW3F2cAUjLyu3hm02rR1JaJntPnqNdraaxuYXv9h3hr2vXk3glHY1WS1VtPeu276Gypg6Asqpq3v58PUlpGdhaWzEqNJja+ka+P3CUb/ce7lZ+4pV0Tl28jIujAy1tShJS0th/6hwAvp7u0gVibmZGsJ8PwX4+2HVs60p4UAAA2QVFRv8DZOUXGf0fHqw/tqG5mbziUtycHIkaGYqvpzvF5ZVs2nOQ0x3jpr6xiU83bSO3qAQfdzdGhQZhbWlJSmYWLW1tJrWjqeV89u0OUjKysLGyIjpsBK5OjmQXFlNaUdWtvL6Goqn9YOr464tLaZnsO3kOMzMFqvZ2ElPT+Xr7HgBmTdTfuIrKKigoLQegsbmF6/mFAEzu4eZpICu/CI1GQ4i/L1EjQ9BqtSSlZfD3rzej0WiNjq1raOSbnXuxt7FBq9WSlV8k2TAhKgJbG2s0Gi3nL1/tZLfeF8RFjxr2DhfuUKTr6+nO6889xs/eegeAOZPjmBM/odfjZ06M5cr1bK5cz6GhqRl7W1uS0zMBiI/Rd3ZCShplHemKFx9eztiIkRSUlvM/n66joamZo+cvGkXRMhOdrlqj4cH5s5k7JQ6tVktPTq6qto6LV/UDfPqEGB57YD51DY281RGdGSivruFs8hV9nTsi1NY2JX/8+Etq6hrYdeQkESGB7DlxhtY2JQqFnF89vYbQAD8AabB3Rm5iPWQy+OMvfoKZQsG//+8/0Ol0NLW08D+/+hmtbUre/OBztFotmXkFuLs4sffEWdqUSnw93fndi0+jUMhJSEnjy+93cSzhEgumT8bR3k4q38rCgt+//Bz2tja899UmMnLzuZaTz7I58MzKJVJO18XJgd+88ESftoYFB3I2+Qo5hSXodDqyCoqwMDdHrdGQXVDE2IiRFHZEzmEdTtffy5N333gFmUxGQ3MzGo2Gr7fv5Xp+IYmp6UwdN4bK2jrUGg3mZma8+MgKKbKtb2zC2srSpHYE+i1Hp4Pyav1YXHH/LGl2plSpUKrae+ib3vvQlH5oV6tNGn/9IZfL+P3Lz+PsYM93+45w+OwFktMzqaiuJdDHi0AfL/JLyjhzKYWAxfO4fO06Wq0OOxtrokeG9lruc6uXYmdjTWNzC0qVisLScj7dvJ2q2jryikuk8Q36meArj68iMjSI3cfPsOvISbILi2lvV2NubsaU2GgOntbPFOdOiaOiupbi8kqAHmdNw5E7+vaCqUSNDMHN2Ymq2jrOJV9hRKAftQ2NyOVyJo4ZBejfhACwMDdnbMcgD/D2xNPNhbLKavKLywb13ebmZsyerL8hyOU930VLOkUvhum5k4M9EcGBJHXcHAAjG+Ki9XZbW1kSPTKU4xeSyC/R788r0tclLDD
AaEAGeHsOqg6gjx4NKRYbK0uaW9uICgvFwc4WBztbFAo5Go1WmhEYbCgur+Tl/3rHqCydTkdxeaWR0w0LDpSiWXcXJzJy87vPLky1tcORtimVFJdXkpVfRIi/Dy1tSrIKisgtKkGr1SKTQVhHSkapUvHV1t1cy82Tcu8G6hqbAPD10Efcjc0tvPHOhzja2+Hn6UFM5EimjY8x2b7+ypHJIDw4kLSsXD7ZuBVrK0t83N2IDA1iTnzcgNrClH5QazTStr7GX3+EBwdKqbNJY0dz+OwFAEoqKvFwdWbWxHGs276HhNQ0Hpw/W5ptThzTd4SZlJbBrqOnehwPhr4xYNOREgRwd3aStjc2t+Di5MDMuFgOnblASUUlecWl0uzQ19Md/7vkQfxd4XRlMhkzJsSw9eAxziSl0tjcAkD0yBDpQje9MP1/Ou2NaU2rUtnr4fY2NgN6cGamUEh/K8wUfRx5a9DqOtWjrfd62FhbSX8rOmy0s7aWtul0OuP/OzKCXu6ujA0f2a08Z0fjvLZ1p9SLIXLTDSqrCC6ODrg5O1JVW8/Fq9eorqsnPiaKljYlR89fJDUzCwAfD3dsbfR12LznEGnZudjaWLN45lScHOw4ci6RrPwiqY2srSz57YtPceJCErlFpZRWVnE1K4erWTnodDpmxMWaZJ8p5bz48HJOJl7mel4BJZXVZBcWk11YTFlVDc+vXmoU3RpuEj31nyn9YEgJwS0cf7rufTchOpIt+4/Q3NrGmaQUMvL0Ds8w2+yJkopK/vXDfnQ6GD86gpjIkTS3tLFpz0H913S5QXZO4XWexRnawc3ZidEjgrlyPYczSalSsDXlLoly4S5xugBTx41h19FTlFVVU9vQABjnkYJ8vTl18TKq9nYuX7supRfKq2oACPT1AsChw0nXNDSi1miQy+SkXMvq/YtN8Lfe7m7S35l5BQT5eqNWa8gpKDY6zmADwIXUNIJ8vWltU5J6PVu/30e/P8jPm/LqGjLzC8gtKpHy34VlFdLd3N7WhoamZumCa2xuIbuwiFtFkK83FdW1KJUqFkyfLE2/lap2LqSmGdXZFCzMzQFQ9TC97omwoACqalM5lnAJgNBAP9ralBw5l8ipi/ocbXhwoHR8Ubk+3RATMZLZk8ej08EPR08ZldmmVKHT6Vg+d6a07Z2168kuLCa3uNRkp2tKOVW19cydEsfcKfrIdsfhE+w9cZbcohIAo2ChsraOAG/PHqNSU/rBTHHjMu5r/PVHZm4+9Y1NONrbcT4lTdru7aHva3MzM6bEjuHgmQS+P3AUjUarjzD7mIGVlFdJ/vvBBbNxdrAfcK65KzMnjuPK9RzOX76CUtXeMeMd3f+Jw4S7xuna2lgzPiqCc8n6hra1sZae7oJ+inPo7AXKKqv5dPN2vNxcqKypQ6fT4WBny32TxgMQERLEkXMXaW5p5b8//godOskxDxZ3FydiR4WTlJbB9kMnyC0soby6ltqGRqPjPF1diI+J4mzyFQ6fTSQ1M5vmllaaW9uQy2U8MHs6AItmTCElI4vWNiXvfLEeD1cXlEoVZmYK/viLF6V6FJdXkpKRxTtfbKCyprbHfOFgWTgjnsvXsqhtaOT3739GiJ8PTS2tFFdUotPpmDZ+7IDK83JzAaC2oZE3P/gcGysrXli9DBen7g9PQe9QzySl0tqmRC6XEeLnI9VPqVIBN/K5oL9hlVfVcPFqBo72duQVl1JWVW1UZl1jI3/4YC0+Hu64Ojmg0WjJ6XCCA0ndmFLO37/ejJmZAk9XFyzMzcnILTDaH+Lvi7m5Ge3taj5c/x3uLs7kFHZ3kqb0g6njrz80Wh3/+ffPsLO1pqZOH9jERIzE09VFOmZGXCyHziZIfdFXlAvg7+2JXC5Dq9XxzY69BPp4cSIxeUB2dUWfbtTPhAyfBzzjvYMM/0d9nZjZKRKJi440mkpZmJvz+rOPMTMuVv/KTlUNVpYWTBwzit+88IT0sGNM+AgWz5qKg50tNfUN+Ht5smTW1Ju27YllC4mLHoWZQkFmXgFhQf5Svtn4uEWsnDcLLzdXquvq0Wi1RIQE8sun1hARoo/cPN1c+PefPElc9CjsbGyoqK5Fq9MxKjRYKmfxrKlMiIrEytKSyppapo4bQ0xk2E3Xw4C3uxu/eeEJxo+OQKfTcSUrh6q6egJ9vFjacXMYCJPHRjEhOhIbayvKq2rILSpBpe79JtHZofp5emBpYYGDnS0ervo3OWQyjF6xW71gDjGRYeh0Ok5cSMLb3ZXxoyONyrS11t+429XtZOQWkFVQhKerC8vmzGDWxPEm18WUcuKiR2FtaUlecSlXr+dgbWXJtPFjeXzZQgAc7Gx5btUDeLq60NqmRCbTP3Dsiqn9YOr464txo8KYP30yarUGC3NzxkdF8OTyRUbHuLs4MSpU/zZR52cqveHp5sJTyxfj5uxEZm4BaVm5PLxw7oDs6opMJmP6hBs5+PiYqJsqb6iRVdXWDy7xdgdoaVPy67ffR6vV8ruXnr6pB0sCgWBwfL//KAfPJDAmfAQ/e3TVHbEhNTObjzZswc7Gmr/8+mWjAOxO49rL7M3AXZFeMLz4XVBajrYjMhQOVyAYWs4kpZJy7TpXrucAcP/USUNuQ25RCYfPJpKRq3+IN3vyhGHlcE3hrnC67Wo1CSlpmJkpCA8O5GmxKIdAMOTkFpWQfO06TvZ2LJwxhRGBfv2fdIuprq0n8Uo6Vpb6dM2C6ZOH3Iab5a5KLwgEAsFwp7/0wl31IE0gEAjudoTTFQgEgiFEOF2BQCAYQoTTFQgEgiFEOF2BQCAYQoTTFQgEgiFkWL2nq9PB1oNHuZCSRn1TEzod/OaFJ4wWPO+PS2kZfLZ5OwD//epL0nKGw8XOppZWfv32+8AN5YShsLmv7xcMHCHmKIRXB8uwcrpJ6RkcPJ0AwKjQYOxsrbGz6XkhizspdDgQOwXDm8TUdNZu2QnAX1//ea+SPYK7i+EshDqsnK5hMXBLCwv+7cnhK8V+t9jZE3Y21nzwn68BoJDfXT+fHA6oNRrMFAohVikYNEPmdJOvXefgqfMUllWg0+nwcnNh5sRx0hKB/7duk7QKvFKlksQL//Hm690UG/oSOrTstAhyRU0t63fsI6uwCBdHBx6cP9toOUhTBP+60pedIDNJdPJWtdm6bbs5m3yF8OBAXn36EQBe/+sHNDa38MzKJUwaO5rL167z8catyGTwzhv/BtBneuMnDy/nyLlE8ovLcHHq3mYpGVl8v/8o1fX1BPl6s2jmFN7/+lsAnn9wKROijVf26kx/Ypu3U1i0J6HDb/ce5si5RMm+N975EEBKGRhSCJPGjsbZwZ7Tl1KwsrTkj7/4SY9ilS1tSnYdOcnla9epb2rCzsaGsREjWT53JjYd6+D2J1hpKoPpezsba5Ns7IlbLbzalYGMweLySnYdOcn1/ELaVCqcHRwYPzqCxbOmYGFuPmAh1KFmSG7TZ5NS+WTjVrILi7GytMDe1obCsgrW79zHD8dOA/ol7Jw6pELkcpkkXtjTKuKmCh1+umk7OUXFqNVqyqtq+G
)",
"_____no_output_____"
],
[
" - The \"vital record\" I used was my birth certificate in Spanish.",
"_____no_output_____"
],
[
"5. After the prompt with all the necessary documents, you will be asked to create an account. Here, a French phone number is necessary.\n6. Then you will be asked to upload the documents. If you don't have one available, you can always log in later to upload it.\n\nSo, one mistake I made here was to wait until I had a bank account number (which took approximately one month) to start with the procedure. As the instructions on the website point out, there are two stages to get registered:\n- getting a provisional social security number\n- getting a definitive social security number\n\nIt would have been faster for me if I had uploaded the documents for the provisional number as soon as I arrived in France. It is only after getting the provisional number that the process for a definitive number starts, so, I really did not have to wait until I had all the necessary documents for the definitive number.\n\nAnyway, after uploading the documents, you will see a \"Processing\" sign next to them. With this, the initial registration is done, and now you have to cross your fingers wishing they don't take long to process them.",
"_____no_output_____"
],
[
"## Getting a provisional number\n\nAfter the initial registration, you just have to wait until they send you a letter in the mail with your provisional number.\n\nIn my case, it took approximately two months to get this letter.\n\nHowever, other students had to wait for four to six months. In those cases, it was better to go to the social security office (called *Caisse Primaire d'Assurance Maladie (CPAM)*) and ask for the status of the process.\n\n- Even though in the websites of all *CPAM*s in France it says that you can only go there with an appointment (*rendez-vous*), you can really just go and wait in line.\n- It is way better to go with a native French speaker to help you, or even better, someone from France, who know a little more about the bureaucracy in this country.\n- In some cases, the *CPAM* had lost the uploaded documents, so some classmates had to show those documents in physical form there.\n- In some other cases, everything was fine, they were just being really slow with the documents. This was even slower with those documents written in a non-Latin script.\n\nProvisional numbers start with `7`, and once you get them you can go to the doctor. However, you will have to pay full price out-of-pocket, which is **25 €** for an appointment with a general practitioner (I don't know how much specialists charge). After that, there is a process where you can mail the receipt and some form in order to be partially reimbursed (I never had to do that because my definitive number didn't take that long to arrive).\n\nOnce, you get a definitve number, it is way easier to go see a doctor.\n\n",
"_____no_output_____"
],
[
"## Getting a definitive number",
"_____no_output_____"
],
[
"Approximately one month after getting my provisional number, I got another letter in the mail with my definitive number. From what I have heard from other students, this was a really short time.\n\nWith the definitive number, you can go to see a doctor and have the social security system partially cover your expenses. For example, for appointments with general practitioners, **60-70%** is covered, so I ended up paying only **7 €** per appointment.\n\nIn the same letter that notifed me about my definitive number, I received a form to apply for a *Carte Vitale*.\n\n",
"_____no_output_____"
],
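[
"To put the coverage figures above in perspective, here is a minimal back-of-the-envelope calculation. This is only a sketch: the **25 €** fee and the **60-70%** coverage range are simply the figures mentioned in this post, and real reimbursements involve extra rules (such as flat participation fees). At 70% coverage it gives 7.50 €, close to the roughly 7 € mentioned above.",
"_____no_output_____"
],
[
"# Rough out-of-pocket estimate for a general practitioner visit.\n# The fee and coverage figures come from the text above; actual\n# reimbursements involve additional rules, so this is approximate.\nFEE = 25.0  # €, standard GP consultation fee\n\nfor coverage in (0.60, 0.70):\n    out_of_pocket = FEE * (1 - coverage)\n    print(f\"coverage {coverage:.0%}: you pay ~{out_of_pocket:.2f} €\")",
"_____no_output_____"
],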
[
"### Creating an *Ameli.fr* account\n\n[*Ameli.fr*](https://www.ameli.fr/) is the website of the social security system in France. After getting the definitive number, you can use it create an account there to follow your medical history (including reimbursements). You can also use it to do some bureaucracy. It is only available in French.\n",
"_____no_output_____"
],
[
"![Screenshot from 2022-04-04 12-11-11.png](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAgUAAAB5CAYAAACp41KeAAAABHNCSVQICAgIfAhkiAAAABl0RVh0U29mdHdhcmUAZ25vbWUtc2NyZWVuc2hvdO8Dvz4AAAArdEVYdENyZWF0aW9uIFRpbWUATW9uIDA0IEFwciAyMDIyIDEyOjExOjE4IENFU1QSGPkNAAAgAElEQVR4nO3dfVxT990//hcqwUmAFKwQKzctiK3lrtK6CqRatwIqtKvQrSATu6u2Ku76/eh2qbC59ZoWxWur2zVBq9sqK4jbwN6ACrar2nDTXS02ELQ1BptgJWE1Gkywklr5/nFyDjknNwTkzvp+Ph591IST5OQQct7n83l/3m8P8eN7+zEGTp/VjcXLOOUBwN03OpRtx8KDc6TjvQuEEELuAB79/f0T6fw3qi6tqcK1mjMut5mWPhfT92SO0R4RQgghE8cdFRTgm350L38dfR9/4fDHXg/PQuCh54DJHmO8Y4QQQsj4mzTeOzCmJnsgoCQDU2b52f1oyiw/BJRkUEBACCHkjnVnBQVwcvJ3ESwQQgghd4o7LigAmGmC6SUZ3O3pJRnwenjWOO4RIYQQMv7urJwCgat7PwQA+L7w6DjvCSGEEDL+7uiggBBCCCED7sjpA0IIIYTYo6CAEEIIIQAoKCCEEEKIFQUFhBBCCAFAQQEhhBBCrCgoIIQQQggACgoIIYQQYkVBASGEEEIAUFBACCGEECsKCgghhBACgIICQgghhFhRUEAIIYQQABQUEEIIIcSKggJCCCGEAKCggBBCCCFWFBQQQgghBAAFBYQQQgixoqCAEEIIIQAoKCCEEEKI1bc2KDCaLdy/i/a3oLxONY57QwghhEx836qgoKRKCYAJCDbuauLul7fqEBrkM167RQghhNwWbvuggB0BaFMbUF6vQkmVEhKxCD02IwVGcx/8xKLx2kVCCCHktnBbBgVtagOW5tdy/25TG7ggoLxeBaPZgp5eCzbsaoZWb4IsdiZqGzTjuMeEEELIxOfR39/fP947MVRL8mtRuSUZErGIGSGoUyEnNRJtagNkcVIAoOkCQgghZIimjPcOuOvF7ScAAMXrExAdHoDDjRoYTX3Iy4xGelIYYiICEBMRML47SQghhNzGbovpA7mCSRTMy4zGxl1NKFwVD6OpD7K4mQDAjQ4QQgghZPgmdFCwYVcz5AodZHFSyFt1UHYY4Cf2gkQsQl5mNI0MEEIIISNoQk8fyFu7cLXXAq3ehMotyVCqDViREul4W4UOa4pPoLPbjHUZUSjOWzDGe0sIIYTc3iZ0oiG7sqC2QYODW5NdbntP+n5c7f2au924dzl6zBYoO5iVCVq9CZ16EwAgJMgHezYuhNZ6ezyTEsvrVLjQbeZuZ6fM5u2PXKFDQ6vO7n5CCCFkpE2okYINu5qRnhTG5QjERARArujCnk2LBn2sbUAAAIkvHOL+HRwohixWiuyUSMREBHDLF9vUBhSVteCq2QJZ3EzERARgWWLomJ58K+pVaGjVcbeTYqXc69c0aJD9q3cBMIWZGvctp8CAEELIqJlQQYGyw4D0pDBo9Sbu5JeXGe3WY7eve5Q5wQuCAwAozluA9KQwu/vTk8KQnhQGo9kCpdqAmgYNluTX4rG4mViREjnuCYxKtYH7d0+vBbUNGrvjMTerkjfSAACm91ePyf4RQgj5dpkwiYZGswUrUiLxYvEJyBU6yBW6wR8EQKs3wWi2IC8zGge3pGBZQigKVs7jbbO2+CSvF4KQRCyCLE6KHesX4ExlFpJipSgqa0HC6kNu78doWJcZjWUJofD19gQAXpVGgBnpEAYEADPCQAghhAzVhAgKSqqUqG3QICc1EoW58ZAruty6Si/a34Il+bXoMfcBYJYmHtyajMJV8QgOFHPb9fTyeyEMJic1Ekd3pqE4bwE2lDRhY0mzy6BitEjEIuzZtIgb/WCXYLKcNXmi6o2EEEKGY9yDAq3ehDa1ARX1Kmj1JuSkRuK1QXIItHoTElYfgtFsQdO+DG6qobxOhVnpZfBZvM/uCvpwo3bI+yaLk6J5Xwb8vEVIWF2NNpvh/LFgNFuwtvgEACbXQBgoyVu7uH/bBkFDea9avYlLuHRHm9owLgHSYNrUhiG9DyF2xGkojGaL25+J4Tz/WHnj6Flct3wz3rtBCJkAJkROwWubFkGrN6Fof8ugAUGb2oBnNx+zyxMwmi3YVNKMnl7HX7zBQWKH97ujcFU80pLC8OzmYzi4JXnM6iN06k2obdTC19vTbomlVm9Ce8flgX3MjcfaHScBMCMjbWqD0/1sUxuwqaQZcpsERz9vEdKSwlCQO88umdF2uScrJFCMtKQwrMuI4rZnn5eVnRKJnNSBJaTldSocqFc5/LnwsVHhAShcFY/d1e2oadBA2WHA2uVR2LF+Aff+S6vbUdug4e0X+z625y2ARNAEa0l+LTxsbh/ZmYaSKiVKq9u55wgJFGO7kxwU9nW3lZ1CbYOG91lLSwxFQW4875g72tbV/o2XfW+fweKHZ+Geu73He1cIIeNsXIMCrd6Ejbua8dhDM7EuI8rtgMDRibm0Ssn7kmavnC90mxEV7o/XNrp+blvldSpulYLtSoiDW5KxJL8GxXkJvJPdaImJCMCRV9MQHRFgdwIRThHkpEaiqKyFGyEpr1NxJ1BbcoUOS1+qtbu/p9eCinoVKupVOPJqGve+S6qU2FT6od32nd1mlFa3o6JOhfbKLK4zpVywkoL3GL3J6c+Fj+0HsCS/hhf4KDuYq/INu5qx+1C73T7Zvo82tQFN+5bzfma7ygMA1hSfREU9fwqms9uM7F+9yzsGrPI6ldPAs7ZRC6PZgqM70wAMNO0SbsvuX22DBkd2plEBLkLIhDKu0wehQT44uDUZ/f39TufHWa4CAoDpjhgV7o+ocH/4envi4JZknKnMgun91WjelzGkL9/QIB8kvnAIWZuP8YaHYyICcLoyGyXVSi6Zz2i2MFesw5iecIcsTurwitL29diTq8zmJHu4UePw+Z7dXM+7zR4v29vsyVCrN9kFBMIT/bLE0FG54m1o1fECAmDg/bnzu1R2GAb9TAkDAlslVUre7ZoGDdbuOOl0JMrX2xN7Ni4EwHwmsjYfc7otwAQHa4pPutw/QggZa+M2UlC0vwVtagMKV8VzNQKcYb9knQUERfuZK+TC3IVYu+Mktq979JauwGRxUqxdHoXdh9qxNL+WVx9AIhbh6M50AMxJM3H1Ie7LPzo8AEd2po36sLDRbOFd9bInS1ncTBw4dg4Ac8Vru7ST3V/bJZu2w/FtagOK9regcFU893PhfPnuDQu5EZKaBg1Kq9tRvD5hhN8dn6+3J2IipvPuY0dFQoN8sC4jCjERAfATe6Gi7iwviKmoV7kc0fH19sSKlDlITwpDed1Z7tgBwOEmfpC30WZqA2CCpx15CQgJEqO2QQM/sRd3rEurlLwpDdvjbDvyouwwcGW8CSFkIhiXoMBot
qC8XoXYiADUNmjssuqF1mw/wX35C9U0aCBv1SEpVorSamZI2d3aBq7sWM/MKz+7uR5Zm9/lnezZ/wuvBpUdBm4VxWgSTh04awzlqK6BrYZWHYxmCyRiETM9IqgaKVwC2dCq494bW+NhNCXFSrkW2UJN+zLs7s/LjEZ5vYobYRBOFwgd3ZnOfaZkcVJeUGCrpkHDS1z19fbE0Z3p3OsLj3G5zQhEcKCYN40j3MeaBg0FBYSQCWNcggKJWISmfRkorVLCaDNv7wg7jOvs5MaenJjaBl0Oex5o9SYcbtSix2xBVLi/2yczWZwUxXkJWLvjJJbm19qNAtxKtvutkCu6eLfZ4xca5IPgQDF3AquoP8c7bsKfKzsMiMqqRFpSmMNiTcLb7Fz4itRIXoLhaHEWEAADgVlNgwYNrToo1QZeToI7hEFmUqyUF0iwV/FKwYjJipQ5TvfLaLbwAgiJ2Avbyk7xtvGwSXdk8yQIIWQiGJeg4NlfHkO6LAzrMqNdDrUbzRaUVLejaV/GoM8pi7NfslfToMHu6na7k0Xj3uVuTy+wV8Zrd5xE4upq5GVGOy2FHGzNyB9ttTb5An7eIizNH0gcvGrmj1ywIwEsNlmSnUawTTCUxUqxe+NC7r2FBvlg94aF3KoGdvvS6naUVrdjXUYUCnLjR2W6JCnWcS4Fq6ZBg00lzbxh+tEiDP5cfXaEAYSyw0AnfkLIbWPMEw2NZos1uRCDFhQqrVIiJyVyWCcdtm+Ao6tH4ZX2YHJSI5GdPBud3WZsLGlGVPZBJKw+BInYi7fdL1aNzgnSVk2DhpcX0NPLZO2z/wmT24RTDTERAWjal4FlCaF2zy1v1WFpfi1vPX1OaiSOvJpml2AIAKXV7VwdhbFUtL8F2b96lxcQRIX7o2DlPF69BkIIIUMz5iMFtQ0aVNSruOQwV8rrVW6NEjiySZAYZqu0uh0rUp0PATvy2qZFSEsKw5riE7ja+7XDq781xSe5ZD1nLZ5v1VDLLjvKcWBXfWj1JpRUtaOi/iwXaHR2m1FRd5Y37SCLk+JoXBrkCp1dQl5to9ZpstxoFOsxmi3Y9teB4XhmpUkK9/ryVp3D0s+3Qjgq5KpgkfA4hASKscfFctiQW6ifQYZGrtDBw6ZQhW2g+83Nfrz0+0a8/cF53DNDjJeyYpHxePg47CUh42vMgwJZnBQ5qZF2mfGOnKnMGtZraPUml8PKnd1myBVdQ06UYzo4ZmPjrianSWmd3WZm/XudCgdczIkPl+3Uga+3J/Iy7HMtyusH2jELs+jZE7VELEJokA+XUGlbu0CrN9v8e+D3xE7RxEQE8LL829SXHAYFwkS/kagIKRyez8uIHvVEvWjBdMGBehUKBaNCttM0UeH+XCJhZ7cZftZETjK+hPU5bBuHvfPB5/hL7acAAMPVPuT9zwd4UnYvPKeMe9FXQsbUmH7i5QodN5zt7IrXaGbWbz+YVYml+bXDau7jJ/ZCVLi/y22EmfXukohFeG3TIuzesNDldvJWHbI3Hxvy8ze06pwmMAobIMliZ6JwVbzdf7JY+9wKVtbmY4jKqkRFvcrplTx7cjOaLViaX4uE1Ydc1mFglwz6CQIgZYcBG0uaUVGvwtL8WrsAZSSwx0urN2FjSfOgKw6GIz0pzK6XRuLqaq7t9caSZkRlVXKfaWGgtjS/ljveRrMFtY1aLM2vHddmW4Rv0mQP3u0b3/SP054QMr7GdKQgJEiMorIWeHh4oPXcJYdL9+SKLq6oTGe3GUazZchX9BKxCHkZ0bwEOVvLEkKHnBBoNFtwuFGDZYlh3FW2M2yGv7xVh5IqpctlgWmJobwTWVFZC/r7+3n1Alj2SxEdXyGnJYXxh/gbNEhPCkNJlZJ7LaZwjuPjwx6bNdutpY27zchyEuD4entyV9IxEQG81Q0AuGWiI0V41S5v1SEq++CIvoYjxXkLkP2rd7nb7IiQrW1lLZDFpSEnNRIl1UputGCgUJHgeFu3J+MvLTEM339kFt776AtMnuSBP7yURKME5I40pp/60CAf7Nm4ENHhAU6L3jjK3h4ONjkwKVaKgpXzeCMHEh8vbCppZurSu1GJcGNJM4KfLMPGXc14MOvAoFd4hbnxyE6eDWDwk6Kj4MTZMHuNoEqhs/oOwvuH0iCpYOU8bqg7xI0lh3s2LuINoztaEspij8mtkIhFdq2xWb7enqOWaJieFDbo6FCr+hI3+nJ0Z/qgo1Wt6kvjtqyV8E2e5IE3i5fg//6SCc1bK0ctJ4iQiW7MRgpKqpToMVsQEuQDWZwUPeY+h/PtK6xXWbYZ9sPF9lKoqFfxluoJy9uGBIodJgdW1KtQtL8Fnd1mJMVKERIoRl5mNGIiAlwGBm1qA17btAi1jZpBl8yFBvmgce9y3tC3cBgeYOesvXjJUc7mqSViEbdagqXVm5CXGY20pDBU1KlQ06jhrmSDA8WICQ9AXiZ/fn7H+gXIy4xC0f4WXgJfVLg/YqwNi4QjJulJYTjwmydQVNbCPT9b/c9PLOI3VbJ5rJ9YxHtv0eHO5+ALV8XDTyziigD5ensiLTEMhaviUVLV7jSQdLSCwpbwNYW/h5xUppZD0f4W1DYOrAIJDhQjJyWSt8RWIhaheV8GyutU3DQDKyrcH+mJgy/JJWPvgbC7xnsXCBlXHv39/WMyeWY0W7AkvwYe8EBMRIDTCoUAs+SMzTDPTp49aKMkZ9rUBmRtPub2Wnb2pN+mNnCd7YIDxXht4yL7tsVOGgsBzMmlad9yLFhdDQBoHuYKCkLGwmNr3kTllmSXXRJrGjQ43KiFXNHF/T256qy5YVcz2m2Cs+15C+AnFmFb2SnuOWSxUqzNiOKmB9mukuyS4ZiIALvOk7aMZgu2lbXwOmXKYqVYkRrp8ErfZ/E+3m3bREO94Rqe2/o+dztilh/++DOZ0+Nhi22U1Y9+3koYQm5HYzZSIBGLcHBLMjr1ZrSpLzn8Q7dtNcvy8PBw2QbYGa3e5LBLnZDtHDhbh4BVsHKe06s5V8O+7JVqe8dlrF0eNaT9JmQicRVYu+r4qOww8EZHSqqUTFVRm79HtrYGOy0j7EDJrBLSoXJLst2J1lkXSq5mh0LHNahyx1d93/D2191EZKPZwoyKWTuFEnuBS1/Htes3cOwP6VgQHTTeu0MGMWZBgdFsQafeDKO5z+FctVyhc9hZbrhtZrM2vztoQAAAHgAuvJMLuaKLu+Lw8xaheP0Cl8mEnYPMBbMZ/2PRYvnbSqG6hIZWHeIipyMhOgiTJnkM/iAyovzEIhjNfS636em1YFNJM47sdJ406WwJL2AfDAife5sgIZNdFePq77uiXsVNC44mpdqA0CAfCgjIiPuvPzZhz5unsXzRfdi9YSGmTR2b0/WYBQVKtQG1jRq0qQ3w8xbxVhS0qQ1Y+lItr4WvrZ5epktio4MmOI5s2NXsdoJiZ7cZSrVhyA1+BltzXzFI296JxHa6BmBGSEb7yxQAluTX8q7Ojryaxl0RHnz3HFZvO8H97GfZcXj5+UdGfZ8IX2iQD/IyorHtr6eQnTwbaUlhkIi9rEtA
m7i8CnmrbtARPTb3Q1hgij25BweKIYuV2v1cbtO4C2AqodoGBOyIXqfehKL9LdzS19LqdsrbILelP71zBn+p+RS/WT0f+VmxY/raYxYUyOKkaFNfQkFuPCrqzvJ+VrS/BQCTsZ34wiG7xy5LCMXhJi3WFp9A5ZZku5/bKq9TYfehgYz/7esexYrUOUhYXe200p1wmZs72gYJOtgvpsGGIYVzr7ZzrI48+8tjuGrzhbg9b8G3sjDO8ZaLvNtHmrQUFIyTdZnRWJEayRs5k4FJFuYXsXIeFESF+3OdJY1mCx7MOsBLJrb9uVZvsltmqlQbuIDRdtQhKVbKBbCSiADs2bQIwU+WAWCCjdHsWqrVm6DsMKDH2src15spUsVOLfqJvXDAmtS8LuPbPY1445ubmDKZlnAOR38/cLO/H5NtRkKff3Iunn9y7rjsz5jWKWDX6wvX7cdEBOBwk9bpPH1eZjTzB96oxZrikw7nCo1mC3ZXt6OojAkw2Pa27JdUTkok72qYNVjjHUeEnfAcKVg5D9v+esrhSgJbwrlXrd7kNCgor1PZFQAabhGmie6J+cG8L/+UR0PGcW/ubBKxiDuZH27UQK7QoVNvshstczWllm6t78E+X0zEdN7n3vbnoUE+8PX2dLgCSbjqxwOw60IZEijmciAGm+a7FXKFDm/UnUWn3oRX9rcgOjwAO9YvQEUdU6hK2WFAdHgAQkehlHXV+x14buv7WP3UXIRJfbD3rTPQG67h/jAJ/nv1fHzv4Vm87T/VXMGWv3wMeasO1/tuYHawBC/8YC5WLbuf2+b5ouP423tq5GfF4jer59vd/58/jMEra76L7svXEJFZAZ9pIry9Ywl++qocesM1aN788ZDew5/eOYM9b57G5xevwt9vKtKTwvDy84/A13vgO/P/znSjaP8pfKK6hG9u3sT9oXfhpaxYLHXQu8VWj9mCVytb8bb8c3R92YsZ/t/BkkdDUJA7D/6+U7ntLvzbjFdeb8GJUxdx5WofQqU++PGSOXjx6QchsqlTUfnuOZRWtePcFz3wmeaJ+PvvxubnHsaD9zHLjssOf4b1v5Pj4Qdm4HjJU9zj2PsfipyOD/Y8DQCYl/t3nLvQg+rtqdhd3Y4PPunCB3uexoP3+ePa9Rt4ZX8L/vFPNQw91xEq9cGqZffjp8/E8Ep0D7Y/t2JcuiQKFa6KR02jxmW/gsotyViz/QQq6lXw9RbxetTLFTqsKT7BfRFEhfvj4JbkUWvtK6ylIGy5CzD1B6IjAoZ8Fd/ZbUaNtdiQUEm1csj7ervKXBwOy42bePuDz5EUK6WEzXFkNDM5A8KlvKNJGDQ4wyYWjocc6+hJUVkLjgryKXYfaudNh42WN46exVSvKVg0byZUnUa0njPgmcJ6HC/5AWJnM989p89fxhP/WQPTNQumS6YiJMgH7ecN+Onv5Pi8y4T/Xj28ETjTNQsyCurQY7Zg3v13D+mxv3ztX/jD39owZfIk3B8qgVZvxr63z6D1nAHH/jcdkyd5oEmpx7KXDuObmzeRGCPFlMmTcPKTi/jRL49h189kyLUJaGxZbtxEyv9fg9PnL2PGXd9B/P13o/38Zex58zTkrTo0vPY0pkyehMtXr2Nx3tvQG67hgbC7EBMRgA8UOhTu/hCt5y7hT4WPAwB+d0CBl//0Eb7jNQUL581E15e9ONyoxfsfX8TxkqeGfSJ+/pXjuGLqg5fnZIQEifH1jZt4asMRfNjeDfE0T0SGSHC204hf7PkX/n3lK2x98bujuj+sCTPec3RnutNiOTUNGmb1wtZkFKych92H2rH37TOoqFdhTfFJLH2pFp3dZgQHirF7w0I078uwCwicjUK4Wg/vjPC5VqRE2uVDxEQEDLkSI2u3g4JHcoWOW/d/p8hOno2/bU3GT5+JxpTJlGQ4HozmgZLOLF9vTyTFSkekGNXQ98d10uNEkZ08e0yWJoo8J+Nk6Q/wxq+/j+Y/ZSAtKQxf37iJ3x1QcNv8/I9NMF2z4KnH7oW6Kgct+5/BP4pS4eEB7DyowKeaK8N+/ZVL5uDL+p/wro4H03rOgD/8rQ2TPDxQ+7ulaP5TBpQVP8KsGd74vzPdOGodDf1Lzae48c1NrHk6Ckd3pqHmt0tRtPZRAMC+d844fX5lhwHXrt9A3OzpaCv/EY7uTMOpsmcgnuaJ0+cvo7FNDwCokWugN1xDTEQA/vXnTPz9lRTU/z4dAPD3f6q5Udi9bzGv9edfPI5/vJKCxr3LsSwxFF/13UD5LeSORYX7o/PtlbhU/xP4TBNh79tn8GF7N2bN8Ibirz9E858yUPf7NEzy8EBpdTu+NH41qvvDmhAjBQAzpHh0Z5rdWmKAibrzMqMQGuSDwlXxSEsKQ8LqQ9xwSlKsFCtSIp3OHWr1JqfZz8MZ2usRfDGlJYXxCtQMViRnMI6StkqqhjZKUNPAFCdqU19Cj9mC6IgApCWG3dIXFTt83Kk3c2vJZXEzkZ0ye9BRmfI6FQ43arh9GWyOVdlhQI1cw91++IEZSP5usMN9OlCvQpvagE69CbK4mViWGPqtzLMYDxt3NfGWI2Ynz0bx+gRIxCLIFTqXqwpGg7Bd+bKEUJdlxMerC+VojVIK3XO3N+67xxcAMMnDA/nPxjCrtZq06O8HDD3Xue+lX/3kYW7eOuW7wVg07x4cb7mIt05+PuyiTflZsbxhdndUHe8AACx++B4kxjDfRwF+U7E0IRR73zqDJqWeS2gFgHMXjDCa+iDx8cKapx9E1hOzefPvQvFz7kZb+Y8AgDsGN2/2Y9bdYnymvYKLX/YCYCrbAsAl43Wcv9iD8Fl+iJ0dAO1bP0Z/P+BjvdCT+IjQdakXn6guIfXREHhOmYTXf7kY167fwFTR5CG9d1svPPUg7vIZ+Dz/459qAMDqp+Yi0H8aAGD+3EDMCZXgU80VfPzpv7FkQeio7Q9rwgQFLNuheNt5xYo6FZdQ5CcWYarXJESHT8f+zYsH/QNcW+y4xj8w0MxnKGwf4+ctsstJCBmBUrslVUquaJNWb3K7mVDR/haUVrc7XL9dWt0OWax0yN0b2foRjoaP5a06FJW1YF1GlMMSx47WuctbdaioU7nMt1B2XOblgLzwgwftggK2aIxw7XtRWcuw3iexZzs0HxwoHnYhsZEiTApWdhgQHRFAv2er+0OZk3vf19/g31euodPa8dRzyiTMDpbwto0OD8Dxlos439Uzpvt4/iLzeu999IXDi8ArJuaiKz8rFvX/6sR7H32Be5eXIy5yOp6YPws/XjIHwTNcf8e+I9fg1UoFlGoDLDdu8n7Wf5Op17c0IRQpj4ag/sNOxK38O2YH++F7D8/CD78XgUfmzuC2L1r7KFb8+j38T/knKK1uR2J0EJIfDUZOSiS8v+N4xdxwnL94FQDw630f4df7PrL7+RWTZUz2Z8JMH7BkvDK+Aydf2x4CoUE+uFT3Hzhe8tSgAYFc4XrO0VW3QGdsv5h6ei12jx+Jq4QDx85x0xQlVe41FZIrdNj211Mu12/LW3Xcag93VVhL9bpSWt1u19HSaLa4LHz
jbqVJR8rrVFi746TT9zrcLpXEuQvdZi65UK7Q8Qp9jRWJWIRlNklmnd1mZG8+xpvS0+pNWFN80u7vUtiLYiRaeU80Is+BK8Vr129w/3Zn8s1jjGbo2Bq6s4P98KPvR9j9N39uIABmFOTj/c9g94aFSF0QgrNaI7aVncJDP/473jx53unzVx8/jxW/fhftHZfxsxVx+McrKTi0PRVhUv73sueUSagqYn724yVz0PvVDex58zQWr38bBTarar738CwoK36E36yej7jZ03H81EX8/H+bELfy71B/wQQ4HiNw8NjSwokxUofHhd1/d/bnVkyYkQKt3gQ/sRdWpA6sEvDzFnHLEXt6LcOqbMgOcztTUa+CXNGFPQ5KGTsjEYsQFe7PzfELEw+dNSpyh22FxYo6FdZlRnPLmgA4zcpmXlfKjbT4entCFjsTMREBaFMbeCMNuw+1o3BVvNtXV+syo7l+FOxa8tAgH17/BIDJArfNoyitUtqd+NkvdHlr17D7W7CJbyxfb0+sSFrL3psAABUcSURBVJmD0CAxahu13EiTvFXnNGmTuCcmPIC30sbRkuGxVrgqnvf5YTtl+lmz1tlAUVi8KDTQh/d5TXzh0JCSAbV6E3rMlgk9NXVWawTATCXcM0MM8TTmytFy4yY6LvYgYpYft+3p88yxCJMy0w9eIuZ0YBqBvjOusBdNYVJfLpnPkdZzBnxz8yZ++P0I5KRG4sY3N7Gt7BR2lH+CgtIP8fTC+xw+rvIY83354tMPojB34PcvLP5z/uJVGM19iL//bjwxnxmFrP/XBfywsB67qpR44QdzERgwDZ9prsDLczLys2KRnxULo6kPP3nlON79vwsofuMU9hU8Di/rsL3p2vBXg4UGiWE09SHlu8FOaxNc67vh1v7cigkzUlBRp0JUViVKq9u5Tnc9vRZm3bH19mAneEfcqUHQ2W3G0pdq7ZY2uZKeGMb9e7Blh0ORZvO8pdXtXA8G1oqUOS4fX5y3ALs3LMTpymwc3JqMwlXxOLg12S4pbCjHUiIWoTgvAQd+8wTOVGbhtU2LULgqHs37MnhdCYUFo4SrJbavexQHtybj4NZknK7MHrSLoDMVdWd5x2TPxkXW5k3ROLozjZfTcTsVkZqIXBWxGq2OlIOJiQhAcV6CXXJvT6+F97koF4xurXDSqt0dRrMFUdkHkfjCoQk1wqDVm/CJ6hIAplbAtr8yo4CPzJ0B0ZRJuFvyHW7e/pX9LbhpvUw/3nIR77d8AQ8P4AeP3QuAuXIHgOOnLqL3KyYw0OpMaLIm5o2U5YuYk/l7H11A/Yed3P29X32N6uMDIwA5L7+LhWvfwjsffA4AmDJ5Eh57iLngYqcYHPG0jpZ81TcwUlJRp8KZz/kJlb89oMDCtW/hv//8MXdfYnQQNy9/2dSHyz19WLj2LTy25k1c+DcTHEt8vDBvDrPa4spVZj/YY6e+0IPPtMzrXLt+A2/b5EUNhj0upYfa8XnXVe7+C/82431r3RZ39+dWTJiRAnmrDj29Ft40QUigGBKxiKsxMJw1+Y4y+Z0pKmtBTYMGlVueGHQKQBY3kxvRiIkIQHR4AHeFeivJfHmZUVzxpZ5eC9buGMiHyE6ePejVfYxgGaRcoYOyw2A3vMVWcXSXbRKn0WxBe4cBcoUOYUE+/OpzCh1kcVLIFTreSECwtdkUi1n7zk8ac5dtu2tfb0+79yGzyUtxZ1kbcS4mIgBHXk3Di8UnuN8z2xUyLSmMN4Vgu3pIuKpHuLJoqD93t2MlMDBylJfJT2ZNTwrD9nWP8gousVMM3/GazAsmba+oAebzygYhI3kRcKv6LN/ge+vfRkx4AL74shfdl6/BwwP4hU0w9z8/XYAn/rMGVe93QK7QYbpkKj7TXEF/P/D//TCGW8KWsSgcRa+3oOOLHtz/bCWC/Kfh3AUjb13/SHhk7gy88IO52PvWGTzzi3rcH3oXpk2dgs+0RvR+9TXmhEoQdZ8/Nv54HtbuOInni47jr0fPwvs7njh5igninnOyHBEAli4IQW2DBvvePoPGNh3M176G0WxBaJAPtHoTzNeZz0peRhSqj3fgLzWf4pOzXyJU6oNTn32Ja3038FDkdDwUOR2TPDyQnTwbB46dw3d/Uo3HHpLi2vUb+OCTLnh4gKvz8FDk3Yi6zx/t5y9D9uKbuO8eX2j1Zkzzcv8Uuy4jGm9/oMGps19i/k+qMPdef3x94yY+1VyBv68XPvtbNmbN8HZrf27FhAkK2tSXULByHkKCfLir4zTBl/1Q/xhLqpRDXsOs7DAgcfWhQXstsFnN7BfFSA0phgb5OKx7AAA5qXPcurKRK3QorVbyTp4jobxOhdJq562JbQmXjo1kNrbtsbna+7XDZCWWO/0viGuyOCnOVGZxJ1DbwFS4Pp9lW0dkNH4OMJ8pNvGxTc1UFvQTi1z+LeZlRiMvM5rLQWA/l0EB05y+F9bFmlW8cssAc2yOxvEfNxYlwlmRIRKsfmou/vC3Nly5eh0PhN2Fgtx5eDz+Hm6b6PAA/POPT2LL6x+joVWHc51G3B92F55/8gFe1bxZM7xx5Pdp2FTSjDa1AUZzH36WHYcAv6kjnj/y258m4oGwu/Dndz7FuQs9mDJlEubeexdWLpmDufcyyZI5qZG4525v/LZCgTa1AX1ff4OIe/zwXNr9WJXm/OSXkzoH3Ze/wv4jn+HzLhPmzbkbB7cuwO5Dp/HG0bPciMGD9/mjae9ybHn9Y/zrdDc+1VzBPTPEeCkrDi9lxWKS9UKqdMNCzH8wEK/XfooTp7owVTQZjz00E/nPxnLH2cMDeGvHEvz8j0344BMdLnT3YumCEDyX9oDTbrpCU0WTcfh3y7D9jVN488R5tKkN8PUW4XuPzMJPn4mGp3WVhzv7cysmTFDAzOnPZEYGnCwtHMpKAaatquPpAHYJY1FZi8PKhD29FiS+cAi7Nyx0ui/slwm7TyM5z7giJdIuKAgOFFuvwF0HBS9uP+FwmZirXITBsA1oHAUDzp5XmGfh5z1xrq7I8EzkDP+h/v0NN0idiMdg9VNzsfop1yVxH7yPKeg2mPg5d+Pd/33S7n7bJcSB/tN4bacH033kObv7PDzcK+X7ePw9Qz7ReXgAP18Rh5+viOPdX/pfj6H0vx7j3Rc+yw/7N3/P5fNNnuSB/0h/AP+R/oDL7QL9p+GNX3/f7n7hsTpV9kOnzyGe5omtL36XK1R0K/szXBMmKHA1lM2OEAylcImzLmpR4f6otC5Vy0mNRHmdymlwsHbHSYQG+TicDmDLrbJlVEcyKMhJjeQ1mwGYMs2DKalS8gKC4EAxCnOZug5Ka9Op4cjafIwXECTFSrHO2qNB2EyJJczlGMkrdmEgUrBy3og9NyGE3MkmTFDgjm1lpyARew06Z7+m+KTDq1rbpiusnNRIl8HBmuITOF2ZxbuvpErJjULYBirOhv2HIy0xjHeCd5QkJSRMrGpys6ukK0ZrsxcWcwxdD7MC9kVmhCMHt0JYAjctKWxCZ4QTQsjtYsKsPnAlLzMajXuXIylWinJBh0Vb7PpkR2
vqk2KldgGBrZzUSJypzMLuDQt5WdWd3WZe6cgXt5/AptIPmSTA5VE4XZl9C+/MOdukvGUJoW4Nd9o2fxEWVXJ13IS5Grbru4UNZWJsEsCMZovTvAVZnJSXHd7Ty5/OkSt0ww4U0hL5zVAcrUnXOmjYQwghxLXbZqQgJiLALvmILbtb26Bhytw6KYazdnmUW4lLABMcpCWF8Vq7bitr4XILitcnICd1DkKCxLwTdZt6oNvhcOopCLFZ34D7pVptr6B7ei1IWH0Isjgpahs0LgsFCXM12LoIO9bbt2U+cOwcrvZa4Cf2slsuKSQc7Sgqa0F53Vn4ib3cSlZ0ZkXqHBSVtXC/H2WHAVFZlUhLCkNokA/kii7IW3WQxUpxxI1RDUJuN5mLw5G5OHy8d4N8C902QYEttk1ySZXS5UkpOFCM4rwFQy5eIxGLeCc0drQgJzUSErHIbvqiTW3A0nxmvn5ZwsjV3R/q0kZhgqKyw+DWyTc6IoA3T9/Ty/QTYAMpdgkMy91VDcXrE+yWi3V2m4FbqGQIsH0y0rEkv4a3z8IRInmrjlsiSQghZHC3xfSBrdLqdkRlVaKorMVpQBAcKMb2dY+iaV/GsKvZ5aTyiwRtK3NeGrhoP7MvUeH+2DOOteFzUiMdthhOipVi+7pHnT6OLU5ky/bYFq9PcNjkKTt5tsuWxuzJ21GRomUJocMuXgQwIylHd6a7bD6VnTzbreJVdzrPKZOG3NSGEPLt5NHfz1ainpjkCh3WFJ9AWlIYV4hHiC29K4ubCVmcdMTWxC/Jr+VdebtaojjcKQN2fTVrsKtard7ENTkB4LAZTJvaALmiCz1mC5eEZzRbeHP4wukP9rnlCh069SaEBPlYO5UNPLdcoYNc0QU/sYgbqndnfwCma6NSbYCfWARZ3ED5Zdv3bvvYf1/5iivZCgAz7/ZGuLUbnJDt+2Wfh13eSgZ3yXgd0yUjW6CGEHJ7mvBBgaN196MVBAi1qQ28IWo/bxHaK7PoZEMIIeRbaUIHBUazBVFZlejptXAFh4RBQHmdCgfqVSjIjR/RuWPbzHXbwKBg5bwxrVhGCCGEjJUJHRSw7XGZ5YTOs8jlCh2KylrgAWY9/7LEsGFdzbOrGSrqVOgH01yIHeZmAwMaLSCEEPJtNaGDgiX5tWhTX8Lpymy3TsJtagPK61SobdQgLMgHsriZiI4IQHS4v8MpBq3ehAvdZsgVOrSpL+EDRRfSEsOQlxltlx9gGxjQUjdCCCHfRhM6KCja3zLsanVs8hlbv8Bo7uP1Uvf19kRMxHSEBDIJd+68Tk2DBtm/ehcAs/xve94CGjG4TRnNFmwqaabfISGE2JjQQcFEI1foeP0D/LyZjmwhQT4oyJ037IRHdsTC1fI6gAl0El84hMa9yydsWd+i/S2Qt+rspnuc3T9eXtx+AgC4Lnujhf3MDKWBDCGEjJfbsnjReJHFSZGdPBt+Yi9IxCJmmV+HAUmxt7YCYuOuZshbu3B0Z/og7Zp9sHvDwgkbEExUPov32Z2Uc1LnUFEjQggRoKBgiEb6ytJotkDe2oW8jGiU1w1UEWxTG7CtrIVbe5+dwjRuqqhXcbUSluTXYl1GFHZXt9vlOBTtb0FDqw6yuJkoyJ3HbV+YG4/SaiVCgnxQnMe8Vk2DBrur23kjHnKFDtvKWuAnFqEgNx4xEQEor1Ohx9wHZcdl9Jj7UJAbj9LqdnTqTdieN1ASucdsQdbmY+gxW3j327J9zcGG8Jfk1yI9KQy1DRqH72dbWQuO7ExzuM9F+5miU0vza7l9KalS4nCjFrWNASjIjYdELOL2h8Ueb3bb6IiBbTfsaoYsTsqterF9f+zz+IlF2J43UFqbXSXDPi9blZOt+7B74yLuGLC/u+iIABTnLeB6b7CNu9rUBuxYv4D3+xD+DgghZDiojNk4q23QQBY7E7K4mTjcqOHuX5Jfg2WJYViWGIor5j7uqta2mFJDqw6l1e28kw/AnFTa1AYU5MbjA0UXd1JpaNWhpEqJ7JRIlNed5do/Z//qXWSnRCIpVgo/sReMZgteLD6BtRlRSIqVYmNJMwCmOVJRWQuSYqUwmi3WfQyFr7cIJVVK7vWN5j5kp0QiKjwAWZuP2b1nuUKHjSXNKMiNR39/PzbuanJ5jBpadejv78fajCjsqmrj9ruhlVl1UpAbD63ehKzNx7AsMRRJsVIsya+B0Vq8CQAKcuMREuSD8joVyutV2J63AFqdCRXWRlFrik9ge94C3vFmty3Ijedtq+wwoKisBdkpkbyTMFtoa21GFJYlhsHPplNkp96EpFgpNlmPJQD09/ejIDceRrOFe+4Nu5pR06hBQW480hLDuMeyjak69SaugFen3oTyepXD3wEhhAwHBQXjrLZBg1CpDzw8mL4AbH0EP7EX/MQihFinJZxNT1RuSba7OpS36tDZbca2shbeCQVgui+mJ4XxmiAtSwhFaXU7PDyY0sRyRReuWq9kDzdqeYFITMR05KRGQhYrRUzEdOtzBfAaLoUG+SA9KQw5qZEOGzHVNGjgAaZ0tLLjsstmTY72W67o4u4vtNanaFMbEBwkRl5mNPIyo+En9oJc0cUdG1mcFBKxCLUNGly1JhkqOwxcgMEKCfKBROyF0CAfbltmP/nbFubG25XQliu6IIudyb1329GPwlXxKFwVz5WPllh/t6XVSnRaK0kCQEX9We49uTO9kZ4Y5vB3QAghw0HTB+PIaLbgcJMWBSvn4YNPuhAV7s9NIeSkRKK0uh1+3iK8tnGR0+dwNuweHe7P9W8YrMviwa3JkCt02FDSBKOpDzER07kheAAoGN7b45UwFu5zdHgA1x5a2LrZFaO5z+H2ErEXrtq8Xo+5DxKbK3Wj2QKJWAQ/J6+dlhiGjSXN8PMWcdMqwm1tj6Ptc9ty98RcXqdCUVkLju5MQ0WdCnJr4BUS5OP0uLHvgxBCRgsFBeOotkGDqHB/rkJiSJAPtpW1YMf6Bahp1OCxuJnMyc5FJ0hHCnPj8ezmeoRJfWE092FdhvOmRVq9CZtKmhETMR1XzUxLZFmcFL5iEUqrlYiJmI7+/v4hJeW1qS9hW9kp1DRouIZJIUE+6KxXQas3YUVqJEpWVyNUyox+JMVKERMRgGd/yUw1HNyabPecWZuPMc+hN2GFoFkVAG6f2emKkCAfbp99vT2xqaQZ6zKikJcZjSX5Ndxrs0P0tY0a5KTOgUTshc5uM2IiApCTOgfPbq7nbesqoXRFaiRKqpXYWNKMHrMFK1Ic98kAmKDFA8CB+nMosR5nAMhJicSmkmZc6DajTX0JlVuS4ScWYVvZKWsnyLN2ra4JIWSkTH755ZdfHu+duFNp9SbI4mZiTogEwMBVa3TEdBxu1CIymLl/V7USfZZvMH9uINAPyOJmMk9g+28boUE+yFwcjja1AZEhEjzG217KXeXGRAQgNMgHkSESqDqNSLMOewNA5uIIAMzJa0VqpPUxHggJ8rEOyTv/d+biCPQDeGTuDPwsOw4Ac5KeKpqMyBAJt3/dl69Zmyvdi6miyQj0n4b3PvrCrunUtrJT+
H2+DN2Xr+EPL8kQ5D/N4fvJXByBHnMfoiOm4zcvfBdTRZMBAE/MD0b35WuIDJEgJiIAmYvDoeo0IjJEAlncTEwVTcZ7H33BHe/fVSqszxfucFv22AlHCyRiL+59RYZIuOkFP2+R3e9s/txAAMAM/2n4zQvz4Sf2QkxEAObPDcQjcwOh1ZuQuTgCoUE+mD83EDP8pyEyRILi9QmYP3cGAv2nAfBAaJCPNVCx/R0QQsjwUJ2CCchotuDBrAN4bdPjXLZ7Qe68YbeBvl2wqymE79PRksKR1KY24NnNx7DXurLkTjnehBAiREHBBMW2KQbAdYP8tmPn/YXkCt2ov/878XgTQogQBQWEEEIIAUBLEgkhhBBiRUEBIYQQQgBQUEAIIYQQKwoKCCGEEAKAggJCCCGEWFFQQAghhBAAFBQQQgghxIqCAkIIIYQAoKCAEEIIIVYUFBBCCCEEAAUFhBBCCLGioIAQQgghACgoIIQQQogVBQWEEEIIAUBBASGEEEKsKCgghBBCCAAKCgghhBBiRUEBIYQQQgBQUEAIIYQQKwoKCCGEEAKAggJCCCGEWFFQQAghhBAAFBQQQgghxIqCAkIIIYQAoKCAEEIIIVYUFBBCCCEEAPD/AMaZT5Qv1TB3AAAAAElFTkSuQmCC)",
"_____no_output_____"
],
[
"\n### Using *FranceConnect*\n\n[*FranceConnect*](https://franceconnect.gouv.fr/) is an identification and authentification service from the French government. In short, it makes it so that you only need one account to do all the online bureaucracy, instead of creating an account for every public service. \n\nYou can use your *Ameli.fr* account to create a *FranceConnect* account!",
"_____no_output_____"
],
[
"![Screenshot from 2022-04-04 12-16-34.png](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAOMAAACVCAYAAACq9U2QAAAABHNCSVQICAgIfAhkiAAAABl0RVh0U29mdHdhcmUAZ25vbWUtc2NyZWVuc2hvdO8Dvz4AAAArdEVYdENyZWF0aW9uIFRpbWUATW9uIDA0IEFwciAyMDIyIDEyOjE2OjM5IENFU1QqulZIAAAgAElEQVR4nO2dd5wV1fn/3+fM3LpVygLSO6g0ARXFjt1EjEaJGhM1sZAYa2yxoIiJJYk1sRv1G40lsXfxF7ErTakiINIXdhe23nv3zpzz+2Nm7r1bKCLGu3Derxevvcudc+bM7HzmnPM8z3mO0FprDAbDD478oRtgMBg8jBgNhjzBiNFgyBOMGA2GPMGI0WDIE4wYDYY8wYjRYMgTjBgNhjzBiNFgyBOMGA2GPMGI0WDIE4wYDYY8wYjRYMgTjBgNhjzBiNFgyBOMGA2GPMGI0WDIE4wYDYY8wYjRYMgTjBgNhjzBiNFgyBOMGA2GPMGI0WDIE4wYDYY8wYjRYMgTjBgNhjzBiNFgyBOMGA2GPMGI0WDIE4wYDYY8wYjRYMgTjBgNhjzBiNFgyBOMGA2GPMGI0WDIE4wYDYY8wYjRYMgTjBgNhjzBiNFgyBOMGA2GPMGI0WDIE+wfugH5hNYarTTSkmg0C1+bzSf3voVONTLilLEMOf1ALARaKYQQIMQP3WTDDoTQWusfuhH5gHIV0vIGChVLynnz5hdZ+dlX7P3TMYQKwsz75zSKOxdz4DUn0Wl0PwQC7SqEZQYXhu3DTi9GrTRCej1coibB23e+ycwn3mfY4UMYe8ERdOjdCYCGihqm//lF5j89jf5HDGefP5xEQdcOXh1GlIbtwE4rRq01WoOUAoXm46c+48073qBzp104+HcHM/Dg3b3jXIWGTK9ZPnspn//p36yZvog9f3MMu//2WGTIRmsNWiOkEaVh29gpxZg7JF084xv+c+Mr1JdXc/BZ+3LgGfsjpEQrDQJvbgig8eaKlsRFs+TZD1h46wvgJtnzulPp8qO9sJBo1wUps+UMhq1kpxJjrgg3lNfw7M1v8vmbc9n/uOEcedGhlHQoQgMo7dlmcgWlNQjhDWv975xkinm3P8/ie16l3ZDujLz1bAp374b0RSks6we4SkNbZacQo/IFJITAdVxeeuAj3rhvGgNG9GD8RQfSe2h37zhXIaTYfK+mQaM9wfrCrl66hkXXPMma5z+g1zmHM/iaU7F3KUAgQCkwQ1fDVrBDi1FrjVIay5IoFNPfXMQ///g2kZDNcRP3YZ/xQ7GQXo8pJYimZb2fILSGTYg0MN5oYN3UmSy//HE2lq9gwNW/pMc5x3pVKgXGFWLYAjusGF1XYfk917Iv1/HoTVP5evZqjjpjNMeduw/RaBit8Y0uAjQgPBEKIbzeFDKWViA79PSHrNkvPGOQV4/i67ueZ/WUp7D7FdN/8m8oOWQIFpYZuho2yw4nRqU00hdQfW2Kx//yPlOfmsXe4/pz6sX706VXu8xx3twPz2eYI0KtNJbtCfn1F76mz7p59DvzWKQFuA5Iq4kYM7cwZ+iaXr+BZdc/xqp7X6TslLH0uf5cIr07+/5JI0pDS3YYMXqehUCImpefmMM/b3+fXbuVctrFezNibF+gqRGnOa6rsSxPZF/M2cjfp3zG1PcEE9Y8zGUjFpO65XZKD90XyzsYWhGU1k3nk3UzvmT1Hx5gwyef0e3SU+ly8S+RsbDfmxpXiCHLDiHGYEiq0Xzx8SrunvQutRuTnHzecMb/Ys9MzweeESfoBT1jDCilsKQ3p1tXkeTB2+fwn4cWkm4QdOw3iJqqFfz9m/MYzHpqTjuHohv+QLxXV28+6Lro1lwZfmid5wqBqv97nQ2THiAZTtBz8kUUnjAOKzDwmPmkgTYuxtx5YfnqOv5+44e89+aXHPuzofziwtG0ax/POPcDa2ouSnlqlJbA0ZqnHl/Mw7d+wZolNRQVSeJlvbAjRZSv+Ya9nFncu/5GkkCquCNceiVFF08kHA97vaQQntW0+XzS91ciBG5DkvKbHqb2zscIjR1Il8mXExk5CIncZE9r2Hlok2LMdVWkki6P/20WT947g+F7dePsK0cxcHcvhE25Gmm1YgHVGtfV2P68cNq0cu794yxmT1tDPG4RiVpoGSXasR9apWksX8RGHeamDX/muMZPqfV7s+TAPbCum0LRhB8RArTjeHPBZqLXWiOUBv/F0fj1Sip//1eqX3qFXc49gQ7XXIrsUGpcITs5bUqMnqsCLEugUbzz0nLunPw+0YIwZ144jCOO6w9IlKt9f2Fr5XWmN/16WR1/u/lz3nhqMcLVFBaHcZVGK0WsQx+sSBHabSSx7ktS2qa7u4ZH1l1GVLooIbCUohFoPOZ4IpMmUzhyN29N2qaGroB2XIRt4QLJtz6k7vc301jxJaVXXUzs3LOxJWZVyE5KmxFjrnFl0bwq/nLtp3w5r4KTzx7ELycOIxoNNXVV+GjtuSi0Pz+UUlCXcHn0b/N54p75VK9roKgkjJQSV2mUkyZc1IHILt3RygWtqC9fhOWm2GCVcn7tY0ysfY5qIbEDvShN0orhnHc+hVdeRrxLO9AKrb0DWmjKj2NFSpSG+r89gnPDzTh9dqH4+usJHX54RtRm6Lrz0CbEGBhcqqtS3H3TDF58ehGH/LgfE68YRvduRQB+b9hyXojWnmPetgDNy88v5/5bPuerWRUUl4SxQl5P6t0GjZAWsY79EFbYm+sph4byRWjlorEIk+If639PD7eSJN7qbGFZSKW83q5zL+S11xL/9S+JWt6qkOAl0LxdgSAB9PoqGq69loZ77yV+ysnEbrgB0bcvpm/ceWgTkxMhBP9+9EuO2/s5Fiyo5a//HMcf7x5L925FKNd7l0iraYSM1jpjWNG2xZxZ6zl1wkdc9fOprFpQRbsOUYQUuI4CgrhTl1BhB2QoitbKF4LnjNQIbJ1mgyjhgaKT/FXZ/jldF1drpGURX7sMa+KZuPvtzZqpMxFSIKXAdVpcFEiZbWfHdsT+fjel02dglVfRMGoUzpQp4Di0gfelYTvQJsT4xL3zuWvKQn591TAefeUw9t2/m+ec9y2huXgPt/KGhpZF/fI1VJ71O6y9RhB//hl0u/aEC8Iox/GGkEFB5WKF44QKOqCV67lAcuoVaFxhUaTreSN2IB+FBlCAxtWAEEghSDsKS0Ch1LzxaSnnHFbF9SfPY/nCjVh2cBrdRFxCCLAshNZI1yU0chjW268RvuchQn+/j9SVV0Bg2DHs0LSJYepPxjzPsb/YjTPPHeD1hL4lVQCZCaHwH1itwbJIpRxqbr8PecsNRDasJ4Y3InwkdhR/LzqVNDYFOoErLG9upx2i7Xthx3dBuY5Xv99bBsNUIQRCuzSIOEMb53N/5fW4aDQahaREaNbqUv4WupL/qhOJqwaSJCkqdBj/2xJOvqwHhaVxtK8rIQO3i/CvAc8VohTatlHPv
EDD7y+gaMliMmo27LC0iZ7RCkk6dYo3ibLxfPZ+MDd4Qz0pcSyLDS+8Re3e+xK58nziG9YjLYsEgjpXc2bdq9xXeS39nK+pksVeELhysGIlWLGSjOhaRXuiK1ANzIgM4cX4fhSg0QiKUUzT+3CBeJL/pn9CsVuJRSNFlk26NsI//lTLxJHzefP/VoB0EBKUm1M1AL6vUkiE6yJKOiLDYXS68fu9wYa8oE2IEQ3ptNvCcR98Fmi0ZVE9ayFVx01AHH8EBV9Mx7YkSgi04yDRSCGoRjKscQkPVlzLyfWvUStjODJMpKijV5NvLAp8+NLyXCmWJbBs6cWsWoKYTvFQ4QSqRAgbyR1czHU8SJXuRjEbUMJGa4HjaCwBu1hh1iy1ufH0NVx60Fzmf1zRZJ4bRAZlLlla6GCiKbb8Z3JdjeMoXFe3+Oc4ygtwMOQ1bWbsEzy03gOb/SyAlAuVV91A5PZbKUjXIS2J0sLz6XmFUf6DbqOpQxDWjUyqfoDRqVnc3fViktESLBpBSFxHk0oplAIn7VC3PolSLpZvjLFDklikkfWRTvyl+Fc0VI/kEw5lF6oAjYuN0L4VVQi0hrSjiEhBVESZ8a7DnLFLOfqMcs6+uRfxXQr8ONXgReMHseOn8tgKk6rn9tnUgcYm2xZoM2JsDddV2LbFw4+vQNz6H86ljho7jHYa/Qi0IGWG72v0/9loXCGoR3JoagZPx2J8lRYk6h20ZVHcwab/wChlvaJ0LrMpkQVYEjbUNLK6vIHlCzaybFENjRUbeSG2P50jA2mXqsIFhJfM0V+RJXz/ptcruwrApdjS1Ogyvn7wfhqjNYTvuhvLddHC8tsciKfpS2dz9+HFJ5bQUOcGo9wMQngLTbr1LuSAI7ptz9tv2M60aTFq7T2ia5dW81H0ZE53F2A7aVSOCHORQQ8pBEpYFGmH24t+xcdqCIPb1zLiuE7svV8RQ4YW0LlThKy7vXuTehw0S5fU8M4rK3jpyYWUfzyfLmIoAuXNaYVscu5AlEEv7bgCy05xunwCe0EvFGBLr23bkjvHdTR3TJqBkyxAa4UlIzn3yMW2iugz9BsOOKJbkyVmAbnhhVpnl6G11pTAit3kvm7i2NbKBufJrcebEmzddTc//5bKBlFXwYtta9v6Q9CmxRgM5YqtJAtlb962R/Pj9AdUI7AzvVNwrPewSyFwhSCmXRbSjoU/nshNJ7fnoP170L4klK3cD73zgnoy5hWEENgSBvQtYcDvSphwxgAevHM2L19bTlSVIdCoJj7KnCrRSO1STwmj1FsMVd9QFR5JBLKLk7fpRkBpuygV5SlKd5H0GZS9DinDCCSDR3TJuWdNCR7mYL5stRLP2/zY5rQm8s2VbX5s8/j61giy+W0tQZuaX8/WtPWHoE2L0UOgENja4V+FP+Lguo+xcVE0tU4FQlTCS6Mhxh1O+8tu4I5xIwj6EdcNlln5/zKZOJqLyousUUpTWhTi0j/sxR4Dl/GX0yuwk94tzfSETUqBRiJp5CfiISyyQ1CRsQ1vyx3wExXoCLuPKOaWfxy46WObPfGJBofyVQ0UlYRoXxZj5dd1LPuqls7dY/QbXNqi/JdzqqjZkKamOoXWXjxvr/7FdO5asNk21lY3UlGepF2HCCXtIny9qJqVy+poTCl69Suibyvnatl2+GZJDSuW1pGoTxMvDNG+Y5SBQ3dpcV2B1b22OsVX86upKE9QWBSiZ79iuvYs3OK5fgh2ADGCQhLTCeaFBzA1uhfjkx9R7Q0OAe9hlUJ4wdtKEb72OuxJ19ERQCtcJTzLaTOj5aaGjMEfWkrPAuqkFUee2IsPn6jhv8+lKbYsz7mPN0T1a0Pi0kARw5nKKGbSAEi8vKzfZeyUSTSgFI6jSTe6OW9/b1hm2bLpyi7/+8/eW8vVZ8+grKugS48CZn24Hlt35vhfFXPBdXs2iQlOpVwmnjgVp74dSqcQQuLqJJG4y3Gn9uK314zwh4HZEwXL3F59ein3TF5Ep+4QLwzx1dx6LBHHVSmk5TD+9F5cPHlUq0vdtNYkEy5/uuxT/vvKKpzGCBIbKSLIaAWvfnECsbjd5HghBK8+s5R7/zSPqrVBmyTCrmP8z3tz/jV7EgrnlzNhhxAjAFpga5d/Ff6IcclP8dZF5MwTpUS5LuGfnERo0nVox0HjRb94aW2yi4+3RO4CZa01oZBXZr1YiySO1sVoXF+IWV+o95vix/wTG0gRBNptB/yHOBQShMJbDi7PCljjOA4rv25kzTeaonY2nTq79OhT7F9rtowUgmNO6kNjSlGyS4gNlSk+e6+ctStSPPPAKgYOaceRJ/RuMgwMzuO5XVxWLE0Cgn67F1PaLsLCz+uoq3F4/h9rGDb6Gw4b36tFeSEEN1/+KVOfq8LVip79JH0GFaGVJBzr1kpGTcEXn61n8gXTEcLm2J91Zey4riyaV8U/7pjPfx5eQ0HhHM65fNh3vu3bkx1CjF6omiSukswND+Dd6DCOSc6kRkjQyhOkUkjLJvSXW7NrBqX0A3i2zXASlJv23hoeu3M+099bTcfSXRDVJbnhCIhMr1jIED5kDB9Qj4WXDnn7zF200iidZtniGh69cx6uUr5bRVC8S4jjf96/yTyp+eXaYcFVtw1l/8O7EiuwsG1P0Lll7JDg4skjm5RLNDicfMDLbFwn+eTdtRx5Qu8WBhavHolAEIkJ7n56HENGeVsjzJtVwQUT/ktjAmZ8tI7DxvfKlA/u7/IlNbz1/DcIGeGgI3blujvHEI23/ugGZZ59ZBEhq4ihY0JcddveABxwZDdqqxt57h9reempJUaM3w9BH6SxtObpwmM4JDkTKwgAD4ank29E9OwBSjXJPbMtQgx6Ra01+4zpxIbKJHNnVrBm/To662LCdECTRiD9GB2vfcfzMGEghYWFu908gMrVKNXI8sUuD9+6GgCtFVJEUNZajjulXxNhNbeICqEZPbYzRSXhTQamCyHYWJVk7vQq6uvSaK1o1yFKYZHNhrWCREPaPy5bJiMs4Y0NpBT07Of1ukppdh/RgR59C1g8R1FX3bS8t3YV5s+uRLthFGlOOXcQ0bhNY6OLZckW1lEpBY6jWLxgI0o5uE6IZ/+xiGTCIRq1qViXQOkEGyubR+7/8OwgYgTQKCGJ6wZmhocyLTKUw1NfUGuFEG6ayIUXE7rycrTjgB0YWbatR2xeNmQLjju+Dz37lbDwqyqm3rOMJe8kiYqQN79EkaCQwXzKPnxAg99Tbs+YGCEFUoQo62qz90HF2VQjWJS2H7yFIB6vJfV1aTroaGuZKBECpn9QzuTffULlWo0UUfCjctNuNVLEEBRljm/tFIGJKpX0phDBC01KiVKut6UC2XMHP+tqHaSwsWNpSttHAQiFWi7eDv4myQaH6qoUjkoy++M08z5zM9eotYPSElR4q+7r/5IdSIwi68/T8K/i8RxUORfhulg9ehK+6UZvH4ycxbrfZT+M5vNGx1EMH9Kevr2Kee6uuSjpIFU3XLy3vYPkWB4nhqYWiSWyfkcNLRZFf1ukJZAyzMAhpVx5697f9mq8
OnzjS8ueUeO6cMekGVSWS3oOFJxybn86dIpRXZXi/lu/YO03YotGqMCY1VRsOf+X4x4OYufBjy4SGqcRGlMurqt9n2XWkObV4f20Q5JozCJkFbLPuBJOOmsgruOtd9U6mzk+39iBxAhB71hIghmhIbwXG8Yh9bNwJk2BWAyddlrNibOtZB9cAdqL/7z1uunMeL+CbmUFqLUdkFgkiTOAmRzAOzTgOWOUtjKujUzrt7GnFgLwV4KkG91MLGruQ9rc19bqaUTL77ILu5OsWV6PEBFOnTiEo0/snTnmuccXs25FutW6N3c5Ta83dz7bVLRduntWV6Fspr2xkr6DSjfrC43GbDp0jlOxOsWGyhSj9++86UbkETuYGD13hlYaEbJ5Qh3IwUPShH5xqpdXxt7+KSxye0gh4YCjuvHCI4tI2mnsgjVE67uTxmI8j1CISw0SC5V54CTZHnKbh8yAZfvB7JbEtuVWCzsQqmUJWhs3B3XE4jaxAptUg+LVp5fSd2AJobDFF9PXsWpZHUJam82jJUQ24L553YFjPjunzc4vAYaM6kj7TiEqy10eu2seSmn6D96FdWsamDO9gt9cPZyyLnEg60o58MhuzPvsK+ZOr+SKs6ZxzIS+RCKSjVUpli6sJZVMc8GkPbd4f/6X7GBixBv2CYu4SDKbgXx+xv7sKUE5IO3vZ2gSCBEN+x7Qha79i1i7pI5kSRUq0Y0+egEH6Kn+XFE1Mdo48bjXqSm97WtoNNRsbATVjvo6r4faUkRL0CGnG11QcVIJJ7Oyo0W2SaWJFdj89MwB3P/HJcz+oIEzj3wXgZcJL+3WEbHKqK91mtSd+zmV9M5TV1OfYy31zlNXmwZVQEO90+L8WkNBYYhLpozm2vM+oTER5dE/r0Lr5UgZxlUJzr7MzRxrWd6L6PjT+/PhO6uZ/QF88HodH7wxy2+RhRQROvduMGL83tFelm6hXFJ2lK967MkIrbebCyF7Gt3idyetiYUt9jm0K0/NmU+0QJIoWMsJDQ9R4DrUe5vFZeeIgLJDmd+3dZhqhyQXTtqTmo0u3Xp7PcSWQ9O8n4OHtec31/bGDgnal0VbLRsEN5z+293p3K2AT99dR6IhRaddC9lv3K7YYcGcTzfQZ2Bhk7qBTE845uBdiUTChKM9KCr2wvWCSz3zoj1YtypFr0z5pi4YrTUHH92dB16K88Z/lrN+TT3RuE2PvsXsMbI9nbsWNqlPCEEsbnP7Pw/mtWeXMXdGJXW1SaIxmy7dC+g3uB0D9thyxM//mh1PjEKglUKGLFTaZcXcCsQJvdDaBbbfMDV3SVd2qOd9N3psZ566ez64No2F1XRMzgc3a03MLb89hqlSCo6d0Heb2t+1ZyETzh641ccfPr4Xh4/v1eL7YaPLWhyb+3ngkHYMHNKu1brH/bjnFs+tNQwa2p5BQ9tvsa0Bdkjyo5/14Uc/67PVZX5I8iseaHvhO/nDYYuvvpb+nEqitmMeGZ3Tk6XTikWLq/no/TVUVCXpO6CEgqIQ2nVJizD3Fp9CGu9mZyJxdG5YQLbObWVbFxEHluAgLndrzhNYM4OfXhTP5s+tNZs8j+uqLZYXgm99zpbtVZk25GOymR2vZ/SfduW6RIuKWbUqTFVNmvbF3sr77+JbbHEqDQ1Jh1nzKqlZleCtt1awdnkdd9x/IJGwRSKhKVT1fBYZzivR/Tgh+UEmZnZ7PwubX1y8aYQQ2N9iLr0pK+aWhsVCkMng3rLOresTWhs+b4mm7c0/d0YuO1zPqAmyrwni7buwYZ3DN0uTvhC/m28xFyE8a180LBk7qhP7HbwrG8uTnHzaAGoSDrW1jdiWF3ka0WkeLv4pFSLqbQOwXVrQClp7uYAMLdBK5f292aHEqPGd8MrFjrcjFC2gsSHN57MbvG+V/k5DwSbn0tqLd5WCuQuqOGzoM2xc1cD4k/pSGLcp7hDFTSu0lERVkqV2T/5ZcAxxNAqRScux3QhMkCYDeasIKfP+3uw4YtQgtG+8scOEi8sy6S6+XpIgSDIF20+QnvChS1mc488ayIBR7dFa07lTnLMuH0ptXRpLCpSQFKl6nir8EV9anYihUcLavoMmIVCVlbiTJpE+8EASAweSGDQINWQIzmOPece01jN4S/uzaS43R+7337ZM7nm2hq2tv1ndehP1q7lz4ZVXoLLSOz4PJ407hBiF8HyIwg+bChd3RNoRXMclXiD54O2NrFrXiLSEvx1iayFf3/acIrMAuX37KJdetSeTbhvjjxQVp50xiINP7M3GyiTSsrC1Q7UsymQjb5Jm8rvgP1hq5UqcsWOxrr+e0LRpqDVr0PX1yIULUUuXtl422BsyWMzpWUk2d9HZzzllNiWAJmVyzxO0e1Nt+jb1N7uGFpvP+mUb//QnOPZY1Jw532nt6PdJmxZjIKhEwqG2vpFEbQpXR5HRdgjt5T8NRSSpOpf77lpNJjHxdjDiBPPSzJpGSxILW5m/swX84U97UdarkHTKQVkWxaqet2JjeT88iCLleAmsvusb2h+epq+4gvDChSQHDsR9800KVq8mvmQJrF9P+JJL/EY1G6ZJiVq1CjVzJvqLL9AbN25yOzqdTkNVlfeL48CCBeiZM1E1NZvcfVknErBhg/dLbS3688/RX3yRFVBrSOkF88+bh/vJJ+iVKze/u7OU6MpKr+5Zs1CLFnnlA4RA19UhNm4E20Y3NEBNDSSTm67zB6JNizEI9t19eHsO/nEv+o/pSPdhPYgXh6lPKRL1isaEYsS4Ut57ZyO33LzC36n4u7kRoOkSquafpfRS/XfrWsgZlw6hti7tJanyTb33Fp9GQthebGoQKL6tSAnr1uG89BKOlITuuQfrsMOgsBDCYSgthaKiJkW01pBIkD77bJzevZEjRyKGDUP170/6j3/MHgOZnkW98w66d28Se+9NQ5cusNtuiJEjUUOG4L7zTlBxkzLp226DXr1I7bcfDR06IIYPRw0bhrP33uigt84dxgLu//t/OLvvjrPHHqT22Yd09+6kfv1rTzythPY0TpmC278/YvhwxJ57IgYOhAULsse5Lsnhw7FfeYV6rUkeeyyUlJC+887vcte/F9q0a8PyxTjuRz048JjuxGM2uLCx2mH1qkaee2o977y+gZAtuOOhAUx9o4qaGpfSEmuL4WJbQ2vO7eAhtm3JxpoUy5bUEAp5O2Ap4WUjnx3ejecKD+G0mjepRG773NFfJO0uWkSspoZkly7Ex4xpujV5y9g2hJQ0TplC+IEHSAwahHPhhejKStRNNxG56iqc3XbDPu64pnO2dBpRU4OaORPrmGNIDRiA/ugjou+/T+Kii4jNmJFZmhaUEakU1NSg5s/H/tnPaOzYEef554lPn07q+uuJPPpotn1CoDdswJkwAau6Gl58kWifPqSvuorIgw+SHjiQ0KWXZue9loXz7LOEr76aZDiM+t3vEL17Q309docOuX8k7F//GufuuwmtWoU+6STUrrsiR4/e1rv+vdGmxRgQDklKglQTNsTLwuxaFmbUiEIW/3ZXunUOEbUlQwfFM2W+j2lDsGW
5FLBiRR1nHPs6FSvqKSgM4SqN0J4xJ6aTPFpwAofVvE1Ep/0MdNt0Qu9HKoUlBLKkxBNE7rCu+YVKb8ty99lnUZZF+MEHsfbbD4C0ZcFVV+E8+aQnxtzytg1CYB11FNHnn/fOu3Ilyf790atXe0PB0tKmZcJhTwyXX07oiisAsA4/HI46CvXll03ag2Whpk0jsm4dyRNOIHr00WilsH/zG3j9ddTUqXDppdmt2gH3scewpcS+6SbsYCjeHMsidPnlNEydSnzlSvTvf48YObL1Y39g2rQYvWRJ8K+HFvDRBw3sd2Rvho+MM2ywl6lMuZp+3bzcbxpYV5mmrF32kreXzzG3vkCQu3YrpOOuBVQsb/CXOHnGJa0FEdXIynAXHucwJuqEP7zdBldHEFIX8hYxuxs2QGNjRjhAVuW5Ak0mUeXlJEIh4n36ZHoba+RIL0Z21apsmRwLrA6skEGPads4loX044Ez5wteErnH+/WIjh29z7nzOv94tWgRlpSo116jPhbL3M+CdLg1hfYAAA1cSURBVBp3zZrsNfvXppYvB62R++7r7RbtulkXRu71K4VIpzO9r/C32ROhnNSceUCbFmPgxF+xPM201xLMnb0SGdLsc2gp51/clZ67RqhucPlyYYKnn1zHcT/tQFl7b0/HrY36+Hbt0eCn+pi/YAMVaxuwQ16wQbBllhDgIikRDfwndgwjRDkHC4Fm65c9ZQjW/fXqRSIeJ7J+Pe7HH2ONG9fimOblhG0jXBfteDtuIYS3+BoQzYabwWcR/F+O8FoYoHKH7jllM8ajQNyttSsa9cQ1diyh889H19V5RpdwmEjHji2PD9qZSmW21cv8X257cg1XkUj22DyjTYvRG7Foxh7dm5dfWkpYaiJhyXsvVrFwdj29+0dZtzbNsoUJijvaXH9jTy8fgPh+/xBCCAqKQtTXppFCEBhuArHZtkWqLkVVOsbaI09Bao2DwPq2PbU/JxQ9eiDHjsV+800afvc7oo88gtxjD8/osWgRbn19U4HGYsh+/Yh+8gnOyy8jzzsPAPf117G0Ruy2G+BFrWQW+krpPei5D7YX49bSShtcR1Cm+bB5E0Npudtu3vWsW4d1zDFNBGvlujf8ubLs3x9mzCD92GNEDjrIq7e2Fm3biFjMO9afk8qiIs96vGQJcv/989K90TasqTprGMntObylPXDI/qXc9kBfug+OU7XBobDIIrHBZca7NaxdnAQJF1/fg1jEwnFVE+vn99XWRXOqcNNN0ztYtkS7iqrKJGX9irjjmUOZcMZglPZiKLPX1swLuRUPTviWW2hs1474ggWwzz409OpFQ48esO++pG+5xTsocKQLQeiCC7xeaOJEkkccQWL//QnffjupWIzQOed4p82Zn+nGRm9oWVubPalS2DU16KqqplbR4HMi4ZVJJLJlHMfbjbm6Ovt/vpjlvvuSGDWKyOzZNJSVkTzqKJJjxqBLS2m88ELv2Jxhs33OOSgg8sgjNAwaRGLUKJyOHVEff9z0esEToOvSeMEFuCNH0njjjVu8p/9r2kbPKHP/1pkxn+839CLyDxhTwt6ji3jqqQqefHAtVasbKSywSKQUBx3fniMO3gVcjd1seJrrmtjSEDH3hRDMDb3fs99r7bXu7Ze+oWZdil06Rr26NVRXpSjqGOHcS4fwy4m7UVLgZWILBJttg58tRpO1iG4K/3s5bBjWhx+SvvVW1KefIurrYeBA1F57Ef7JT/z7mL12a8IEnEgEcd99MHcu0rZRP/0p9iWXIIcOzVo5/TKiZ0/UhAlYo0ZlT11QAGeeiR0KecaaoD1+GTl6NOqkk5C5ZcrKUKecQqhvyyVfIhYj+vzzpK+5Buv993E//xzRuTOMH491/PHZa/Cv2TroIJx//xt1553Yy5bhhkLo005DDhjQ4nrt884jvXgxof/+F+rrkV26bPqe/kC0iZ2LTxz7ImdeuidH/bibt/NUKHuTA20qV2NJQAjWrG/k4fvW8v/e2sDlN/Rk3IGlaD8nzOqKNLt28Awewh/uaOHFim6uB8oVYmt7NWh/iY7raiJRm3v++gX3/GE67cti1G5IoSw49KQ+nPf7YQzoWwKA66hMTp4mLwLHy2bmTptO8pyfE5/7BSIS2/xNat7+7+K72R5+n/8Fue1slmxskwQ5c/OQNtEz2pZg/pwKjh7fDVvKJinnMxEvltdbKVfTpWOYP1zdg3PP35X2Jba//g2efbmC7t0ilJXa3rIhy/L3qNiah88zvijtCVGhWba8jmjUomO7KCFbYksLOwQ1DWnefW0FthRUVSQZsm8Z5141nIMO6gp46/qCzVdze+TgBaFtLzmHtWC296IIRTbXMDI3QmtvnhdYEwNLZk5vlYt2/V2ag+9yQ9FaQ3lbETSJiNlcmWDImmMB3eoyudbZTV1DEL4XGGmCYzclylwh5uELp02I8ezfD+XKcz+kvq6R868cSrvSKGj8LdT8QV2OKIMFp+1LbFzXW1nxwltVlBZbjNnTi0ZJVGxA3HU3kYnnoDuVbVaQgWCCutZXJrnol//lqy+qiMVsStqF6dC9kAGDS4nGbd59aTmff1BOr91LOf2CPTjljIGELIlyvQcnSBgFZIej2p/LWhaiLkF6yh+x/noLkdtu8x6grXmjC+EJMef3zfUWopXwuM0iWwlQ2FIWqtbu6bcps4VraGEI+jbH5hltYpgK8Pmn67j5ik9ZX9XIz387gNPO2t1P25/d9jtXTxkHvJ+/JZFSxKMW6yobefbler6853mmDPqIwof/5mUc31R8pS9EpbwlWErCeadM5f3/LKe0fcRbSe54PbKjFLV1jRSXRjjpvMH86oIhdOkUJ9gjUErZ8hnQ2ktGZUkcwPm/p9DX/AEnFiY2eTL2CSfk5VvcsP1pE2LMPouK/zy+mL/9aTqde5Rw9uVDOOCgHoA3Z8zNidrCMKM1roI169Ncc/UK+ndMcdUfd/OTsrX+sOfW4ToKO2Tx55tmcv8Ns+hQFvMS4wrPSpqsT9OoNPse2Y3zrhzOsCFerpZgXtiqkSiIPAGcT2aRvuwyEnNnUnjppUQuvgQRCRsh7kS0CTFC0w0ua6pT3H/rLJ775wL2P7InE6/Ykx49vVAs5XqZo3Mf+qwF9tuv1tDay6Fi25Kpb6/gghOnUhT3MrpJS+A0utTWpOk7rB1nXzGMH4/vDXgiFFK0NPT4PaHwrbpueQX119xK9QMPU/iLYyiZdD2iV09vOJjHxgbD9qfNiDEg13izZOFG7rj+A+bNXsuJZ+7BLyaOIF4Qzvj6cn18mV4OPDEImvQ4rbk2coe6K1fXcdq4V6lblyQSt1GupmZjipKyGKecvxs/P2cwJQXhzL6MgUGpSZ1ae/NDv12VdzxK4sa7EQOKaTf5esIHj8USbL1l0LBD0ebECGTmYJYl0SjefW0Z90z+ANu2Of2iYRx5/CAvXXDOfLJ5eaD1oSNN54kohbYk5/xsKp+8soLS9lFqqlJoW3D4yb35zeXD6d3L21UpsJK21vtq10VY3q6R9a++R/WVd5La+DXtr76Uol9NwB
Y0XW1h2Olok2IMyBVbOu3y5L2zePLez9htWFd+ddkoBg/f1TvOVchvEYuasZ46CsuWPHT/Qv56yYcUFUaorkkx7IDOnHfFcA440Kt/UyJsPiRNfvkN667+Gxtffp125/6Uztecj9Wu2LMGmyHpTk+bFmNAsL8CQGV5Hfff9D7TXv+SI0/cndMu2If2ZYW+98B3hWxFz6N9caRmz+G4n3zM0lUheg0u5BcXDeGkn/cnLCWuH1onWxkO5/ZyTn0Dayf/g4q7/kV83O50n3wJkaF9vPzifo9pMOwQYgzInU/Omb6a+254m43ldZz46z057qy9vWGp8uLVNhcCp5Xysps6adYccBRnfHM0+/3qECZeNJgO7aK+ZdZ3VQAImlhttd8bOigqHnuL8usfwY2n6D35QorH74/lixDZco9Bw87LDiVGaGp0AcWbz8zliT9Po2NZESdfOIZRh3hxi5sbumqt/b0eFVXT51MZb8+APbxYxs3PC1VmSFrz2QK+ueIhqj6fR89LJtDzklMQ4ZAnVq03n9fFsFOyw4kxINcVkqhv5Om73ueNRz9h9KGDOenisXTt46VmCFwhCNEyuiQH19V+RFZT32XGOuuLMLG2kq8nPc7qh9+gy8/3o8+kc4h27+CJ2wxJDZthhxVjQG4PuHLxeh6f8hZff76CI04dxdHnjiVW4GcCUL4rJMfJHqy20JAZ2vpfZGM/fVeFdl2+vvNFvrnp34R3K2XwlPMoHTvYzAsNW80OL0bwh65KIy1vQ7ZZ73zFMze/iXQ1x5y3L/ucMAKJtzGOFJLNdpFBnf6QVAMr35zB0qv/RX3FWgZfdSo9f3WEl2bKuCoM34KdQowB3pVmQ9xev/993rjvXfoP7sKRFx9K35G9gOye71tyVWxcvJo5U/7Fqpc/ZfCZR7DbtRMIFXhLnbSfhc1g2Fp2KjEG5A5dN5bX8MKtb/DFq7PZe/yeHHHBOEo6eesNtd+zBVbYIKInXZ9k1l9fYPEDb9Blr76MvOF0igZ38+eFWSOOwfBt2CnFGJArymWzl/PClJdoWFHJAWeNZcyZByIty3OFaK83dFF8+cyHLPjLC7gSxlwzga5HjvDnhQpa6U0Nhq1lpxYjBOkEQUhv18SZ/57BW7e9RFmnYvb77WEMHDcUgNUzFjPjtpdY9/liRv/maIacd1QmR4xxVRi2Bzu9GANyh6GN9Une/ftbzHz8PfqP6kc0HuLrd2bR7/Dh7H/lCcTLSlqUMRi+K0aMzcgduq5fso7pD7wFqUaGnnYAXUd6SZTMvNDwfWDEaDDkCeb1bjDkCUaMBkOeYMRoMOQJRowGQ55gxGgw5AlGjAZDnmDEaDDkCUaMBkOeYMRoMOQJRowGQ55gxGgw5AlGjAZDnmDEaDDkCUaMBkOeYMRoMOQJRowGQ55gxGgw5AlGjAZDnmDEaDDkCUaMBkOeYMRoMOQJRowGQ55gxGgw5AlGjAZDnmDEaDDkCUaMBkOeYMRoMOQJRowGQ55gxGgw5AlGjAZDnmDEaDDkCUaMBkOe8P8BnXY0QWHC/9EAAAAASUVORK5CYII=)",
"_____no_output_____"
],
[
"## Applying for a *Carte Vitale*\n\nA *Carte Vitale* is just a card with your definitive social security number and your photo, which serves a proof of your registration in the French social security system.\n\nOnce you have a *Carte Vitale*, when you go to a doctor's appointment or a pharmacy, they just scan it and the partial coverage is automatically applied!\n\nAs I said, the form to apply for a *Carte Vitale* arrived in the same letter as my definitive number. This form is really easy to fill, I only needed\n- a photocopy of my passport\n- an ID photo\n- a signature\n\nAlong with the form came an envelope and an address. I had to use this envelope to mail them the filled form back. Luckily, the envelope came already stamped, so I did not have to pay anything to send it back. I just went to a *La Poste* office, asked where to deposit my letter, and that was it.\n\n---\n\nNote: Once you get the form you only have 15 days to send it filled over the mail\n\n---\n\nAfter sending the form, I received my *Carte Vitale* in the mail in one month.",
"_____no_output_____"
],
[
"### Optional: getting a European Health Insurance Card\n\nOnce you have a *Carte Vitale* you can request an [European Health Insurance Card - EHIC](https://www.cleiss.fr/particuliers/ceam_en.html) using your *Ameli.fr* account. This card may be useful for having health coverage if you are travelling outside of France.",
"_____no_output_____"
],
[
"![Screenshot from 2022-04-04 10-46-35.png](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAApUAAACiCAYAAAAQsTabAAAABHNCSVQICAgIfAhkiAAAABl0RVh0U29mdHdhcmUAZ25vbWUtc2NyZWVuc2hvdO8Dvz4AAAArdEVYdENyZWF0aW9uIFRpbWUATW9uIDA0IEFwciAyMDIyIDEwOjQ2OjM5IENFU1QgqAOTAAAgAElEQVR4nOzddVgV2f/A8feluwUFFBQUEbu7c+3uXrs7dtU11rVdXbvWtVtXXbsDY8XEIEQFFZXuZn5/IFcuXPIiut/feT0Pz8OdO/GZOWfOfO7MmRmZJEkSgiAIgiAIgqACtW8dgCAIgiAIgvDfJ5JKQRAEQRAEQWUiqRQEQRAEQRBUJpJKQRAEQRAEQWUiqRQEQRAEQRBUJpJKQRAEQRAEQWUiqRQEQRAEQRBUJpJKQRAEQRAEQWUiqRQEQRAEQRBUJpJKQRAEQRAEQWUiqRQEQRAEQRBUJpJKQRAEQRAEQWUiqRQEQRAEQRBUJpJKQRAEQRAEQWUiqRQEQRAEQRBUJpJKQRAEQRAEQWUiqRQEQRAEQRBUJpJKQRAEQRAEQWUiqRQEQRAEQRBUJpJKQRAEQRAEQWX/U0nlyQ5tWKslY62WjOCn7t86nByLDQpif7VKbDI34umWTd86nP/sdhQEQRAE4dvRKIiF+Lve5EjDugDYNGxEh3OXlI7nc/wYp7t0BMDYwZE+z72Ujhfh58sOBzsAzMuWo8f9x18h6oLz9solAh89BODp5o24/Dj0G0f0/Tnfvw+ee3dnO17dZSupMHa80unanT5P0SZNM5327vxf+Hf+XADq//4H5UaOVvh+u70tUe/fATAiOgE1jcx3H9+zZ/Dcv5ePd24T+e4tyQkJaJuYYFLKCZsGDSnVoxempZ0BiPB9g+fePfhdPE/ws6fEhYSgrqODaSknHDp3pcLosajr6GS77oIgCILwLRVIUmlVvQaaBgYkREby8c5tkuLjUdfSyjCe3/lz8v/DXnoT5vMS4xIOGcZ7f+2q/H/bRo2/TtC59O7qFY41a4TdD61pc+xkrqa1qFARDV1dEmNiKFyr9leKMIUqcQrZi/n0iXN9e/L2csYfTjEBAcQEBOB/8wZui39jwKu3+N+6ybnePUhOTFQYNzkhgU9u9/jkdg/PvbvpeOEK2qamBbUagiAIgpBrBZJUqmloYF2vPm9OnyIxJoZP9/6lSO06GcbzPXdW8fPZM5QbMSrDeO+vX5P/b/OdJJXehw7keVoTx5L083pN1Ad/LMqVz8eoMlIlzu9FqZ69MbK3V/qdVfUaBRtMGhG+bzjSoC6R797KhxWqWIlClSqjoadHpJ8f729eJzYoCPsf2qBXuDA29Rqgrq1NclIS1nXrYVW9BtompkS9f4fXgX3EBgUR9OQxd+fNod7K1d9s3QRBEAQhOwWSVALYNmrCm9OngJSkMH1SGfbSm/BXPgBoGRsTHxaG77msk0qZujo29RooX6BMlo/RZy0pLo6XRw+rNA9dS0t0LS3zKSLl8hRnAW7HnCrdb0CWl7G/BSk5mYuDB8gTSgPbojTfuYcideoqjJeckMDrf05gaGcPgI6FBS1278fMxUU+LFWFcRPYU8758zQnRVIpCIIgfNcK7EadtJep39+4luH7tGcpHTt3BeDdlcskxccrjBf98SOhXp4AFKpUGS1jY6XL0zQ05MMtV051as9W60JsMNBmh4Mdl4YMIuyld6ZxvrtymWvjx3CgRhU2WxizXk+TbbZWnGjbCp+/j2YY32PPLnaXLU3Mp08AvDn1j/wml7VaMm5Om5zpslJ9crsnH/9M9y4Zvg/18uTKqOHsdnFig5EuG0302V2mFCfb/cDTLZuIDQzMdhl5jTOv2xHA59gRTrb7gW02lqzX1+LPooU51al9hjPS/ws89+zi3dUrAGgaGNDh/KUMCSWAmqYmJTp0olClyvJhdj+0zpBQAhiXcMDo8/D4iPCvEbYgCIIg5JsCO1NpUb4COhYWxAYG8uGWK1JSEjJ1dfn3qf0pjR0csWv1A8+2bSEhKgr/G9exbdxEPl7ahDSr/pRvL17g8oihSElJ8mERfr48/+tPXh49TOdrrpiVcVGYxvvwQc727JZhXjGfPuF79gy+Z89QefJUai1cDEBSbCwXBvTN5ZbInbeXLvJPx7YkxsQoDA/19iLU24s3Z06TnJCg9IxuKlXizMt2TE5M5OKg/nju26MwPPrjR16dPM6rk8epMGYcdZf/nqeYvkfPtm2R/1950lSMHRxVnmdMQAARfr4AmDmXUXl+giAIgvA1FVhSiUyGbYNGeB8+SHx4OIGPHlKochUg5ZLg26uXAShSpy5Fan85w+N77oxiUpmmP6Vtw8yTyiujhqNlaEjpfgMwdnAk8q0fTzdvJC40lPjwcFynT6HN8VMK05Ro1wEj++IYFC1K8TbtMCvjgkxNDf/brtxfsoikuDgerFiG88DBmJQshbq2Nl1v/Uuolyfn+/WWx1932Ur5PPUKF1Zho8G18WPkCWX5UWOwbdyE5MREwry8eHPuDEGPH1G6T78s56FKnHnZjnfnzZEnlFY1alJuxCgMrG0I9fLk/rIlhL/y4dEfq7CoWInSffvnedt8LxKjo/lw+5b8s1Mf1X9oJERGcq5PD5Li4gCoODH7M96CIAiC8C0VXFIJ2DZugvfhg0BKcpiaVH645UpCRETKOA0boVuoEOZlyxHk/oQ3Z89Qe9FS+TxSk0p1LS2llxdTaerr0+3ufYzsi8uHOXbuyoGaVQHwu3iBpNhYhUe1qGlq0sv9RYY704s2a05yQgJuixYiJSfje+4sJiVLgUyGZZWqCo+W0TI2xrJK1Txtn/SSExMJefEcAFOn0hn61FWeOp3E6Gg09PSynpEKceZ2O0Z//MjDFcsAsKpWnU6XrqGmqQmkPE7K7ofW7ClfhoSICO4vXZynpNJz724+3rmdcTXV1KgyfWam07lvXM+bU5nf8f5ByTxzItTbS373to6FhdJL2bkR/sqHU507EOT+BIAac+ZRvE07leYpCIIgCF9bgSeVqd5fv0aFcRMA8D3/pY9d0abNASjWvAVB7k8IfupO1Pt36FvbEBcaKn8Yt1X1GlkmUy5DhikkQgCFKlfBrIwLwc+ekpyQQJT/e4yKl1AYR9mjjgCFBCz6g39OVldlahoa6FpaEvPpE6HeXvgcO0KJDp0Uxsk2oVRRbrejz9HD8n6wLkOGyRPKVAY2thSpXQffs2cIefGc2MBAdCwschXTix3blQ6XqatnmVT6HDuSq+XkVGxQkPx/PUsrleYV/NSdY80bExMQgLqODo3Wb8Kp99ftYiEIgiAI+aFAk0pjB0cMbIsS+Tbl0SpIEshk+H7uT2lRvoL8Mmyx5i158PmM15uzZygzcDD+N28gJScD2T9KKO0l9LQMbG0JfvYUIEM/RYBQTw+8Duwn4MF9oj9+ICEqCiRJfiYVyHDz0NfkMngI9377FSkpi
dPdOlO4Vm3KjxxNiY6dM02A81Nut+PHe//K/780dDCXhg7Ocv7Rnz7mOqn83sjU1JT+nxdne/cgJiAAHTMz2pw4jVW16qqGJwiCIAgFokCTSki5BOqxawexQUEEv3iOXiFLAh7cB1IuM6cqUrcemvr6JERF4Xfu7Oek8rr8+6z6UwLoW1srHS5L+xYUSVL47tZP03mwfKk8cf0eVPt5DvHh4TxZvxYpOZkPt1z5cMsVPSsryo8aS8XxE7/q21Zyux1jPn3M1fyTExJyHVN2b8bJ63Rp36iTG9omJvL/0561zK0g9yfyRL3+qjUioRQEQRD+Uwo8qbRt1BiPXTsA+HjndsojgT4nJcWatZCPp66lhU3Dxrz+54T8Jh5/15tAyiVfqxo1s1xObs/ivdixnftLU+7q1tTXp9bCxdi3boN+EWvUNDV5eeQQZ3p0zdU884Oapib1Vq6m7LARPPpjFZ57d5MQGUn0x4/cnv0TXgf3f9W3reR2O6ZNyFsdPIJl1WpZjq/q5eLvgZF98ZTneUoSUf7v83RJHyDq3Tv5/4WVvBxAEARBEL5nBfacylRpHwP08e4d+V2zGnp6GW68sWvREoDYwECCnz/j0303AIrUrpPvl36fb98m/7/uilWUGzEKw2J28j6BCZGR+bq83DIt7UzDtRsY8OotNecvRNPQEICgJ4/5d0Huz659LboWheT/J0RGYmBjm+Vf+j6X/0VaxsYKj/zJ64PwLatWo8P5y3Q4fxk9K9WeGiAIgiAIBa3Ak0oD26Ly93kHPnpI0JPHANg0aIi6trbCuMVatpL/77VvD0mxsUD2l77zIvz1a/n/yp5/GfT5BiGl0rx1Ju3zHL8GLWNjqkybQadLXx6tlPrQ7WwVQJxpX5P46sTxr7KM71GpHr3k/9/77Vfiw8JyPQ8dc3NsGjRM2RcKoL+sIAiCIOSnAk8q4ctNNsHPn8mTtdS7vtMysi+e8ugewPvQwQzT5ydNAwP5/5F+fgrfRbx5zdMtmzKdVjvNW31Cnj/Pt4RNSk6WP/w6PaPiJeQ3hSj0b8zC14ozLYdOXeR9PF8ePczrf04oHS8xJkb+ZqT/BS4/DkXH3ByAyLd+HGvRhFBPD6XjJkRGEubzMsPwoCePOdm+Nf90bEuIx4uvGq8gCIIg5LcC71MJKWcCn23dTEJkpPyycrHmLZSOW6xFS0K9POUJiJaREZafn2+Zn6zr1pM/E/LysB+pNmsO2qZmBD56yMOVy0jO4o5vw2J26BYqlPIGFN83nOrSgeJt25MQEYG6tjZlh4/MU0zJ8fHsLFkcm4aNKNq0OWbOZdDU1yfK/z3uG9fL+y8WTfOopqx8rTjT0itcmKrTf+LOL7NAkjjVpSNOvfqkPE5KJiPq7VuCnz/j9amTlBs+khpzF+R6Gc//3MrbSxeUfmffum2G98oXBB0LC5ps+ZNTXToiJSURcN+NPRVcKFKrNmZly6GuqUmUvz8Rb14T8OA+zgMH03DtBoV53J79E29OpzxIXqamzg+HjxX4egiCIAhCXn2TpNKmQSOFz4ZFi2HqVFrpuMWat+Txmi8P/bau10Dh9Y75pfLU6XgfPkhcSAih3l6c799H/p2Gri5tT57hXN+eRH/4kHFimYzKk6fJ35/9+p+TvP4n5SHbDh07q5SsScnJvL10kbeXLir93qJ8BSpPnZGzmX3FONOqOuMnYoODeLT6d6SkJF7s/IsXO//Kl3kDeB3Yl+l3elaFv0lSCSkJbeujJ7g4uD8xAQFISUm8v3Gd9zeuZz8xio9mSoyO+lphCoIgCMJX8U0uf+tZWSnc2JD2UULp2TRoqPDInKze960KI/vidLl+C4dOXdAxM0NNUxP9ItaU7NaD7v8+wKZBwyz7clacMImGazdgXrYc6lpaaBoaYuZcBpuGjTKdJjvq2to02rgF+9ZtMSxaDHUdHWTq6uiYmWFdtx51l62ky43bCo+0yc7XiDMDmYy6y1bS+ZorTr37Ymhnj7qOjvxh7tb16lNz/kLKjxqbf8v8Tti1bEVfDx/qLv+dos2ao1ekSMp2NjDAyL44Ng0aUmPOPCqMHZ9h2koTJqNnZYVekSJUnjL9G0QvCIIgCHknk6R0D2sUBEEQBEEQhFz6JmcqBUEQBEEQhP8tIqkUBEEQBEEQVCaSSkEQBEEQBEFlIqkUBEEQBEEQVCaSSkEQBEEQBEFlIqkUBEEQBEEQVCaSSkEQBEEQBEFlIqkUBEEQBEEQVCaSSkEQBEEQBEFlIqkUBEEQBEEQVCaSSkEQBEEQBEFlIqkUBEEQBEEQVCaSSkEQBEEQBEFlIqkUBEEQBEEQVCaSSkEQBEEQBEFlIqkUBEEQBEEQVCaSSkEQBEEQBEFlIqkUBEEQBEEQVFYgSeXx8644Ne6X6V+ZpgMA2HfiEs36TP4qMZRrMUjpsu8+fP5Vlvdf9TXLIK/LqtN5DMfPu+Zq3kfOXKfbqLl5DU34yjoNn82mvScz/b5Fv6lcvHm/ACPKH9ExsTg17sfDZ965mu7yrYeUazEo3+IQ9V8oCP/V/fRrmTB/LT8t2/qtw/imNApiIfWql2P/mtnyz5N/3UC96uVo36wOADKZrCDCYOKPXalR0VlhmKO9TYEsW8hesiTRrM9kLu5ervK8ilpbUr50iXyISvgWqlUojaWFybcO4z/re67/nj5+LN9ykI0LJ37rUP5n7Tx6nviEBAZ3++GrLkfsp0J6BZJUmhobYmpsKP+so61JEUtzKpZxLIjFy9nbFC7wZQo59/CpN0Eh4fkyr2rlnahW3ilf5iUUvAWT8u+s3f9H33P9v+j64FuH8D/vsusD6lQr+9WXI/bTjArqJNn3qkCSytxQV1fnkut9Vmw9hO+7jxS2NGdQ11b0aNtIPk5ScjJrdxzj0KmrhIRFUMHZgTnjB1BShbOOf/x1lOt3H3Ng7RyF4d1GzaVhrYqM7NMeSLkUu23pVM5dv8eOI+coZm3J4fUpl5k8fPxYsmEvbu5eqKnJqF3ZhRkje2NT2AIAH19/uo78hS2LJ7Nw7W48fPwwMtSne5tGjO7XQaEyPvf2ZeG63Tx65o2hgR692jVhZN/28nECgkJZu+MYN+658ykohCKW5ozs215+9tfH15/OI+awY8V0Fq/fi7vHKwwN9GlRvypThvVAW0vzq5fBqct32HXsPF6v3yEDalR0Zs74/liYGmdY5tGzN5i9YhvxCYk4Ne4HwG9Th9CpZT0AomNjmbl0C+ev30Mmk1G1vBM/j+mLtaW50nXYd+ISW/ef4vyuZfLPl1wf0LZJLdbvPs67D4FYWZgyqNsPCut16NRVth08zVv/AAz19ahQxoE54/pjZWGa4zoyYf5aKpcthZWFKUs27CMgOJQr+1ZiZKjPxt0nOH31Ln7vPmJkqE/rxjWZMqwHap/LdcL8tTg72hEZFcPRszeIjI7BoZg100b0VEgSgkMjWLppH5dvPSQmNg7bwoVYPH0oZZ2KA3D6yh3W/HWMN+8+YlukEJOGdKNZ3SqZlnlScjLrd/7NkbPX+RQYSiFzE0b2aUfX1g1JSk7O
UdzK1tnU2JAb99xZunEfL9+8p4ilGSP7dkBLM/P6Byn72bThPWnXrHa25ZJeTsvJ7Ykny7cc4Lm3Lxrqajja2zBhUBeqf76akd0+CPDnwTPsOHyWwJAwnB3tmDa8Z5brBRATF89va3dz+updEhISqV+9PE3qVM7TOmTWHuW1/nu9esvCdbtx93hFeGS0fLiJkQF3jq3LsC45qRtpzVn5J/tOXAaQ7+eX9qzAprAFIWERLFy3m8uuD0mWkmlcuzKzx/bDyEBPYR2a16vKht0n+BQUgr1tYX4a3QeZTMaSDfvwev0WCzNjhvRoTY+2jeXLnTB/LS4l7fkUFMrJi7eIjI6hfOkSzB7bj1IlisrHCw2PZMnGfVxyfUB0TCylHYoxeUg3eZ1InZcq+3f6aTu2qMfj5y85snGewrbadew863cd59qBVairKfZU+xAQzMJ1u7n78AWxsXHYWlvSu30TurdtTNtBM/F+846bbu4s2bAPaysLLu9dkWVZhUdEUbfLWFbPHUPjWpUUyrd+13GM6teBXu2bZCjP9Ptpnc5jWDB5MCcv3uLa3Ucgk1G1nBOzxmbeVue2DqVuR6cSRQkJi+T4hZvExMRRpVwpFkz+kSu3H7DtwGkCgkIpXqwIP43uQ9VyX9rOr1nPADTU1fl92yEOnrpKRGQ0FZwdmDWmr0I9y0kMN++5M3ZgZ35auoXn3m9YOWsUTbNow78X311SGRgcxpq/jjF9RC9sC1twyfUBc1b+iZNDUSp9Psu4ZvtR9p28xNwJAyluW5htB08zeOoSzu1cio62VqbzTpaSSUpK+jJAJsuws+bE8s0HSEhMZMn0YZgaGwDw/mMgvcf9SrN6VdixYgYJCYls3neS3uMXcHzLQnlliYyOYfbK7fw25UdKOxTj3hMPxv7yB0YGevTv3AJIadj6TVxI07pV+Hl0H976B/DTsq0YGerTt2OzlNDVZETHxrFg8iCsrSw4cuYaMxZvpqKzA3a2hYGU/l1TFm5k4ZQfKV+6BK/8/Bk8bSmWFqYM7dnmq5fBuw8BtG5UkxqVnImIjGbywg0s3bifxdOHZlhmmyY10dLU4KdlW3E9/AeAQuK7+s8j9GrfhN2//0RUbBxzf/+LWcu3sXXxlByX2+0Hz4hPSGTDrxOxsTLnyu1HjJ69Sr5e9929WLBmF0tnDsepRFECgkK5df8p5iZGOV5Gqiu3HuDnH8C4QZ0VztS/+xjI6L4dKO1YjGder5mycCOO9jZ0bllfPu3aHcfo2a4xJ7b+io62Fmt2HGPMnNVc3L0cfT0dAEb+vJLwqGjmjh+AhZkxTz1fYVukEACubk+Z9OsGJg/pRv0a5bnx7xPGz1vDoXVzcXYspjTe37ceYu/xS0z8sSvOjsV48+4jJYpZA6CuppajuJWt81v/AIbPXEHnVvVYOmMYIeGRLN24nzfvPtC4diWlsaSXn+WSKjYunqEzljOkZ2t+mzqEqOhY/n38gkLmKZfycrIPnrjgytJN+5kxshe1K7vg4ePHjKVbMj0Qpvp1zS4u33rA4ulDKVrEkqu3H7Jo/Z48r4uy9kiZ7Op/XHwCQ2euoHXjmmz4dSLBoRGMmvU7DvbWzBnXX+k8c1o3Us0Y2Rs1NTX8/ANYPWc0ALo62gCMnr2a2Ph4Ni+ahJqaGvNW7+CnpVv4Y+5YhXVQV1dn7+qfMTDQY96qvxg9exVWFqYsmjYUZ8diXLh5n/Fz11DZpaTCgfyPv44yqFsrzuxYQnJyMos37GXQtKVc2LUMHW0tkpKSGDxtKWoyGStnjcTY0IBTV+4weNpSdq6cqXCVS5X9O/20lmbG7D1+ER9ff0oUKyIf7/Tlu/zQsIbSY9SMJZsxNNBjx/LpqKur89TzNUaGeqjJZBxcN4fuY+bTulEN+nVqjuzz9NmVVaNaFTlxwVUhqfz30QtCwiNp2aB6pvUqvSkLNzBrbF8WTBlMTEwck35dl2Vbnds6lGrdzr8Z3rsdF3YtJzomliEzltN99FxK2tuwc+VMTI0NWbn1IBPmrePS3uVoaqSkO1+7np28eIuGtSqy4dcJqMnU2LjnBIOmLeXi7uXyY1pOYnjz7iOjZq+iR5tGTBjchdIOytvu7813l1RGREWzaNoQeSEN6taKExdduX3/GZXKOBIeGc32w2f5ZXx/mterCsC8iQOp320cx87dVPjVnd74eWsVPuvqaPPw1OZcx/juQyDHtyxAXV1dPuzPQ2cpYmXOb1OHyIeVdx5L456T2H/iEkPSJHFDureWn1GqWakMQ3u1Zcu+U/KkcvuhM5iaGLFg8mDU1dRwKlEUP/9PbNh9XH5AszA1ZsmMYfJ5jh3YmT1/X+TxCx95UgkwpEdrKpctCUDJ4ra0b1aHW/efZplU5lcZDEm3jH6dmrP72AWly9TU0JDvcHq6Ohm+r1KuFKP7d/yyvgM6MvaXNSRLUrYH8VRx8QnMnzSQokUsAWhSpzLlnR3k6+X7/iMWpsY0qVMZNZmMYtaWVClXKkfzTu/uoxec3r5Ynuil+nXyYPn/djZWnLh4iwdPvRUaT3NTI6YO7ylfrzH9O7Jl3z94+PhRuWxJrt99zGOPV1zYtRRrKwv59kn1x19H6NC8DoO6tQLA0c6GOw9fsHnfSVb8PDJDrGERUew4co4FkwbRtmnKGYdKLiVzHbeydf7r8Fnsba2YO2Hgl/jmjqVRzwnZbUK5/CyXVMFhEURGx9CuaW35NixT0k7+fU72wc37/qFH20byzw521iQlJTF54YZMlxsUGs7Rs9dZ8dNI+cG7pL0Nr9995O9zN/K0LsraI2Wyq/+er/zw/xTE6H4d0NbSpIilGb3aN2Hzvn8w0NPNdL45qRupdLS10NDQQF1NTWE/d3V7yoOnXpzbuVRef+ZPHETHYbMUkq24+ARmj+0nT/4HdfuBI2euM2VYD3mb2qJ+NeyLFsbN3VPhYG9lYcrYAZ3kZ5rnjOtPg+7j+fv8Tbq3acTlWw/xeOnH5b0r5PMvU9IO33cfWbPjGFsWfbmpUJX9W9m0LiXtOXHBlXGDOgPwKSgEN3dPpg7voXSb+77/xOh+HeXr52BnLf9OT1cHNZkMTU2NDG1pVvF1bFmPCfPWEhUdK//xevrKHWpXccHMxJCcaly7kvyKma62FoO7/cCIn1dm2Vbnpg6lKlzIjJF9U87Y6+vp0PWHBsxbvYOdK2ZQuJAZAD/2aM2fB8/g++4TDnbWBVLPDPV1WTJjmHxdl84cTr2uY/n7/E26tW6Y4xg8fPxY9tMI2japleNt/z347h4pZKCnq1BAAIULmRMYHAakXJ6JjomlYc2K8u81NTQo7VCMFy99s5z3lKHdObx+rvxvz6qf8xRj++Z1MjTgbk88qF+tnMIwTQ0N6lR14d/HHgrD058tqlGxNJ+CQggOjQDg4bOX1K9WTuEXavnSDgQGhxEUqrzPoZpMhlUhM8IioxSGu5SyV/hsYWZMwOdtmZmvVQZFLM0Ji4jK9PuspE8iChcyJyExkfBczM/IQE9+QE1lbmokX6+61cqRkJjIgEmLuOnmjiRJeYoVoHp
F5wwHHGWsLc0zrIOzo51C46utpYmBni4BwaEAPPF4RWmHovJkKK1kSeLJi1cKZQNQvnSJTMvG6/VbYuPiM1yCzW3cytb5ufcb6levoDCsiKWZvEtITuRnuaSytjSndhUX+k9axNGzN4iNi1f4Prt9MCkpCe/X76hfvbzCdFWy6cfo/fodiYlJ1K+hOF1VFZJkZe2RMtnV/5jYeGSAuvqXdVZTUyO3W1tZ3cjOw+feONjbKNQfZ8diaGpo4OHzpd4aG+pTxNJM/tnUKOXMbKnitgrzMzEyyNA/u7RDMYWuCzraWlRwdpDvF27unpR2KCZPJFI1rFmRe49eKAxTZf9WNm3HlvU4fvHLEy7OXP0X28KFqODsoHS+XVo1YP4fO1i/6ziBIVm35zmNr0H18ujr6nDu+j0g5bL0uev3aNO4Zq7mWTbdMcfc1Jj4hNy11TmpQ07pjlEmRgZoa2kqnFRJrR+px82CqGflnEtkaL8ruZSU17OcxqCjrUXrXG7778F3d6Yy7Q09qZRepF0AACAASURBVGQykD43bf4BQQA06TVJYZyYuDilZ7jSKlrEUv4rQxXK+nFFRsUojd3M2IhXfh8Uhhno6yl8Njb8XPFDwjAzMcT/UxD33T05du6mfJxkKRlI6UtpbmLE67cf2LL/FPfdPQkLT9n5QsIjMyzfxEjJ5bBsDsr5VQb7TlzmxAVX3n8MJD4hkfiEBNTy0N1AWUypB4fcJBhKtwVf1svC1JjD6+eyae9Jxs75g8KFzJg6vAcNalRQOl1WlNaR6Bi27jvFlTsPCQwOJzk5majoGOqnm39mcaYe3T8FhmR66TcoOIyExESmLdrEzCVb5MMTEhPR0lS+u38KDEVHWyvT/SencStb58CQcKVnOVK7g+REfpZLWht+ncje4xdZvf0ISzbuZWjPtgzo0gKZTJbtPiglSyQlJ2OWrhyyW6/A4DB0tLXkl3xzOl1WlG13ZbKr/+VLl8DYyIB1O/9mRJ/2hIVHsu/EJfnVCGVyWjey8+FTMC/fvKdq2+EKwxOTkggICpV/NjbUVzq9vpK6m75lMNDPeLbV2EhfnhRERMUo7T5gZmJITFw88Qlf9iFV9m9l07ZtUovF6/fy4KkXlVxKcubqXdo0yTyhGNGnHY72NmzZ9w/rdh6jU8t6TBnWI8szytnFp66uTtumtTlxwZWOLepy9+FzomPiaFY38/JXxsRI+VnNzNrqvNYhIyV1If1+lWbhQAHVM10l9cxQX/7jLacxFDIzyfFVuO/Jd5dUZrcN9XV1UZPJOLz+FzQ0FMPX0cm8P2W2y81keEhYhJJxM45tamyo9CxicFh4hgoaHhml8CsotbJZmKXcwKKvp0O31g0Z2LVVhvlZWpgQHRNLr7ELaF6/GtuXTcPSPKWRat435/0Ls5IfZbBl3z9sO3CaFbNGUrWcExoa6py44MqCNbvyFlOmJZS/LMyMmTmqN+MGdmbbwdMMn7mCfWtmU8HZIXd1RMlGnDBvLZHRMSydOZwSxaxRk8n4efm2XJ/VMTM1wt3ztdLvUhPDeRMHUjndJWyZmvI1MDc1IjYunpjYOKWNck7jVrbOZiaGhCpZv7Q3geREVuWSXk7LSVtLkwFdWtK3U3NOXb7DvFV/kZSUxI89Wme7D8qQoSaTEZbuh1x262VmYkhsXDxx8QkKfYbTT6dqe5QXOtparJk3lv6TFrHr6HlMjAxo1bCGQreT9PKrTuvr6VDG0Y5Vn/tZpmVspPwAn1vKYgoMDqN40ZTLjaZGBjzzep1hnODQCHS1tRR+lKmyfyub1sTIgIY1K/DPpdvYFLbgvrsX8yYOzDBeWs3qVqFZ3So8eObNrOXbmDBvLZsXZf7c35zE16llPToOnUVwaARnrt6lYc2K8kvhX0t+1aGcKIh6Fhkdk2FYSFgERa0tcxXDfzCfBL7Dy9/ZqeDsgJq6Gm/ef8KmsIXCnyod9y3MjOWXn1MFh0bg/yk4R9NXr1Caa3ceK/waS0xM4ua9pxku3bo98VT4fNH1PlYWpvKzcVXLOfHE4xXWVuYZ1lFTQ4MXL/0ICg1nVN/28oQyPDKadx8Dc73eeZGTMrjp5k7TulWoWakMGhopl+aee2fdPUFbS5OExESS8+Hypqr09XQY078jZUrZ8+jzg6xVqSOSJHHr/jN6d2iKo52N/Bfoi2y2iTIuJe15/vINHwIyLldfTwdnRztevPTNUDaZ3X3paG+DpoYGl289zPe4nUoU5fb9ZwrDPgWF5Hi/Sk9ZuaSX23JSV1OjbZNadP2hIQ8+zzO7fVBDQ50Sxay5/UBx3R4/f5ll/I52Nqirq2eY7lG66VRtj/Lq5MXbDOjcgn+Pb+D8rmVM/LFrpme481o3tLU0M3Q3qFrOCR+/9+joaGXY3lmdfcuNR899FG7UDIuI4r67F86fb4CoUdEZDx8/PgaGKEx35c7DbPvw5sf+3allPc7fcOOS6wNKFbfF0S5nTzOpVMaRyUO6KTxwX1tLk7i4hFzH51SiKE4ORblw042Lrg9o85X78+Vnu5gTBVPPXpKUnCz/HBMbx313L8o42hVYDN/Sfy6pNDMxZEDnFkxbtJG9xy/yxMOHuw+fs/3QGZ54+GQ57Uu/97g98VT4i4hKOUNQtbwT7z8Gcvy8K0nJyQQEhTJ7xTbUMjm7k96Ari0JDgtn+uLNPPV6zaPnLxk3bw1AhkcObD1wiuPnXfHx9Wfficv8degsP/ZoLf9+ULdWvPLzZ9y8Ndx0c8fd4xXnrt9j8+c3kNgWsUBDQ529Jy7x7kMg9554MGbOapUuoeVGTsrA3rYwrm5PefzChzdvP7DjyDnOXLub5Xwd7G2QkiW2HzzD+09BmfYf/Vqu3X3M3uMX8fDx492HQP4+fxPv1+8o9/lsmCp1RCaTYWdjxcmLt3j99gMePn7MWfknHwJznyQ0rl2Jkva2DJu5gku3HvD4hQ+HTl3F69VbAMYN7Mxfh86ycush3J548uCZN0fOXOfkpVtK52duYkTPdo355fftHD59TV7fLt16oHLcAzq34Jn3Gxat34PXq7e4PfFk4vx1aKjnvOnJrlzSy0k5vfsQyOrtR3j4zJv3n4K4/eAZ52/co+LneWa3DwIM7v4Du45dYP/Jy7zy8+fCDTdWbDmY5SWrQuYmtGtai7mr/uLa3ce8fPOenUfPc/KiYtmo2h7llffrt8TFJ/D4hQ/uHq948+6j4hMz0shr3XAqbsuDp17cuv8UP/9PxMTF07BWRUrZ2zJ46lLOXvuXp56vufHvE37fdoio6Nh8WbeY2DgmLljP4xc+PPHwYcyc1Rga6NHu800ldaqWpXLZUoyc9Tt3Hz7H69VbVm07zMWb9xnZt0OW886P/btBjQokJCSy/dDZbG/OWLpxH65uT3nrH8Azrzfs+fsiFdLcnV6qRFH+uXQbr9fveP32Q67i69i8LtsOnCY2Nl7lLibZyc92MScKop5Fx8Yx+df1PPHw4annaybOX4e+no68nhVEDN/Sd3f5OycmD+2Ouakxfx46y7sPAejr6lCudIkMNwSkt2rb4Q
zDdq6YQfWKzjja2bBo2lDW7/6b2Su2YWZqRL9OzdHVzaSPRjrmJkbsWz2LRev30m/Cb0hI1KxUht2rfspw+fvXyYNZ9edh3Je9wtjIgOG928rvIIWUu9r2/TGbpZv2M2bOahITkyhsaU6XVil3wlmam7Jo6hBW/XmETXtOUszGkslDunFJyZmmryW7Mhg7oBOfgkIZMHkRajI1GteuxKbfJtF73K+ZztPa0pwZo3qzYfdxVv15mPkTB8mff1YQDA30OHbuJss3HyQxMZFiNlYsmDRI/hglVevIkhnD+OX37bT78SeMDPXp1b4JU4f24MJNt1zFKZPJ2PzbJBat38u03zYSF5+AvW1hls1M6aPTqFZF1s4fx9odx9h24BSamhoUL1qEUf0yPzBOG94THW0tVm8/QmBIGJbmpkz8savKcdvZFmb9gvEs23yAnUfPY2lmQv8uLbApnP1NDqmyK5f0clJOujraPPN6zZ6/LxIZFYOlhQntmtZmUPeUN5Bktw9Cypml4NBw1u38m/mrd+Job8Oc8f35eWnWr2mbPbYfv67dzaQF64hPSKRahdKs/mUMQ2d8eZOUqnUtLyRJopJLSTbtPcnOo+flw60tzVkzb1yGm/4gb3Xjh0Y1uHX/GaNnr0ZdXY0Da+dgb1uYzYsns2LLQeat2kFoRCSmxobUqVJWfqVDVR2a10FfT5cRP68kPCKKimUc+XPJVHk3BJlMxsZfJ7B0034mzF9HeGQUpR2Ksem3STl62oCq+7e6ujptmtRix5FztM4mqYyNS2DGks0EhaR0r6pd1YVpw748I3V0vw5M+W0jnYfPppi1JSe3/Zbj+No2rc3ijfto36xOpmep81N+tYs5oa6m9tXr2c+j++Dj68/wnz7XM5eSbF0yVb4tCyKGb0km5cetlEKO+Pj602rANK4dWJXjzvWCIAgFYd7qHfi+/8TCKYOxNDdFkiRCwiL5ZdV2YmLisuyv972bMH8t+nq63/0bYJZtPoDbEw/2rp71zWIIj4ymbpcxbPh1IrWruHyzOIT/pv/c5e//BSKPFwThe3P26r/0aNNI3k9bJpNhZmJIGUc7InJ5Y9X36HtvdxMSE/n73E26tGrwTeM4fuEmFmbG1KjknP3IgpDOf/LytyAIgpC/XErZc+j0VUoWt8XKwpTwyChu3nvK1v2nmDSk+7cO73/Wx8AQomNi2bD7BJqa6vIXEBSkhMRE3n8M4pWfP6v/PMLEH7vl6W1zgiCSSkEQBIHfpg1h2ab99Bm/kKDQcPR1tSntUIy5EwbyQ6Ma3zq8/1lb959i34lLODvasWHBhALpx5hecGgEnYfPQUNDnd7tm2b5ZjpByIroUykIgiAIgiCoTJzfFgRBEARBEFQmkkpBEARBEARBZSKpFARBEARBEFQmkkpBEARBEARBZSKpFARBEARBEFQmkkpBEARBEARBZQWaVD585s2AyYup1n4EldsMpe2PP7Hqz4zv4/4v8vH1x6lxPz4EBH/rUKjTeQzHz7t+6zAU7DtxiWZ9/ruvecutb1Ef4uITWPXnYZr1mUy5FoOo320cI2f9zhMPnwKLIb9IksTiDXup03kMFVr9yIbdxzOMM3PpFpZs2PcNovv/5//b/qvM99TG/392/LyrvI0bMn1Zjqf7Ho+L/4sKLKl88/YDAyYvxtzEiGUzh7P8p5G0bVKLkNAI+TiePn4Mm7ki1/NOliSa9J6Un+FmKa9x5reCXu+c2nn0PFsPnPrWYfy/s+CPnRw4eYXB3VuzceEkJg/pjqG+HgkJSfJx8lo2BV3nz99wY+/xS6ycNZLjmxfQuWX9DOM4O9pRoliRAotJyB//hfbhe2njBUWBwWHMWLqZ/l1acGr7In4a3SfDON/bcfG/UN/zU4E9uv/8zfuYmxqx7KfhyGQyABrVqqgwzkXXB3ma98On3gSFhKscY07lNc78VtDrnVOXXR9Qp1rZbx3GdyG1rn9tkiRx8tJtZo3pS6eW9eTD2zVTfOVbXsumoOv8c+83lC1lT/WKmb9/uG/HZgUYkZBf/gvtw/fSxguKvN+8IykpmZ7tmmT6Gsnv7bj4X6jv+anAksqkpJSzJZkdZOes/JN9Jy4D4NS4HwCX9qzAprAFpy7fYdex83i9focMqFHRmTnj+2NhaszRszeYvWIb8QmJ8ul+mzqETi3rkZSczNodxzh06iohYRFUcHZgzvgBlLS3yTTO0PBIlmzcxyXXB0THxFLaoRiTh3STH9wyizPVm3cfmb54Mw+eemFsqE+L+tWYPLQ72lqa8nFOX7nDmr+O8ebdR2yLFGLSkG40q1tF/n2dzmPYtnQq567fY8eRcxSztuTw+rkKcWa13gDRsbHMXLqF89fvIZPJqFreiZ/H9MXa0lw+j6y2K8CE+WtxdrQjMiqGo2dvEBkdg0Mxa6aN6Em18k4Ztl2yJNF20Ey837zjpps7Szbsw9rKgst7U7aPuro6l1zvs2LrIXzffaSwpTmDurZSeCVYbsvMx9efziPmsHXxFBau3Y3nKz/MTYwY1P0HmtSpzII/dnL7wTN0tbVp0aAaM0b2QlPjS7XPSVnMmziQI2evc/OeO1qaGrRsUJ0pw7qzevtR/rl4i7j4BKqUK8W8iQMpXMhMIb6Xb94zft4a3D1eYWJkSPc2DRnVr4PCfpAf9QFS9jE1NeX7V1Zlk5SczMbdJzh99S5+7z5iZKhP68Y1mTKsB2oyWZb7ZkhYBAvX7eay60OSpWQa167M7LH9MDLQUxoHgIePH0s27MXN3Qs1NRm1K7swY2RvbApbkJSUxJDpy7nz6DmJiUk4Ne5HMWtLzu/KeJlrwvy16Onq8OvkwfLP2dXXxMQklm85wKnLdwgODaeQuQn1q5Vnzvj+yGQy+k5YSIUyjkwe0k2+nLj4BMq3HMyeVT9TpVwpfHz96TFmHpf2ruDnZdu4cusBPdo1ZvqIXgQEhbJ2xzFu3HPnU1AIRSzNGdm3Pe2b1QG+1NcdK6azeP1e3D1eYWigT4v6VZkyrIdCO/HM6w3LNu3n/lMv1NXUKFnclj+XTUNXWytP2z3V1gOn2Pv3JS7s/rJN1+44xurtRzi5dSEli9sCKd2Vuo+ex83DfwA523+zWv/s2ofn3r4sXLebR8+8MTTQo1e7Jozs2x6ZTEabwTNpXKsSE3/sqrAuw2auwNhQnyUzhmXbdmRVbmkVRBufXp3OY/h1ymAOnrrKrftPkSGjUe1KzB7TFyNDfQD++Oso1+8+5sDaOQrTdhs1l4a1KjKyT3sgZT+oXLYUVhamLNmwj4DgUK7sW8nZa/9y9c5jmtSuxMY9J/gQEIxtkUKMH9SFFvWryeeXLEls2H2cQ6eu8ikwFOvCFgzo3IJe7ZvIx/kQEMzCdbu5+/AFsbFx2Fpb0rt9E3q2Sxnnaxx7l20+wL7jl5AkiTJNBwBwdf/vCm1ufhwXc7tvZbYturdtnKc2NzwiirpdxrJ67hga16okX05ScjL1u45jVL8OCmXx3ZEKyIuXvpJT437Smr+OSgkJiRm+j4mNk375fbs0eNpSKSo6RoqKj
pGSk5MlSZKkTXtOSLuOnpe8Xr+V7rt7So17TZSm/rZRkiRJik9IkE5evCVVaPWjfLrExJT5/771kFSz40jp7LV/JU8fP2n64k1Sva5jpZjYOKUxJiYmSp2Gz5a6jJgjubq5S089X0tLN+2XyjYfKD146pVlnC/fvJdKNeorNe87RTp69rrk4/teOn/9nlS17TBp454T8mXcvOcuOTcdIG3df0ryev1W+vPgaalMswHSM6838nFqdxotDZm+TBoweZF0yfWBfNlpZbXetTuNlmp1HCX9sf2I5PHSV7r/1EtqP+RnadDUJQrzyGq7SpIkjZ+3RirfcrD027rdUkhYhBQTGyct3bRfqtFhpBQZFaN0G0ZFx0htBs+U1u/6W4qKjpGiP2/rvccvSlXaDJM6Dp0lXf/3ifTKz1/auv+UVKpRX+l+mvXLbZmlbve2g2dKz73fSAkJidL+k5elUo36So17TZROX7kjxcUnSJ6v3kq1Oo6Sdh49l+uyqNZuuHThhpsUn5AgPX7xUqr4wxCpXtex0rJN+6WIqGgpJCxC6j1ugTRmzuoMcTXtPUk6cdFVevnmvfT3uZtS5dZDpb8On833+iBJkjRmzmqpRb+pktfrt7kqG0mSpJlLt0hnrtyVXr/9IJ26fFtyaTZQOnT6qiRJWe+bvcYukDoNny3dd/eUHj7zljoNny2Nnr1K6fIlSZLefQiQqrQZJk1fvEl69PyldO+xhzRs5nKpQfdxUlhElDzO39btlvpMWJghzrTGz1sjzVy6ReFzdvV13c6/pdYDp0v33T2l128/SFduP5R2H7sgn0ef8b9KSzftV1hObFy8VKpRX+neYw9JklLK1qlxP2nwtKXS3FV/STfuPZF8fN9LkiRJAcGh0pSFG6Rb959Kb959lFZuPSg5N+kvvfbzl09bqlFfqUW/qZLbE08pISFR8vTxk+p1HavQTgSFhEuVWw+VRvy8Urp+97Hk6uYu7Tjype7mdrun5e7xSirVqK/0ISBYPqzz8DlSzY4jpc17T8qHbd73j/TDgOmSJOV8/81u/TOrgyFhEVLVtin14sVLX+nCDTepRoeR8nXesPu41LT3JIX1CAuPlFyaDZSu3nkkSVL2bUdW5ZZWQbTx6aW2M2eu3pVi4+Kl137+Usehs6QRP6+Uj7N6+xGp68hfMkzbdeQv0tqdx+Sfx89bIw2aslhq1meydPyCq3T93yfyMizXYpA0dMZy6f3HICkmNk7adfS85Nx0gOTu+Uo+/YI1u6TanUZLJy/ekjx9/KQDJy9LlVoPkbYdOC0fZ8DkRdKYX1ZLHi99Je/X76S/z92ULt96IP/+ax17z1+/Jzk36Z+hHUqVH8fF3O5bWW2LvLa5o2evksbPW6OwnFv3n0rOTQdIQSHhmcbyPSiwPpVOJYqydMYwth04TaOeE/njr6MEhX45Ra2jrYWGhgbqamro6eqgp6sjP5szpGcbendoiqOdDZVcStKvU3MePPUCQFNDQ/4LMXU6dXV1wiOj2X74LNNH9KJ5vaqULG7LvIkDSUhM5Ni5m0pjvHzrIR4v/Vg3fzy1KrtQpqQdk4d0o1GtSqzZcSzbOAH6dGxGh+Z1KV60CE3rVqFTy/rcuv9U/v0ffx2hQ/M6DOrWCkc7GwZ0aUn96hXYvO+kQizvPgSyZdFkGtWqSMUyjhlizWy9U1UpV4rR/TtSqkRRKpVxZOyAjtx58JzkNK96z2q7pjI3NWLq8J6YGBmgo63FmP4dCQ2PxMPHT+k21NPVQU0mQ1NTAz1dHXS1teTfRURFs2jaEOpWLYu9bWEGdWtFmZJ23L7/DCBPZZbqx+6tKe1QDA0Ndbq1boipsSGVyjjSskF1tDQ1KGlvQ/N6Vbn32DPXZdGsXlWa1KmMpoYG5ZxKUK96OdTUZEz4sSsGerqYGBnQq31T7j32yBDX0J5taNO4FiWKFaFds9qM6NOOzXv/yXUM2dUHgHkTB2JjZU7bQTMZOmM51+4+znHZ/Dp5MC0aVMPOxopWDWtQv0Z5Hjz1BjKv865uT3nw1ItVs0dTyaUkFZwdmD9xEOeu38PH119pjH8eOksRK3N+mzqE8qVLUKVcKf6YO5akJIn9Jy7J40y7vLRxZie7+ur3/hPlSpegkktJ7GysaFCjQp5+9UuSRJFCZswe2486VcpSvGhK304LU2OWzBhGzUplKGZtydiBnTHQ1+XxC8WbpYb0aE3lsiXR0FCnZHFb2jero9BObDt4GgszY9bMG0fdauWoVdlFfrk/L9s9LeeSdhgZ6Mnra2BIGM+8XtOnQzOu3nkkH8/tsQc1Kn3pfpDd/puT9c+sDm4/dAZTEyMWTB6MU4miNKlTmeG928pv0GrbpBZ+/gE89XwtX9b5m24YGuhSp4pLjtuOzMotrYJq49NrVq8qLepXQ1tLEzvbwsydOIBLrg945Zd9maZ399ELti2ZStsmtahb9cul17j4BOZOGEARSzN0tLXo3aEpdauWZdv+0wAEh0aw+9gFZo/rR+vGNSlZ3JaurRsy8ceurNt5jPiERAB833+iUc1KlCpRFAc7a9o1q03Dmind2b7msTf9MS/9lU9Vj4t52bey2hZ5bXM7tqzHJdcHREXHysc/feUOtau4YGZimHnBfwcK9O7vtk1rc3nfSvp1asaR09do2nsyZ67ezfV8iliaExYRleU4Xq/eEh0TKy9cSKlwpR2K8eKlr9Jp3Nw9Ke1QjELmJgrDG9asyL1HL3IUW5WypdLFakZAcBiQclnhyYtXCjEBlC9dIkNM7ZvXUdgZcqtKOcU4ChcyJyExkfAstpuy7ersaIdamh1XW0sTAz1dAoJDcx2TgZ4upUoUzRBX4Oftk5cyS1XaQXG+JkYGGZZlYmwg/yGTm7Io7VBM4bOpkQEOdjYK28XEyEDhR1Kqii4lFT7XrFSGT0EhhIRF5Ht9MDEyYOuSqexYORNtLU2GzVjOwCmLFRqmnLK2NM+yrgA8fO6Ng70NtkUKyYc5OxZDU0MDD59M9rEnHtSvVk5hmKaGBnWquvCvkqQ8t7Krr+2b1+HEhVvMWr4Nr1dvVVpW51YZbx5KT00mw6qQGWGRitvSpZS9wmcLM2N5OwHw5IUPjWtXUliXVHnZ7uljqlq+NPeepGzvq7cf4eRQlKZ1q+Dm7kVkdAzJksQ9d09qViojny67/Tc3659hnZ69pH61cgr95MqXdiAwOIyg0HCsrSyoWMaRU5dvy78/dfkOLRtUR11dPVdtR07KLSv51can5+yo2M6UcyqBjrZWpj/gs1K9orNC/UhlYmSQoYtO9YrO8tgePX9JUlIS9auXVxinUc1KhEdGy+tXl1YNmP/HDtbvOk5giGL5f6tjb05kd1zMy76V1bbIjbRtboPq5dHX1eHc9XtAyqXvc9fv0aZxzTzPv6AUWJ/KVEYGegzp2YYBXVvy27o9TF+8mRoVnTE1zjz73nfiMicuuPL+YyDxCYnEJySglkkn3VT+AUEANOmleBdYTFwcero6SqeJiIrB1Nggw3AzE0Ni4uKJT0hESzPrTZZ+PWQyGXz+FRQU
HEZCYiLTFm1i5pIt8nESEjPO18rCNMvlZEdpHKT8Uk+Vk+1qYpRxe6TMSPWYUuIC6fPM8lJmqVL7HaWlp6udccQ8lIWxQcZ562cTj3w8PcXxjI1S5hUYHEZiYtJXqQ/VyjtRrbwTz719GTx1Cet3H1foI5heZHQMW/ed4sqdhwQGh5OcnExUdAz1a1TIcjkfPgXz8s17qrYdrjA8MSmJgCDlPzoio2KU1gMzYyNe+X3IwdplLbv6WqOiMwfWzmHDnuO0HzqLWpWcmTW2H/a2hXO9LCsLswzDXr/9wJb9p7jv7klYeMoBIiQ8Mmdxptk3PwWFUC9d8p0qL9s9vZqVnDl0+hoAV24/pEGNCjiVKIq5iRE377ljb1uYiMhoqlcoLZ8mu/0Xcr7+6fl/CuK+u6fCmaxkKRlI6adpbmJEu6a12bL/H6YM60FoeCS37j9jVN8OKdPnou1QVm65kV9tfHqG+hn77Bkb6ucpUcmsvVC2DJM0y4iMikZHWwtdHcW2M/XsWGqZjujTDkd7G7bs+4d1O4/RqWU9pgzrgYGe7jc79uZEdsfFvOxbWW2LzGTX5qqrq9O2aW1OXHClY4u63H34nOiYOJrVrZrndS8oBZ5UptLU0GDcwM7sPnaB596+1K7ionS8Lfv+YduB06yYNZKq5ZzQ0FDnxAVXFqzZleX89XV1UZPJOLz+FzQ0FFdTR0f55TRTIwOeeb3OMDw4NAJdba0cVeqsbvZN3aHmTRxI5XRnsGTpbrCQodpdw9lNn9ftqlJM2axSXsosr3JTFqqIiopR3Dk8LAAAIABJREFU+BwSlnKAtTAzRktTM8cx5KU+ODsW44dGNXB7kvUZwAnz1hIZHcPSmcMpUcwaNZmMn5dvy/ZMpb6eDmUc7Vg1Z3SG71KT5/RMjQ2VntENDgvHWMkPg6+hTEk7Vs8Zw/uPgSxcu5teYxdwYfeylDqhpJKGhEUomUvGUaNjYuk1dgHN61dj+7JpWJqnHNib952S6xjNTYwyXW5etnt6NSo689u6PQSFhnPTzZ3B3X8AoH6N8ly7+5iA4FCcShRVSH6z239VWX99PR26tW7IwK6tMnxnaZFy9qpVw+osWLOLJx4+vPD2xcrClEouKV1BctN2qPpAhvxq49NLf5UoWZIICgmnkGnK+mc2tbJ6ktkNscrOGAcEh1HILOXmTFNjQ2Lj4omOiVVIAIM/P/ovbf1qVrcKzepW4cEzb2Yt38aEeWvZvGjyNzv25kR27Whe963MtkVmctLmdmpZj45DZxEcGsGZq3dpWLNihpMU36Nv+kad0M87g6FBSkavraVJbFy8wjg33dxpWrcKNSuVQUMj5fLfc2/F09DaWpokJCYq9Bes4OyAmroab95/wqawhcKfuYmR0nhqVHTGw8ePj4EhCsOv3HmocNpcWZw5oa+ng7OjHS9e+maIKe3dZzmlbL1zKifbNa+0tTSJi0vI9XR5KbO8yu+yyIybu2If1Rv/PqFwITNMjQ0LJIbQ8EiFsxPpy0aSJG7dfybvW5t6ufWFkn0sfZ2vWs4JH7/36OhoZYg/s1/p1SuU5tqdxwpnzBMTk7h572mGS1Nfm7WVBUtnDicoNBzf958AsDA1Ijhd0pu2H19WXrz0Iyg0nFF928sTqvDIaN59DMx1bGVK2nP59kOF7ZQqJ9s9MTGJmNi4TOfvVKIoxob67DxyDh0tLco7OwDQoEYFXN2e4vZE8dJ3TuR0/ZW1D1XLOfHE4xXWVuYZ1in1aQ2mxobUqeLC+etunLt+j9aNa8qTp/xuO75FG+/2xFPh841/n5CYlITT5649FmbG8uQuVXBoBP6fcv4w9vCIKLzfvFMYdsn1PqUd7QCo6OKIlqaGQt9aSDmbbaCnS6niit0fACp9flrCw2cp/QG/5rE3J1Q5LualTUsr/bZIjScvba5TiaI4ORTlwk03Lro+oE2TWrlen2+hwM5Urtp2GB0dLUoVL4qJkT5v/QPYuOcEFZwdcClVHACn4rbsOHyWW/efYlukEBZmJtjbFub63Sc8fuGDsYEeV+8+5sw1xX6YDvY2SMkS2w+eoWXD6mhraWJuYsSAzi2YtmgjYwd0oqxTcWJi4njm/YYq5UpRzqlEhhjrVC1L5bKlGDnrd6YN64GpsSGnLt/h4s377Fg+Qz6esjhzatzAzoyZsxoNdXXqVy+Pmroar3z90dLSoE3j3FWazNY7J3KyXfOqVImi/HPpNs3qVUVTQz3HlxbNTAxzXWaqyM+yUEYmk7F1/z/o62rjUqo4j1+8ZNPek0we+uVSdH7F8MrPn/W7jlO3ajmKWJmTkJDIjX+fcOryHdbOHy8fT1nZ2NlYcfLiLcqWKk5cfAJ7/r7Ah8BgrK2+HASV1fmGtSpSyt6WwVOXMqJPO2wLFyIkLIJ7TzwY0qON0l/VA7q25NDpq0xfvJl+nZuTmJjEpr0pNzD0aNs4L5s5V3YePY+lmcn/tXffYVEc/wPH30fvRRAVFFCwgA17L7FHo0Yx1q89YomoKGqs2DViCSj2blQsUey9JZZoLKiIIgoqogjSe5PfH8iFgwMOQST5zet5fB7Z252dmZ2d/dzs7J70Qn3o9B/o62pj8amNNrGzZdWWA/zv+47UsDbHP/ANa3YeUeh9oxUrGKOiosy+45ew79Kad2HhrNlxRKHX/OQ0sl9XDpy4jNPCdQzs0Q5lZWXuP/ZniH1nhep9xeb9HL9wU/o6oJwkEgmN69bgwIkrtG1qJ72wtWhQi9DwSP6678viqT8WKs+Kll9eGxzR91t6/DiLiQvW0u+7b9DX0eZtaDiv3oQwasB30m27t2/Ohr3HePMuDGeHftLlxd13fI0+/sa9x3js8qJTq4a8C4tgzsptdGhRX9p/NqxTnQVuuzh2/gbd2jclIjKG+W4783yNmDzaWhpMWbSe6WMGULaMPvtPXMbHL5D5TsOBzHmzDgO7s9B9d2Y9VDHH2/c5q7ceYvSg7tKHYFw3etKiYW3MTU2IiUtg79GL1P30AOHnHAtFr72KKMp18XP6tPzqAj6/zwXo1akl2w6cJikphTYFTEcqLUosqDQrb8xvXhfY4nmSpOQUTMsZ06lVQxwGfift0Lp+04Sb93wZP9cdZWUlDni4MGFYb0LDoxjmvAwliRLtmtdj09IpDJq4WJq2qYkRM34axIY9x3Db/jsLJ4+gR8fmODv0w8hQn+2HzhIcEoa2pga1a1ShdWP5B0cikbBxsROum/bjtHAdMXHx1LAyZ9PSKTLfluTlU1HfNLPDY+FEPHZ5se3AKVRVVahcqQI/Dfm+0HWaV7kVoUi9fq7xQ75n6tKN2I+Zi7mpCSe2LVV428Ies6IozmMhj7lZOVbPGceiNbvx8QtEX08HhwHfMahnh2LPg66OFknJKazYvJ/I6Fi0tTSpYWXONtdpMiNO8o7N8hmjmffrDnr8OAs9XW0G9mzPNIf+XLh+V7qdvDZvWbE8m39xZtWWgyxw20VUbNyn0aRa0tHvnIwM9PB0n8Oy9fsY4rSUDDJoWs+WPW6zSuT2t6a6Gr9uP8Tb9+Goqap
gW9WSLb9MlT6V+UPX1rwJCWPs7NVEx8RR2bwCzg79mOW6pYCUwcTIkGXTRuG2/TCb9p7A3MwE51F9uXTTu9D5LGdsyM5VM1ixaT+jZqxEWVmJWtUqM7RPZ5SVlAqsd7+AINrm+HGJnJrUs+Hcn3dk1tPW0qBh7er8/eCp3HfRFkf55bXB8mXL4LlmLq6b9uPo4k5aWjrlTYzok+Ohmg4t6zN31TYqVTCheo6Hhoqz7/gaffw0h37cvO9LP8cFALRvUZ85jkOkn1tbmLFsugPr9xxl7qptlDHUY0jvTmjKmzueh0oVTBg1oBvz3XbyJiQMc1MT3OY5YlvVQrqO49Be6OlosXpr5nliWs6ICcN7M6R3J+k6ScmpzFi+mfDIzGkrzRvWZProAdLPv9S1VxFFuS4qcm7lVFBdfG6fC5kPN/+y0ZOeHVsU2xSAL02SIe/+iiAIgvCv1bjnWNxcxtOsvvy56kLp0sLekZ/HDqB7B8WCn8/hefwS2w6c5txu1y+2D6F4xcQl0LKPIxsWT87zuZPS5qvOqRQEQRCK18s3IehoaxZ6TqTwdZXE8I4YQ/p3OXbhOsZl9GXeF1va/TvGUwVBEASFWFYsL/OzgoIg/HukpqXx9n04gUHvcN9+mMk/9s3zd85LIxFUCoIgCIIglAIRUbHYj3FBRUWZQT070L/7N187S4Ui5lQKgiAIgiAIRfbvGVMVBEEQBEEQSi0RVAqCIAiCIAhFJoJKQRAEQRAEochEUCkIgiAIgiAUmQgqBUEQBEEQhCITQaUgCIIgCIJQZCKoFARBEARBEIqsRIPKpOQUOv7PmaPnrwPgtNCDWSu2lsi+Ow+ZxsXr90pkX8I/Al6/o3q7IYSERXzxfc103cLyDZ5ffD+KiomNp8n347h+1+drZwXP45fo+D/nr52NIktITKJ6uyF4+z6XLmth78ix8zeKJX1F0rp805vanUcUy/7GzFrFlEXriyWtf6Oc14TSoCSvSwWR195L2pfuV0tDGYXiU6JB5cY9x9HX1aFHh+YluVsAGtWtgYmxQYnvVyg5NtYWVDGv8LWzIaWnq43jsF64rNpBenp6ie1395HzbD1wqsT2J3w+Az0dDPR0vkjan9sOngUEMXpmyfzM49e8Jgi5Obq44+MXKLOstPWrQulWYkFlSmoae49dZHDvjkgkEuny7P//khZNGUHt6lVKZF9CbiVxnAf36kifrm2++H4Ko3fnVnyIjOZCCY6SX75xv8T2JRSNgZ4OBvpfJqj83HZwsYTaT17XBOHrSEpO4cbdx7mWl8Z+VSi9Suy3v6/85U1iUjKdWzeSzYCyMr9uO8TBU1eJjUugro0VcxwHU61KJek61dsNYcsyZ1o1riNd9ufth4yeuQrfCzsACAmLYMm6Pdz2fkpSUjIVTU0Y1LM9A3q0BzJva00fM4AeHZtL/17kPJITF2/yx+0HIJHQsHZ15kwYjKmJkXQ/T56/Zsm6PTzwfY6ujhYDe7Rn3OCe0k7w7qNnrNxygCfPX6OirIS1pRlOI/rQ2M6GtLR0Vm45wKnLt4iIiqGskQGtG9XBZdLQPDvRs3/8zfrfjvHiVTB6Otp0bdeUKT/+gIa6GpB5O7m/4wIu7VvF7BXbuHLzPv17tOPnsQNzpaVoGaNi4li+0ZNLN+6TkJhEDStznEf1pbGdTaHTysuLV2+ZtGAtPn6BGOjp0u+7tvw05HtpPQx2WkJdW2ucR/WVbpOckkqdLiPZ6zabBrWrFVifTgs90NLUYLHzSCDzNpaNtQVx8YkcOXuNuIRErMxNmT52AI3qVJfuJzI6liXr9nD5hjcfMz7Srnl95k4Ygp6OlkJt69Cpq2w7eJo378LQ1dairq0VLhOHUs7YEC1NDTq0qI/Xueu52n6WjxkZbNhzjEOnrhL6IQrT8sYMs+/MwJ7tpet4Hr/E9Ts+TBhuzyzXLTx5/orVc36iQ8sGMul0HzGT56+CuX7Xh+UbPDEtZ8zlfZmjTsrKyly6cY9VWw/xOvg95U2MGPHDtzK/LZv+8SMeu7w4dOoqkdGx1LWxwmXSMKpamuV5bCOiYnHd5Mnlm5nneMXyZfnlZwdqVa9MWHgUHru8uHbHh9DwSCqYGDFucE96dmwh3b6FvSPbXKdx7s877Dp8DnNTE35fPx+A7QfPsOv3s3yIjMbG2oLpYwbIzUNCUhIzXbdw/s87SCQSGtapzmxH2bbpFxDE8g37uOvjj5KShOb1azJj3CDMyhvnWbbE5BSWeuzh9NXbpKam0bpxHdq3qJ9rvYL6CaeFHtSvVY1yxoYs3+BJWEQUVzxXY6Cng7aWpjSd/NpSTnm1y37d2+XZDtI/fmTjnuOcvnqboOD36Olq061dU6aO7o+SRILL6u14Hr8MZPa7AJf2riI4JIzBk5fy8MxW1NVUpXlYsfkAPn6B7FgxHci/P8wpr2uCInVZvUolIqPjOHbhOomJyTSoXY1Fzj9y5a/7bDtwmrDwKCqbV2DW+P/RsHbmuR7w+h0/jJvHll+cWeKxB7+AIPR0ten33TeMz9YXyZNfv7xs/V5uez/l8MYFMtv85nWe9b8d448DbigrKRVYLlC8veclv+tDfuf2s4AghjovIy4hEfuxLgD06tySZdMd5Par8tqyob5uiZRRKN1KLKi889AP26qW0uAoy4mLN2nbzI4Ni51Qkiixce9xRkx35eKelTKdV0FmLN+Mro4Wu1b+jLKyMo+fvURPVyvfbaYu2cCcCYNZNHUkiYnJTFm8jjkrt7H1l6lAZrA1ZPISOrRswOzx/+PNuzBmrdiKnq42g3t1JCk5BYcZKxk1oBtLp40iPiGJvx8+paxR5m32zZ4n+fP2Q36d+xNlDPR4+SaE4JAPeXZe56/dZcqi9Uwc3pvWTeryLjScpev28uZtKOsXO0nXi4lLYNICD8xNTfBYNCnfoK6gMqanpzNyuitKEgmr54xDX1eHU1duMXK6K7tXz8TO1lrhtPLjsno7E0fYY2ttiY9fIPPddqKnq82Q3p0K3DZLYesTwGOXFwN6tOP41sVoqKuxdpcXji7uXNyzEm0tDQDGz3UnKSWFzcumoKSkxAL3Xcxy3cKa+ROA/NvWPR9/Fq39DdeZY6hepRJh4VHcvPcYIwM9aR4a1qnOqi0HycjIkJvXpev2curSX8z8aRDVKlfE2/c5S9fvJTklleE/dJGu9yr4PT/NdaP/d9/gNLIPNazMZdJRkkg4uM6Ffo4L6fZNE4b07oRE6Z+bER8iolm704ufxw6kYnljLt24j8vq7VS3qkS9T8d57Y4jeJ64xHyn4VSuWJ5tB08zctpyzu12zXXuZhk3ezUx8QnMnzQM4zL6PH4WSMUKZQGQKElISEpmkfMITMsZc/jMH8z4ZTN2NlZYVCwvTWPl5gOkpqWx/OfRGH4auTt+4Qaum/YzY9xAmteviV9AEDNct6Akpw7dtx9mYM/27Pl1FvFJycz/dadM23z7/gODJi6mY6sG7Fo1g9TUNDZ7nmDQpEUc27JE+gUip8Vrf+Pyzfv88rMDlSqYcPUvb5at3yuzTkH9RJ
YrN+8T9C6MiSPsMdTXxVBfl54dW6KsnFkeRdpSdnm1y/zagbKSEsHvPzB+8PfUsDbH1/8lU5dsxNrSDPsurZkxbhBKSkoEvQvD3WU8AJoa6gSHhMnNQ3YF9Yc5ybsmKFqX63YfZcygHlz4bSUJiUmMmrGSfuPnU9XSjN2rZ2Kor8vqrQdxWrCOS/tWoqqSeamLS0hk7uodLJ36IzWszLnzyI8J89agp6PFUPvOcvNZUL/cu3Mrth88Q8DrdzK3iU9fvk3Xtk1QVlJSqFyFae/5yev6kN+5bWVphtemRbTuO5E9brOwtbZARSXv8EBeWy7JMgqlV4kFlU9fvM51EQTQ1dZk+YzR0kblOnMMrX6YwNHz1+nbra3C6b9+G8r4Ib2kI5xWFqYFbtOueT3piImmuhoj+3Zl7OzVfMzIQEkiYcehMxga6LHIeSTKSkpUr1KJoHehbNhzjMG9OhIRHUtcQiI9OjTHtFzmaIdtVQtp+kFvQ6ldowr1alYFwMKsXL75WbvzCP2++4ZRA74DoHqVSpiaGNH9x1k8ePKCujZWAGRkZFChbBnmThhS5DJevumN34sgLu9bJe38bata8Dr4PWt3ebFlmbPCaeXHYcB3fNeuGQBVzCsQGh7J5n0nCxVUFrY+AYwM9Zg2ZoA0f45De7HF8yR+AUHUr1WVG3cfc/+xP+d2u0oDoYWTR9Br9BzpRSK/tvX67XuMDfVp36I+ShIJ5qYmNKhdTSYPNtYWRMfGExIWSQWTMjKfRUTFssfrAqvnjJOO2FStXJHk1FTctv3OoO87oKaaeZr6BQSxYtZYurdvlmd5tTQ1UJJIUFVVQUtTQ+az2PgElk0fJS3HiL7fcvziDf6650s9W2ti4hLY8ftZ5k0aSqdWDQFYMHk4rftOxOvcdZkRzSx/3n7IQ79ALvzmKj0Hspff2FCf5TNGS/+eMNyevUcv8vBpgExQGRzygWNbFqGsrCxdttnzJP27fyO9IFlZmJKeno7zkg258tGgdjXGD+31z36G9WLCvLXStrn90FkqlDNi6bRR0nXq2Eyg3YAp7D9+SXrOZRceFcORs3+yatY42jWrB0BVSzNeBr/n6Llr0vUK6iey3H7wlNM7fpG2M0CmPSjSlrLLr13m1w6yRpwg8xw6fvEm9x8/x75LazTU1VBRUUFZSSnXdgUpqD/MSd41QdG6LF+2DOMG9wRAW0uDH7q2YYH7LnavmkH5spl1+mP/bmw/eIbXwaEydTOqXzdqVa8MQNN6tjgM7M4Wz1N5BpWK9Ms1q1py/MINJo6wByA0PJK7Ps+YNqa/wuUqTHvPj7zrgyLntqaGOgAaamoFHnt5bbkkyyiUXiU2pzIyJk46ApFdbZsqMgGJupoq9WpW5emL14VKv8+3bVi4ZhfrfzvGh8hohbapVc1S5m8jQ31SUtOIiY0HwNv3Ba0b1UY522hPnRpWfIiIJjwqBlMTI5o3qMnQKcs4cvYaSckpMun17NSC4xduMmflNvwD3+Sbl4TEJJ6+eE3rJnVkllerUgnTcsb8/dBPZrn9t62LpYx3fZ5Rw8o812hC26Z23HnwtFBppaalkZySSnJKKimpaTLr2n0KBLM0rWdLaHgkkdGxCpUDClefWWysLXK1Lx0tTcIiogDwfvIcK0szmc7RxtocVRUV/AIy22B+batlo9qkpqUxbMoyrt/1ISMjI1ceDPV1gcxRmJwePHlBeno6rRvLHvdvmtYjJi5BmgcADXU1urVrqlC55dHR0pSZVgJQvqwRHyIyy+Qf+IaExCTaNrWTfq6qokINK/M8z8dHfoHUsKokDSIKoiSRUK5sGaLj4mWW9+zUQiagTE9P5/nL4Fz10iDbtAWZ5TmCr/JljUhNy9bOH/nRulFtmXVUVVRo0bBmrnMry/OXwaSlpec6Jxvm2FdB/USWxnY2Mu0sJ0XaUnaf0+fJY2piJK2noiioP8xJ3jVB0bqsnqMdG+jpoK6mKvNFxfDTA1DZt4PM8zu7JnY1CA2PJCIqd1+kaL/cq0srjl38560BZ67+TcXyZaUDAQWVq7DtvSA5rw+fc27nR15bLukyCqVTiY1UJienyL19pqOpmWuZvq629EKnqLH/64G1pRlbPE+ybrcXvbu0Yuro/uho5U4/i4GertzlWZ35u9Bw7vk8w+vcP6+7+JjxEYCw8CiMDPTYsHgy+45dxH3HYZZv3IfDgO4M69MZiURCEzsbDni4sGHvMXo6zKFZPRvmTBiCZbaOL0tsfCLwTwCSXRkDXaJzBCTljMvkWu9zyhgbnyg32C9joEticgopqWnSkbKC0uo67Gdevw0FkJnLB0hvNWfR19MGMm/JyiuzPIWpzyx5Pln76XodEhrBi1dvadh9jMzHaenphIVnBp75tS1jQ31+Xz+fTftOMMFlDeXLlmHamP60aVJXmpbmp3aflJycKxtx8QloqKtJRwmylDHIrJPomH8u9mXLGBTpNpG8epZIIONTZbwLCweg/cApMuskJifnOXIR+iEyz9uzAC/fhLBl/ynu+TyTliVSTnCdc85gZHQc6R8/UiZH2nndps5ZtqxpBlltMy4+Uf65pa9HYFCI3DQ/RETLPTY586BIPyGvjDkp0pay+5w+Ly4hka2ep7hyy5sPETF8/PiR+IREWuexj8LKrz/MSd41QdG61NPVzpVezuMklSM419GWPX76up+Cz8ho6XmXRdF+uXv7Zvyyfh/3H/tTr2ZVzly9zXft//kCWFC5Mj5mFKq9FyTn9eFzzu3808/dlku6jELpVGJBpa6Oltxvw3EJibmWRUbHUsnUJN/0IqNzX5g6tmxAx5YNuO/7nDkrt+G0wIPNyz7/3XzaWhr07daW4T98m+uzrNcTqaupMqxPFwb37sSpy7dY4LaT9PR0fuzfDci8/ePu4sjb9x9Y4rGHgRMWcWHPilwnsoGeDhKJhPDImFz7ioiKRT9HcFRcU1AM9XTw9X8pd5+a6mrSgFIR6xZOIjklFSDXdvHxssc56/gZl9HPXCCnQPJGMRWtT0Vpa2lga22B26f5Y9llBb6Qf9syLqPPzJ8GMXG4PdsOnmbMzFV4rp0rHaWI+tTu9XRyXwgN9XVJSk4hITFJpgxZoybZ81DUY17Q9tqamihJJPy+fl6u+VQaGvLnU5Yx1MPn2Uu5nyUkJjFwwiI6tW7EjhXTMTHKvBB1Gpx7Dq4E2cwZ6OmgJJHk+jIVE5cgd185t8/JUF8314gVQER0DPpyAhTIDBqSklNITkmVmd+dMw+K9BOg2BsQCmpLORW2z3Na4EFcQiKuM8dQxdwUJYmE2Su3FTxSmUfec56jBfWH2cm7Jihal0URExcvM+0gawBD2hdlo2i/bKCnQ9umdTl56S/Myhtzz8efBZOHS9ctqFwSJIVq7wXJebg+59zOP/3c7aGkyyiUTiV2+7ucsSFvQ8NzLX/w5AXpHz9K/05MSuaejz+21v/MxTE21CciR+fl80z2XVrZ1fv0FHFRX6basHZ1HvkFYlrOCLPyxjL/VHOcmMpKSnRv34wfurblvpz9mpYzxnXmGMKjY
qSjedmpq6lS18aKq7ceyCz3D3zD2/cf8p1bVRRN7GzwCwji/YdImeVXbnkXep9VK1ekVvXK1KpeOddt1rs+/jJ/X/v7EeXLlpGOABgb6hGR46L/OI9gBQquT0U1rF2dgKC3aGio5TrG8kZ88mtb2loaOA7thW01Sx5k+zwkNByJRCL3gQW7mtaoqarkOu5X/vLOvF1duVKubQqirqZKcnJqobera2OFkrISr96G5qqLvEYja1a15MmLV3Jfbv/0RRDhUTH8NLinNKCMiUsg+P2HAvOioqJMFXNT/rrvK7P84ZMXhS4XQOO6Nfjj1kOZW8ppaelcv/M4z3ZubWGGsrJyrjw8yJGHwvQTisqrLeVFXrvM2Q4yMjK4ec+XQd93wNrCTDrq/fS57O1PdTXVXLeujQ0zA66ct4jlfSGFgvtDkH9N+BJ1mdPdR89k/r544x7ljA3ljkYWpl/u3aUV56/d5dKN+1SrXBFri3/emFBQuRRt7x8zMkhITOJjAVMjclLk3M764pSUkv+0hbwUVxkhMw5ISyu5d/sKxafEgso6Narg7Zu78SQkJeO8eD2P/AJ4/OwlkxeuQ1tLgx7ZXjnSpJ4Ne49e5ENkNKlpaVy6eZ9jF2R/9cJ1oyc37j7mzbswfP1fsffoRepme3L5c4zo+y2BQe+YuGAt1+/64OMXyLk/77B53wkg8+EC9x2H8fZ9ztvQcP6678v5a3ew+zSqsPvIec5e/ZuXb0J4+SYEj91H0dfVlpn3k92kEfb8fvoqWzxP8vxVMNfu+DBxwVpaNqotfTq3uLVoWIv6taoxbs6v3PZ+gn/gG9y2/c7F6/cYN/j7YtmHRCJh6/6THL9wg4DX7/A6d41N+04wasA/oxdN7Gy5cO0uvv6v+JiRgV9AEGt2HpH5RlzY+lRE22Z2VLOsyMhprpz9428eP3vJtb8f8eu2Q8QnJAH5t60/bj9k37GL+AUEERzygaPnr/P8ZTC1s40s3fd9TuVK5eXe5tHR0sRhYHcWuu/m9JVbBLx+x+Ezf7J66yFGD+peqDcgZKlWpRInL/2F/8tgXr6Rf2vUW+A7AAASJklEQVRXnjIGugyz78z0ZRvZd+wij/wCuO39hB2HzvDIL0DuNu2a16OqZUVGz1zFpZv3efg0gEOnruIf+IaKFYxRUVFm3/FLBId84M4jPxxd3BW+3TWyX1d+87rA/hOXCQx6x4Vrd1m15eBnTQEY9kMXIqJj+PmXzTz2f8mDJy+YuGAtAP27t5O7TVkjA3p0aMZ8t538cfshL169ZfeR85y4eFNmvYL6CUUp0payK6jPy9kOJBIJFmblOHHxJi/fhOAXEITL6u2EfJD9QlC9ckXuP/bn5r3HBL0LJTE5BXNTE0xNjNi49zhJySnEJySxYc8xXrx6K92uoP4wJ3nXhOKqy/xsPXCKY+cz+yLP45fZeeis3JHULIr2y22a1CU1NY0dh87mephOkXIp0t6v/OVNvW4OBL5+V6gyK3Juq6upYmFWjj1eFwgO+cBbBb78fYkyxickYdd1FAdPXSnU/oXSocRuf7dqXIfVWw/x/FWwzDe42eP/R8Drd4yZtZqY2HjsalZl6/JpMrdPZ/40iIXuu+k2fAZpaenUr1WVFbPG4vDzCuk6ScmpzFi+mfDIzNtZzRvWZProor3/qnzZMniumYvrpv04uriTlpZOeRMj+nyaBK2poY6v/0v2Hr1IXHwiJsYG9OjQnBH9umZ+rq7Gr9sP8fZ9OGqqKthWtWTLL1Olc+xyala/JusWTsJ9x2HcdxxGV1uLLm0a4+zQV+76xUEikbBxsROum/bjtHAdMXHx1LAyZ9PSKcU2OmpuVo7Vc8axaM1ufPwC0dfTwWHAdwzq2UG6zg9dW/MmJIyxs1cTHRNHZfMKODv0Y5brFuk6ha1PRSgrKbH5F2dWbTnIArddRMXGYaivS4sGtVBRyXxwJL+2paujhde566zcfJC0tDTMzcqxaMoImYvN1VsPaNWojtz9Q+YT6Xo6Wqzemlk203JGTBjeu1BPxmc3fsj3TF26EfsxczE3NeHEtqUKb+vs0A8jQ322HzpLcEgY2poa1K5RhdaN5c+5k0gkbF46hWXr9zF96UaSU1KxrFieFTPHYGJkyLJpo3DbfphNe09gbmaC86i+XLrprVBeendpRURUDOt2H2Wh+26sLc1wmTSU2a6F/wk9IwM9PN3nsGz9PoY4LSWDDJrWs2WP26w8b38DzJ0whMUee5iyaB0pqWk0qlsD93mOOMxYKV2noH5CUYq0pewK6vPktYPlM0Yz79cd9PhxFnq62gzs2Z5pDv25cP2udLuu3zTh5j1fxs91R1lZiQMeLlhWLI/7fEeWeOyhhb0jqqoqdG7dkKmj+0t//rag/jAnedeE4qrL/Cx2Honb9t/xWZHZF40Z1F3myfKcFO2XlZWV+a59M3YdPke3HEGlIuVSpL37vQjC3NREobeb5KTIub3IeSQuq7fTecg0+nzbmnlOwxROv9jKGBAEQNum9QpdRuHrk2QU9IhhMfph3Dwa1K4m90XdgvBf5BcQRM9Rszm9YxmVK4mfOhOE7ErymhDw+h3fDpvOHwfcCnxo6nOt2HyAu4/82Oc+54uk/9NcN6pZVpS+uui/aNfvZznzx9/sdZv9tbMifIYS/e3vqaP7c+DkFULDIwteWRD+A9x3HKZ3l1YioBQEOb7GNeFLjaOkpqVx9Nx1+nz75X7S8OGTAHp/2+qLpV8aPHgSoPAr84TSp8Ruf0PmZPk5jkN4+ea9dOK+IPxXxcYnYGNlztA+8l+qLAj/3/0XrgnvP0SSkJjEhj3HUVVVpnuH5l9sX38edPtiaZcWK2eP/dpZEIqgRINKyPw9UUH4/0BXW0vmV14EQcjt335N2Lr/FJ7HL2FjbcGGRU6Feg2bIPzXlOicSkEQBEEQBOG/qUTnVAqCIAiCIAj/TSKoFARBEARBEIpMBJWCIAiCIAhCkYmgUhAEQRAEQSgyEVQKgiAIgiAIRSaCSkEQBEEQBKHIRFD5mQJev6N6uyGEhEV87awA8MuGfYyc7vq1s5Gn0p6/L8FjlxcDJy76Yr/gIQiCIAiliQgqBbkcXdzx8Qv82tlQyJfKa1HTHdmvK8EhHzh85s9izJUgCIIglE4iqBRySUpO4cbdx187Gwr5UnktjnQ11NWw/7Y12w+dKaZcCYIgCELpVWK/J3Xb+wmDJy/l4ZmtqKupSpev2HwAH79AdqyYDoDTQg9srC2Ii0/kyNlrxCUkYmVuyvSxA2hUp7p0u/SPH/HY5cWhU1eJjI6lro0VLpOGUdXSDMi8PW0/1oWtv0xliccengUGYWSgx4h+XWnfoj6L1uzmr/u+aKqr07lNI2aMG4iqioo0DzWrWhIaHsWJizeJS0ikTo0qzJ0whGpVKuVZxqiYOJZv9OTSjfskJCZRw8oc51F9aWxnA0Dv0XNpbFeDn8cOlNluwrw1SCQS3FzGF1gugNdvQ5nvtpM7D/3Q0lSnV6dWKCkV7vvB3UfPWLnlAE+ev0ZFWQlrSzOcRvTBQE+Hoc7L
iEtIxH6sC5D5M2rLpjsw2GkJdW2tcR7VV5pOckoqdbqMZK/bbBrUrlao/J2+cou1O714FfyeihXKMmVUXzq2bCD9vIW9I4ucR3Li4k3+uP0AJBIa1q7OnAmDMTUx4llAUJ55LUh+9VxQuqev3GLDnuMEvH6LrrYWHVs1YL7TcLn76dmhOR67vPD1f4VtVQtFDo0gCIIg/CuVypFKj11epKSmcnzrYm4eXkvT+rY4urgTn5AkXWftjiPsO3aR2Y6DObxhAZVMTRg5bTlJySnSdRISk5j36w4WOY/g3olNjB3ck0VrdjPYaQk9OjTnryPr2LHyZ85cuc3+E5dl8rBm5xG0tTQ4s2s5fxxwo5KpCSOmu8qkn116ejojp7viH/iG1XPG4blmLo3tbBg53RVv3+cA9OrSipOXbvEx2xy7hMQkrt56wHftmylUro8ZGTjMWElCYhI7V/7M5qXOhEfFcODkFYXrNyk5BYcZK2nduA5emxayc+UMurRpTFkjA6wszfDatAiAPW6zuH9yEwsmj1A4bUXzd+PuY6Ys3oD9t63x2ryQ/t2/YdKCtTx5/lpmvalLNtC6SR3+OOjO2Z3LSUpOZs7KbQBFymt+9Zxfun/efsjkheto2ag221yns3DKCJrXr5XnfiwqlsfYUJ87j/wUypcgCIIg/FuVyqDSyFCPaWMGYKCng4a6Go5DexEVE4dfQBAAMXEJ7Pj9LD+PHUinVg2pWrkiCyYPJzUtDa9z12XS+rFfN2pYmaOiokzfbm0x1Nelnq01Xdo0Rk1VhaqWZnRq1ZA7D5/JbFfO2JAJw3qjp6OFgZ4OLhOHkpqaxtHzsulnuXzTG78XQaxbOIlm9WtiW9UC51F9+aZZPdbu8gKge/tmREbHcuu+r8x2qioqtGlSV6FyXbx+j9dvQ3F3ccTO1ppa1SuzbPooyujrKly/EdGxxCUk0qNDcyzMymFb1YKh9p2pXKkCykpKaGqoA6ChpoaWpgZqqooPaCuavzU7D/N9pxaM6Pst1hZmDOvThdaN67LZ84TMeu2a16NnxxZoqqtRxkCXkX27ctv7CR8zMj47rwXVc37prtl5BPuubZjq0I9GdarTvkV9OrdplO/+bKpa8PTF63zXEQRBEIR/u1IZVNpYW6AkkUj/VldTRUdLk7CIKAD8A9+QkJhE26Z20nVUVVSoYWWe6+Jdw0r2drWBnk6uW9gG+jqER8Xk2M4cSbY8aKirUdfGKs/g4K7PM2pYmVPWyEBmedumdtx58FS672+a2XHswg3p56ev3KJTq4aoqaooVK4nz19hYy27H4lEQoNa1eTmSx5TEyOaN6jJ0CnLOHL2Wp6jr59Dkfx9zMjg0dNAmXIC1KlRJVf91qpmKfO3kaE+KalpxMTGf3YeC9N+svuYkcFj/5e0b16/UPsz1NchKibus/MrCIIgCP8GJTansjAM9HTkf/DprvG7sHAA2g+cIvNxYnIyWpoaMsv0dLVzJaOlqS4nbdnXvuhoa+ZaRV9Pm/DImFzLAWLjEzHUz53vMga6JCankJKahpqqCr06t2Tqko3MmzSM9PR0/rj9kA2LJytcrvDIaLmjkro6WrwPj5SbN3k2LJ7MvmMXcd9xmOUb9+EwoDvD+nSWCaQ/hyL5C4+IJjUtjenLNjFz+RbpOqlpablGGg305I/AFuU1PYVpP9lFRMWQlpaOkaFeofanqa5OeIT8diMIgiAI/xUlF1TmEaxERscWOiltTU2UJBJ+Xz8PFRXZImhoqH1W9nKSNxL2ISKaypUqyF3fUE8HX/+XuZZHRMWiqa4mDZbaNKmLupoqV289IDU1DV0dLZrUy3yQR5FyldHXyzXvECA2LqFQ5VNXU2VYny4M7t2JU5dvscBtJ+np6fzYv1veG8k5hjmPnyL5ywrcFkweTv2aVWV3oVS0oFYRn9t+DPV0UFZSKnSbjYqNk/vlRhAEQRD+S0rs9rexoT6QGWRlJy8QK0hdGyuUlJV49TYUs/LGMv+MDAo3ipSXB08CSE9Pl/4dHRvPPR9/bKzM5a7fxM4Gv4Ag3n+QHS28cstb+lQ0gLKyMt07NOf8tTtcuH6Xrm2boPzpyWhFylXdqhJPX7wmOkfQ+/DpC5m/k1NSSU5JLbCcykpKdG/fjB+6tuX+pweKsp7OT0qRvS1ubKhHRI5pAo+fvZT5W5H8aWtpYGOdOc8wZzlNTYwKzHN2eeU1v/IrUs/y0lVWVqZalUpcvnE/z/y8eReWaxQ1JDQCk0/TAdLT03kbGl6oMgqCIAjCv0GJBZXmpiaYmhixce9xkpJTiE9IYsOeY7x49bbQaZUx0GWYfWemL9vIvmMXeeQXwG3vJ+w4dIZHfgHFkt/EpGQmL1rPw6cBPPILwNHFHV0dLXp0bCF3/RYNa1G/VjXGzfmV295P8A98g9u237l4/R7jBn8vs27vLq24/rcP1+74SJ/6VrRcHVs2oJyxIZMWrOXh0wB8/V8x320nr9+GyuxjgONCpi/bJDevwSEfcN9xGG/f57wNDeev+76cv3YHOxsrIDOgsjArxx6vCwSHfODt+w8ANLGz5cK1u/j6v+JjRgZ+AUGs2XlE5pa5ovmbONyenYfOsnrrIe4+esZ93+ccPvMnJy7dVOTwSOWV1/zKr0g955XuT0O+58DJK9L6u3bHB8/jlwDw9n1O+0FTZB7mSklNw9f/FXa21gBsPXCadgMmS9MTBEEQhP+KErv9raKijPt8R5Z47KGFvSOqqip0bt2QqaP7c/H6vUKn5+zQDyNDfbYfOktwSBjamhrUrlGF1o3rFkt+v+/UAm0tTcbOXk1MbDx2ttZsXz5N5h2b2UkkEjYudsJ1036cFq4jJi6eGlbmbFo6RWakEqB6lUqYGBsSF59I3U+BnKLlUlZWZvMyZxa47eJ/kxajoa5GlzaNmT5mAJduZo6gpX/8yPNXwQzr00VuXjU11PH1f8neoxeJi0/ExNiAHh2aM6JfV+k6i5xH4rJ6O52HTKPPt62Z5zSMH7q25k1IGGNnryY6Jo7K5hVwdujHLNd/5kUqkj+Ab5rZ4bFwIh67vNh24BSqqipUrlSBn4bIBuCKyJnXOROH5Ft+Reo5rzro2LIBS6b9yOZ9J9m49zi62lp83ynzi4aaqioGejrSUXmAG3d9yCCDpvVsAdDX1cZQX0f6dLkgCIIg/FdIMsQPE+fitNADbS1NFk1R/P2MhdX3p/k0b1CTSSP6FHvaT1+8pv/4BVz7fQ06WrkfOPqvK03lHz/XDXU1NVbOHvtV8yEIgiAIX1qpfKVQafAlY23/wDc8ehpAr04tv0j6D568oHObxl89oPpaSkv5ff1fce2OD47Den3VfAiCIAhCSRAjlXI4LfRAS1ODxc4jizXd129DiYmNZ+7q7VhWLM+q2eOKNX2hdMma1tG+ReHeaykIgiAI/0al8j2V/1WTFqwlMCiEVo1rM99p2NfOjvCFiWBSEARB+P9EjFQKgiAIgiAIRSbmVAqCIAiCIAhFJoJKQRAEQRAEochEUCkIgiAIgiAUmQgqBUEQBEEQhCITQaUgCII
gCIJQZCKoFARBEARBEIpMBJWCIAiCIAhCkYmgUhAEQRAEQSgyEVQKgiAIgiAIRSaCSkEQBEEQBKHIRFApCIIgCIIgFJkIKgVBEARBEIQiE0GlIAiCIAiCUGQiqBQEQRAEQRCKTASVgiAIgiAIQpGJoFIQBEEQBEEosv8DbabNJ+u/qSEAAAAASUVORK5CYII=)",
"_____no_output_____"
],
[
"![Screenshot from 2022-04-04 10-46-52.png](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAnIAAACWCAYAAABaZGUoAAAABHNCSVQICAgIfAhkiAAAABl0RVh0U29mdHdhcmUAZ25vbWUtc2NyZWVuc2hvdO8Dvz4AAAArdEVYdENyZWF0aW9uIFRpbWUATW9uIDA0IEFwciAyMDIyIDEwOjQ2OjU4IENFU1SIJOUMAAAgAElEQVR4nOyddVRV2duAn3vp7pASBQVU7O7WsbvbMcfudoyxY+zubsfu7m5FFBUUFQWRbr4/kCMXLnBBUPl++1nrrnXuPnvv8+5+d8vi4+PjEQgEAoFAIBDkOOS/WgCBQCAQCAQCQeYQipxAIBAIBAJBDkUocgKBQCAQCAQ5FKHICQQCgUAgEORQhCInEAgEAoFAkEMRipxAIBAIBAJBDkUocgKBQCAQCAQ5FKHICQQCgUAgEORQhCInEAgEAoFAkEMRipxAIBAIBAJBDkUocgKBQCAQCAQ5FKHICQQCgUAgEORQhCInEAgEAoFAkEMRipxAIBAIBAJBDkUocgKBQCAQCAQ5FKHICQQCgUAgEORQcrwid6hJA5ZoyliiKSPg8aNfLY7KRPj7s6NUMVaaGfJ49cpfLU6OjUfB743H1s1Svro+cVyG3MbHxnJ/0QJ2linBShN9lhtos9EpNyfatyE6NDSbJBYIBIKchXp2efz+ymX2Vq0IgG3VajQ5cUapPa8D+znaoikARk7OdHjqqdResI83G51yA2BWyJ02dx5kg9Q/j7fnzvD5/j0AHq9aQcE/e/5iiX4/TnbuwPNtW9K1V3HOfIoMGKTUXaOjJ7GvUTNVtzem/M3NKZMAqPzvItz79lN4v97RjlDfdwD0CYtGrp56kfE+foznO7bx8fo1Qt69JS46Gi1jY4zzu2BbpSr527TDxNUt3fAIEjj3V2+erF2tYBbs442mkREaeno/RYZg7zc837YVn9MnCXjymMgvX1DT1sYkvwtOzVtSpN8A1LS1f4osAoFAoIxsU+SsSpdBQ1+f6JAQPl6/RmxUFGqamins+Zw8IT1/ffmCr14vMcrrlMKe74Xz0rNdterZI3QGeXf+HPtrVSN3vfo02H8oQ27NixRFXUeHmPBwrMuVzyYJE/gROQXpE+7nx4mObXl7NmVnJfzTJ8I/feL95UvcnjmdLq/eomtt/QukzFmEvPXhybo1AMjkcgr17otZIXci/P0xzp//p8jwct8eTrRvQ1xMjIJ5XHQ0frdv4Xf7Fs+3baHpqXNomZj8FJkEAoEgOdmmyMnV1bGpVJk3R48QEx6O362b5CpfIYU97xPHFf8fP4Z7n79S2PO9eEF6tv1NFLkXu3dm2q2xcz46eb4m9MN7zN0LZ6FUKfkROX8X8rdtj6Gjo9J3VqXL/FxhkhDs/Ya9VSoS8u6tZGZRtBgWxYqjrqtLiI8PvpcvEuHvj2O9BkKJU5FPd+9AfDwAeRo1ofK/i366DLaVqqCmpUVcbCw2FSthVboMWsYmhPq+w3PndiL8/fF/+IAbkydSaf7Cny6fQCAQQDYqcgB21Wrw5ugRIEERS67IfX35gqBXXgBoGhkR9fUr3ifSVuRkamrYVqqi/IMyWRZKnzaxkZG83Lfnh/zQsbREx9IyiyRSTqbk/InxqCqunbqkOUX6K4iPi+N09y6SEqdvZ0/tTVvJVaGigr246GheHz6IQW7Hny/kL0aWybwUFRwsPRs7O2eVOBlC29ycOlt2YFqwYIq0KzJwMFvd3b6l7SGhyAkEgl9Gtm52SDoF6nvpQor3SUfjnJu3BODdubPERkUp2Av7+JFAz+cAWBQrjqaRkdLvaRgY8OHqFY40a8waGwuW62ux0Sk3Z3p04+vLF6nK+e7cWS4M6s/OMiVYZW7EMl0N1tpZcbDhH3j9ty+FfY+tm9lSyJVwPz8A3hw5LC3oXqIp4/LIYal+KxG/27ck+8dat0jxPtDzOef+6s2Wgi4sN9RhhbEeWwrk51CjejxevZKIz5/T/UZm5cxsPAJ47d/LoUb1WGtryTI9TdbZW3OkWeMUI6//H3i+dTPvzp8DQENfnyYnz6RQ4gDkGhrkbdIMi2LFVfL35d7dUhpdGjpIqR2vA/slOxcG9kvx/u2Z0xxr3YINeexZpqfJaksTthVz50THdngd2E9sZGQKN7GRkdz7dx67K5ZllYUxyw202ZQ/L2d7/cmXZ09TlTfyyxcujxzGJhcnlhtos9bOimNtWuL/8AHquroqhTmRwOceXBjYTyFMd+bMksK6o2TRFG78Hz3kTM/ubHZ1ZoWRLivNDNlRsijXJ00g8ssXpd+5O28OSzRlbHJxksJ+658pbHZ1Zqm2GnsqJyx3yF2vvlIF3CivE4bfzKOCgzIURoFAIMhKsnVEzrxwEbTNzYn4/JkPV68QHxuLTE1Nep+4Ps7IyZncf9TjydrVRIeG8v7SReyq15DsJVUC01of9/b0Kc726Ul8bKxkFuzjzdMN63i5bw/NL1zBtEBBBTcv9uzieNtWKfwK9/PD+/gxvI8fo/iwEZSbNhOA2IgITnXpmMGYyBhvz5zmcNOGxISHK5gHvvAk8IUnb44dJS46WunIZSI/Imdm4jEuJobT3TrzfPtWBfOwjx95degArw4doEj/gVSc+2+mZPodSboQv/jQERg5/ZqRo+TcnjGNaxPGKphFBgYSGRhIwONHeO7YRtu7DzEtWEh6H/bhAwfq1cb/0UMFd0GvX/Fk3Ro8tm6mxpr15GvVRuF9qO879latRNDrV5JZuJ8fL/fu5vWhA5QYpShHejxYsoiHy5aobP/+ogVcHj6E+Lg4BfPPD+7z+cF9nqxZRYP/DqeqRAd7vyE6OJhDjevje+miZK5rnSvN74Z/+kSwjzcApm4FVJZXIBAIsppsVeSQybCrUo0Xe3YRFRTE5/v3sCheAkiYbnp7/iwAuSpUJFf57yMZ3ieOKSpySdbH2VVNXZE791dvNA0McO3UBSMnZ0Le+vB41QoiAwOJCgriyqjhNDhwRMFN3kZNMHTMg769PXkaNMK0QEFkcjnvr13hzqwZxEZGcnfeHNy6dsc4X37UtLRoefUmgZ7POdmpvSR/xTnzJT9/dB3UhUH9JSWu8F/9sateg7iYGL56evLmxDH8H9zHtUOnNP34ETkzE483Jk+UlDirMmVx7/MX+ja2BHo+586cWQS98uL+ogWYFy2Ga8fOmY6b34WYsDA+XLsq/XfpkL3KvaqEvX/P9UkTgISR1VJjxmPqVoCIwC98efoEr//2o2ttraDExcfFcbxdK0mJy9+2PU7NmqOhp8/H69e4M2820cHBnO7eBYuixTDO7yK5PdOzu6TEaRkbU7BHLwzsHQh8+YJnG9ZxY/LEDMlfbNgIXDt1wfvYUSkcrh07S7uJk47wvT58UBqxlMnl5GvVBuuy5YgJD8frwH4+XL1C2IcPHGpcnza37itdxhAfG8uxdq3wvXQRmZoaFkWLoWloiEXRYqnKGB0SwokObaRRz
aJD0h+BFwgEguwiexU5wK56DV7s2QUkKGSJityHq1eI/rYOxq5qNXQsLDAr5I7/o4e8OX6M8jNmS34kKnJqmppKp64S0dDTo9WNOxg65pHMnJu3ZGfZkgD4nD5FbESEwnEBcg0N2j16lmJHrX2t2sRFR3N7xjTi4+LwPnEc43z5QSbDskRJhWMoNI2MsCxRMlPxk5y4mBhpGsvExTXF2pviI0YRExaW/pTVD8iZ0XgM+/iRe/PmAGBVqjTNzlxArqEBJBw9k7tefbYWLkB0cDB3Zs/MlCL3fNsWPl6/ljKYcjklRo1J1d2jFct4cyT1nboflPipCoEvPKXdjNrm5r/N+jf/J4+lkVSXtu0pNnS4wvuyU6YRExamYPbq4H/SaFSRgYOpOHue9M6+Zi3MihThSLPGxEZGcn/RAqosWgokjHolTpmraWvT7NwlhZHawn37sb1kUamcq4KBvQMG9g588XgmmelaW6fMt/HxXBk1Qvpbc/0m8rdpJ/0vNnQ4p//syrON6wn78IHbs6YrdGKS4n38GJYlSlJn206FPK+MoFdeHGneRFJ6y0ycTJ4GjVQOn0AgEGQ1P0WRS8T34gWKDBwMgPfJ72um7GvWBsChdh38Hz0k4PEjQn3foWdjK00HQcLuxLQUmII9eqWoiC2Kl8C0QEECnjwmLjqa0Pe+GObJq2BH2bEogELjEfbhvSrB/WHk6uroWFoS7udH4AtPvPbvJW+TZgp2MrruKKNkNB699u2R1jUW7NFLUuIS0be1I1f5CngfP8aXZ0+J+PwZbXPzDMn0bON6peYyNbU0FTmv/Xsz9B1VifD3l551La2y5RuZQd/WVnp+dfAAhfsNSHF2XfL847l9m/RcuG/K9XaO9RqgrqtLTFiYwuj4myOHpWfXDp1STLcb5smLW+euPFic9RsBPj+4Lyl7ZoXcFZS4RMpPn8XzrZuJi4nBY8umBAVVyeYLTSMjGvx3ON2NRwGPH7G/dnXCP31CTVubastW4tL+9xiJFQgE/7tk+80ORk7O6NvZA+B7+aJ0pID3t/Vx5oWLSFN8DrXrSu7eHD8GwPvLl6T1L+kdO5J0ejYp+nZ20nPydWeQsMD65tTJHGnehN0Vy7KtmDvbihbi0pDvC82Tb8DITgp27wEkTPscbdWcPVUq4Llj20+TIaPx+PHWTen5TM/uChsqEn/e39ITIMzvYzZI/XORyeVKn381Jq5u0qh16HvfhA0O7dsonMOYnKTpt8nFKUXaLdVWk0bxkqZd0vV0STtsSbEuU/aHwpMafnduS8+2VaoqtaNjYSFNIUf4+/PV66VSe84tWqm0e/x4+zaEf/qEtqkpTU+fF0qcQCD4Lcj2ETlImF7z2LyRCH9/Ap49RdfCMuGcKBKmMBPJVbESGnp6RIeG4nPiOAW6duf95e8LkNNaHwegZ2Oj1FyW9DT+b4pkIlfHjuLu3NkpFkv/SkqNm0hUUBAPly0hPi6OD1ev8OHqFXStrCj81wCKDhqSrafJZzQewzOomMVFR2dYpvRuaMisu6Q3O2QELWNj6Tnp6NzvQN2tOznZtSNvz5wmPjYWz1078Ny1A9OChSg5emyKDQsZSb+kaRfxJUB61re1U2YdHYvsOV4n4tMn6TmtjQl6uXJJN6iEffyodEOKiYtrut/zf/SQgCePAai8YDFWpUpnVGSBQCDIFn6KImdXrToemzcC8PH6tYTjQ74pAg616kj21DQ1sa1andeHD0obId5fuZwgqK4uVun07lObIk2NZxvXc2d2wm5UDT09yk2biWP9BujlskGuocHLvbs51qZlhvzMCuQaGlSav5BCvfpwf9ECnm/bQnRICGEfP3Jtwlg8d+3I1tPkMxqPSZXgP3btxbJkqTTt/05TkZnF0DFPwjRdfDyh730zNV38I8RGRKT6TjdXLhofO4XPqZM8WLKIN0cPEx8XR8DjR5zo0JY3R49Qc91GyX5i+snU1Oj04nWa30119DEV8+TT7FmFgr/JOmdJiU/yLrUz7VTJ76Hv3knP1koONhcIBIJfxU+ZE0p6ZMjHG9el3X7quropNi/krpMwvRrx+TMBT59IUyi5ylfIsIKRHk/Xr5WeK85bgHufvzBwyC01EtEhIVn6vYxi4upG1SXL6fLqLWWnTEPDwAAA/4cPuDk146NI2YWOuYX0HB0Sgr6tXZq/7GrcfyaaRkYKx0786OHQqRGX5AiYpIR9+JCuW/uatai/7wAdnr2gQLc/JSXMY8smvA7sl+wlpl98bCzEx6eZdnq5vo/Waht/70ikNqqXdNQuK0k6ahz63jdVe6G+39/pWGW+A2FZshRNTp6lycmz6FqJ2zkEAsHvw09R5PTt7KX7Uz/fv4f/w4QL722rVEVNS0vBrkPdP6Rnz+1bpZGH9KZVM0PQ69fSs7Lz6fy/bbJQSpLefXwqjW1WoWlkRImRo2l25vtC88SDaNPlJ8iZ9IqsVwcPZMs3fkeSLrC/Nf0for5+zRJ/1bS+T5snHQlKStKjT9LD0DEP1ZavokKS3ai+SfJPZtMv6eaG95cvKbXzKclatqzEumw56VnZHbeQcJ5d4nSotqkpRsk2OWUEbTMzbKtUTaizsrhDKRAIBD/CT1ulnbhRIeDpE0lBStytmhRDxzwJx3wAL3bvSuE+K9HQ15eeQ3x8FN4Fv3nN49UrU3WrleR2iS9Pn2aZkhQfFycdNJocwzx5pVEVhfVqaZBdcibFqVkLac3ey317eH34oFJ7MeHh0g0d/x8o+GdPtM3MgIRL3vfXqUHgcw+ldqNDQlJdbJ8cAwcH6dnn9EnCk6wHg4R7SJXdOAIJ6/VSO+4jsTMFivknf/sO0vPNqZOka/OSE/bhA2Efv4+8Je10PVm7mpC3imUo1PedwqHJWYlBbkfpyr8vz54qLatXxoyU8nu+1m1/6Oo5/4cPONS4PoebNlQ4GkUgEAh+NT9ljRwkjHg9WbOK6JAQacrSoXYdpXYd6tQl0PO51OhrGhpi+e38uazEpmIl6cy2s73+pNT4iWiZmPL5/j3uzZ9DXBq7RA0ccqNjYZFwwrv3G460aEKeho2JDg5GTUuLQr37ZkqmuKgoNuXLg23VatjXrI2pWwE09PQIfe/LoxXLpPVM9qnsEvxZciZF19qakqPGcv3v8RAfz5EWTXFp1yFhJ6NMRujbtwQ8fcLrI4dw792XMpOmZvgbT9et4e2ZU0rfOdZvmOIe35+Btrk5NVav40iLpsTHxvLpzm22FilIrnLlMS3kjpqGBqHv3xP85jWf7t7BrWt3qi5Znq6/pgULYWDvQLCPN9EhIeyuUIYC3XugY2mJ/8MHPF23JtUNI363bnKsbUsc/6iPTeUqGOV1QqamRuBzD27PmiHZs6v2Pf/kbdgY+5q18DmVoDTuLFOCgj17Y16kKDGhoYT4+PDx1g18Tp2kwYEj6H6borQsURKbylXwvXCeyMBAdlUog3vPPuhaWxP4wpOn69YQEZA9U6sAlf5dxO7ypYmLieHcX73xOXUSm0qViYuKwuu/fdL6Wm0zszSPqFGFaxPGSvdGy+Rq
1NuzPx0XAoFA8HP4aYqcbZVqCv8N7B1S3S3mULuuwtlTNpWqKFztlVUUHzGKF3t2EfnlC4EvPDnZ+fvIhLqODg0PHeNEx7bK1yPJZBQfNlK6r/T14UO8Ppxw8KxT0+Y/pCDFx8Xx9sxp3p45rfS9eeEiFB8xWjXPslHOpJQcPZaIAH/uL/yX+NhYnm3awLNNG7LEbwDPndtTfadrZf1LFDlIUCLr7zvI6e6dCf/0ifjYWHwvXVS47imjyORyKs6dz7E2LYmPiyPo9Suujf+uiMjV1am9aSun/+yq9M7U6JAQaaeqMlw7diZ3ktE0ZDLqbNnB0VbNeHf+HJGBgdxJovSlRY3V69hbtRKhvu8UbpWAhDWwdbbtVHoFXlZgUbQYtbds53S3zkSHhvJy725e7t2tYEfbzIz6+w4qrO3LDEmP24kJC/0hvwQCgSAr+WlTq7pWVgqLw5MeO5Ic2ypVFY7XSOt+1R/B0DEPLS5exalZC7RNTZFraKCXy4Z8rdrQ+uZdbKtUTXNtXtHBQ6m6ZDlmhdxR09REw8AAU7cC2Fatlqqb9FDT0qLaitU41m+Igb0DatrayNTU0DY1xaZiJSrOmU+LS9cUjr9Ij+yQMwUyGRXnzKf5hSu4tO+IQW5H1LS1pQOObSpVpuyUaRT+a0DWffM3IXfdP+jo4UXFuf9iX6s2urlyJcSzvj6GjnmwrVKVMhMnU2TAoPQ9+0beJs1ocuocjvUbom1ujlxdHV1ra5xbtKLF5evka90WM/fCKdxZlSlLmUlTsalUGR1LS+Tq6lK+dqzfkLo7dlNjzfoU7rRMTGh8/DS1N28jd7366FpZIVdXR01bG307exzrN6T6qrXYJNucZOiYh9Y37+Letx8GDrlR09RE18qKfK3b0vrGHZybt1SY0s1qnJo2p82dBxQZOBgTVzc09PTQ0NfHzL0wJUaNof3DZwrr6TJLscHD0LWyQjdXLooPH5UFkgsEAkHWIIuPT2PvvkAgEAgEAoHgt+X3OZJeIBAIBAKBQJAhhCInEAgEAoFAkEMRipxAIBAIBAJBDkUocgKBQCAQCAQ5FKHICQQCgUAgEORQhCInEAgEAoFAkEMRipxAIBAIBAJBDkUocgKBQCAQCAQ5FKHICQQCgUAgEORQhCInEAgEAoFAkEMRipxAIBAIBAJBDkUocgKBQCAQCAQ5FKHICQQCgUAgEORQhCInEAgEAoFAkEMRipxAIBAIBAJBDkUocgKBQCAQCAQ5FKHICQQCgUAgEORQhCInEAgEAoFAkEPJNkXuwMkruFTvlOqvQM0uAGw/eIZaHYZliwzudbop/faNe08BuPfkBS7VO/Hla3AKt2HhEbhU78Tth88ls+4jZzN54cYUdm/ce0qfcf9SseUACtftTvV2Qxg5YyW+fv7ZEi6B6iSm470nLwB4+caXSi0H4uf/5RdLJsgM2VlfZCUVmvfnwMkradrx8n6PS/VOfPgUAPx43lTlm1mFKt/KKWn1M8nu+udn5gHB74N6dnlcqbQ7OxZPkP4P+2c5lUq707hWBQBkMll2fVqBIX+2pExRNwUzZ0fbLPN/w57jzFi6ldqVSzG+X0f09XXw8fXj3LV7GOrrZtl3fmf6T1xIr3YNKeSS51eLki4mRgYULeCEro52tn/ruZcPc1fvYsW0Idn+rd+FTftOEhUdTfdW9X4rv3ICPzNvZjU5Ja1+tZxZlcb/i3VLUv7Xw5+cbFPkTIwMMDEykP5ra2mQy9KMogWcs+uTSnG0tc62bz5+/pqZy7YxtEcr/mxT//uLEtCmYfVs+ebvRkRkFFduP6ZXu4a/WhSVMDU2YNGkAT/lW6ev3P0p3/mdOHvlLhVKFfrt/PoVZLSv+jPzZlaTU9LqV8uZVWn8v1i3JOV/PfzJyTZFLiOoqalx5sod5q3Zjfe7j1hbmtGt5R+0aVhNshMbF8eSjfvZfeQ8X74GU8TNiYmDupAvC0fXMsrGvSdwcrSle+uM9e6+BocyY9lWLtx4gP+XIOLj46V3h9ZM48vXYDoOmc6DY2vQ0tSQ3s1ZtZNHHq9YP2ckAJ/8A1mycT+Xbj3Cz/8LuSzN6NuxsTTqCQlD7Wtnj+DExVts3HsCBxtL9iybRGxcHCu2HOTo+Rv4vPuIoYEe9auXZXivNsi/tUAVmvdn6rDuHDp9lQs37oNMRkl3F8YP6IiNpRnPvXzoPGwGIWHhNO8zEYCmdSoyY2RPAI6eu87iDft58+4jdrksGNqjFbUqlpBkSy9NY2Jimbt6J0fOXicgMAgLM2MqlyrMxEGdUx3RXbfrGBv3HOfzl6+4OedmZO+2Cu+9vN/zR5eRnN/xL9YWpnh5v6dN/8mc2TaPcXPWcu7qXdo0qs6oPu1UynMBgcHMXrmds1fvER4RiZ21BTNH9WTXkXNsP3gWAJfqnQA4s3UettbmKWQ+cvY6m/efxPP1O2RAmaJuTBzUGXMTI8nOE883zFm5gzuPPVGTy8mXx451c0aio6VJbFwcyzb9x97jF/H7HIiFmTF9OzSiZf2qKqXD7iPnWbvrKG/ff8JAT5ciBZyYOLAzVuYmfPgUwLSlW7hx7xkREZHY2VjSvnEN2jaqoRCGuPh4GnYbw4s377h8+xGzlm/Hxsqcs9vmAeDh5cOs5du4/cgTuVxG+eIFGd23vdL4SM8vVeoLVcKdnArN+/PP8O7sOnKeq3ceI0NGtfLFmNC/I4YGegAs2rCPizcesHPJRAW3rf6aRNVyRenbobFkFhoeyZCpSzl37R4yZFSvUIzx/b77lZzkeTO9dFdGWEQEY2av5uTFW8hkMkoWdmFc/4TymsiXr8FMW7qFs1fuERcfR/XyxZkwoJM0g6BK3ZDVaZWU2w+fM3f1Tp6+8EZdTY6zoy2Du7UgJDyC/hMXcnn3IowN9SX7nq/f0aDbaA6vm46+ro7S/Nq6YfU05Xz6wptpS7dw/8kLDPR1adeoBn07NpbqmcFTluCS154vX0M4cOoy4eGRlHDPz9Rhf3Lu2l3W7jzKJ/9A8jjkYmy/DpR0d1Epjb2839O8z0Q2zhvFzGXbeOTxCgN9PepULsnwXm0U6v9EJs5fl2bdokoeSC+8yjh67jrLtxzEy9sXAz1dalUqwaTBXQEIDAph1ortnLlyl7DwCFydHBjWoxWlk8yGdRw8jSIFnBnWo5VkFhkVTeG63dm6YBwl3POrFB+phT8yKlppXX7j3jNKF3VlVJ92CuEZ8PciZDIZCyb2k8wCg0Ko2GIACyf1p3q5YpJ5bFwclVsO5K9OTWjXuAZx8fEs33KA3UfO4/c5EBtrc7o0r0O7xt/rxYzUFT/Kb6HIfQ74yuIN+xnVpx121uacuXKXifPX4eJkT7Fvo2mL1+9j+6EzTBrclTx21qzddZTuI2ZxYtNstFOp1ADi4uOIjY39biCToSZXXBoYG5fMzjez9Lhy+zEt61fJ8DTx3/+uJzAohENrpqGlqcHURZu59eg5WxeMxdzESFrDlx4yuYywiEimDuuGjZU5e49dYPTMVRR1cyK3nbVkb+6qnUTHxDBrVC9MjBIqQDW
5nHcfP9OvYxNcnR144vma4dNW4OxoS/O6lSW3w6ctZ/yAjkwd3p3w8EiG/rOU8XPXsmbmcJwcbdm/ciqVWw1ky4KxFHDOjbq6uhQ3Q/9ZzrAerahcpjCXbj5k0OTF7F46CTdnByD9NF21/TAXbzzg3wl/YWpsyOu3H3j34XOq8X3w1BVmr9zB6L7tKF+8IB5ePoyevTpF45OcoJAwBk1egoONJUumDpIqPFXyXN9x8wkKDWPSoC6Ymxrx+Pkr7HJZMLpve+RyOT7vP7HwW0Who62l9PvvPnyifrWylCnmRnBIGMOmLWf2ih3MHJWgEAcEBtNx8DTKFHNj8aQBqKnJefHGV2rM/12zm20HzjDkz5a4OTvw5t1H8jrYqJQOdx55MnXxZmaP6Y1LXns++Qdy9c5jzIwNARg9axUG+rpsnDsKNTU1Hj9/jaFByiUDcpmMXUsn0rr/FOpXK0OnZrWRfStnvh8/037gP9SqVIKN80YTHR3Dqu2HaD9oKgdWT0uxBCEtv0C1+l7fsKUAACAASURBVEKV/KeMEdNXMGVoN+aN68sHP38GT1nKqFmrWDplUFpZSCnzVu+ke+t69OnQGP8vX5mycBNjZq9m8eSBKrlPL92VsXDdXto1rsGWf8cSGhHJpH83SOU1kX4TFhIRFcWqGUORy+VMXriRsbNXSyNFqtYNkDVplZSIyCh6jp5Lj7b1mT6iB6FhEdx88AwLM2NK2FhibKDPsfM3FGY8jpy9hktee5xz29J1+Eyl+TUtOQODQug0ZBo1K5ZgXL8OvH3/ibFz1mBooEfHprWk7yzd9B+92zfi1Oa5hIVH0GP0XFr3m0Q+R1s2zR+DiZEB89fsYvDkpZzZNhcNddWa17DwCIZPW8G04X9S2DUvr3ze033kbCzNTejZtkEK++nVLenlAVXDm5SLNx4wZMpSurWux7j+HQkKDiUmJqHNjI2NpfvI2chlMuaP74uRgT5Hzl2n+8jZbJo/JsMzYunFR2rhf+XzQWldbmttwcqthxjRu63UFoSFR3D++n3mjO2j8G1jQ32qlSvKwVNXFBS5m/ef8SUohLpVSgMwfelWjpy5xpi/2pM/jx33nrxg+rKtREZF07Vl3QyFNyv4LRS54NAwZozsQf689gB0a/UHB09f4dqdJxQr4ExQSBjr9xzn70GdqV2pJACTh3SlcquB7D9xOc3e3aDJSxT+62hrce/IKgWzCs37Z1jm2Lg4PgcEYmdtkWG3567dZ9Hf/aWp594dGlGrwzDi4+LTcamIuYkRs0b3kv4P6Nqcrf+d5sEzLwVF7t2HzxxYPRU1NTUF9/8M6y4957a14uDpq9x9/EKhsq5evpg0wqejpUn3VvXoM24+cfHxqMnlUgWirampsO5j0Ya9NKldgW6t/gDAObct1+89Y9X2Q8wb11elNPXx9cPdNS/FCuaTZEyLVdsP06ZhNakycsptQ2xsLMOmLU/TXXx8PLksTJkwoJNkpop8F2884IHHK05tno2NVUJvuIR7fskPdXV11OTydNfD9EhWWXdqVpst+09J/9fuOoq5qRGLJw+UKqJyxQsCCaO7G/eeYOrQbjSsWR5Aii9IPx28fT9ibmJEjQrFkctkONhYKoTB29ePfp2aSmXTKbdNquHQ1dFGLpOhoaGuEOZ1u4+Ty8qM6SN6SGaF3QZQve1Qdhw8kyL8afkF6dcXqoQ7NWpVKkmdyqUAyG1nzaQhXWjZdxKvfN6Txz5Xqu6UUblMEWnJQT5HW6aP7EGrv1T3K610T40S7vnp17mp9H9Al6YM+HsxcfHxyGUyrtx+zN3HnpzYNBu7XAl115Qh3Wjaazxe3u/J65Aglyp1QyI/mlZJCfgaTEhYOI1qlpfKVIF8uaX3DWuW48CpKwqK3NFzN2hetxKQdn5NTc71u49hYmzI1GHdUZPLcclrj897P5ZvOaCg2FhbmNK3Y8Ioip6uNi3rVWHywo1smjdaGkH9s0191u06hvc7vzTLSnJ6tKlP8UIJ5TZfHjsa16rA1TuPlSpy2lqaadYt6eUBVcOblEUb9tG8XhWG92yd4t3Zq/fweOnD2W3zsDAzBhLSzPvdRxZv3M/qGRnf8JJWfKQVfmV1uYmRATOXbeP63SdS+Tl79R4a6upUKVMkxbeb1q3E4MlLCA2LQE83wf+j565TvkRBTI0NCAgMZsv+U8wf31eqK/LlsSMyOpoFa/fQvklNNDV+rmr1Wxw/oq+rIxW8RKwtzPgc8BUAz1dvCQuPoGrZotJ7DXV1XJ0cePbSO02/h/dszZ5lk6Tf1gXjUthZP2ekgp09yyaxZcHYtIWOj08oGPKMjcbFxsURGRWNPEmvNXGEMJ6MKXLJkctkWFmY8jUkVMG8ce0KKZQ4ZdhYmhEUrOi2UH5Hhf9mJkZERceksJeUuPh4Hj57pZBeAIVd80rppUqaNq5dgYOnrjJ+7lo8X71NU/bY2FhevH5H5dKFFcxLFFY+xZGc5n8oNlCqyPfQ4xWuTvZSg5NV5LI042uS+H34zIvq5YspHVn0fP2WiMgoalQonuKdKulQsZQ70TExdBk6g8u3HylM8wO0+KMKUxZtZNnmA3z+8jVT4bn90IPKpdwVzDTU1alQsiA3H3hk2L/06gtVwp0ayUfr3F3yoq2liYeXT4blTK6oFHZN8Ou5V9p5OZG00j01kirhkBAv0THfy+u9py9wcrSVlDhICLOGujoeXqnHjbK6QRXSSytl3ylfoiCdh85g3/FLRERGKbxvVqcSdx558u7DZyBhyv712w/Ur14WyFx+vffkJZVLuSvM1BR2deJzwFf8A4MkM5dk4TA21EdLU0Oh02zybco3qTtVKJisnjU3NeJTKnGUHunmARXDm0hcfDyPPV9To3zKOgbg9qPnuDo5SEpcIlXLFuXW/WeZCsOPxEfyujxxlO3Aqe+7eY+eu07tSiWVKlxVShdGT0ebExdvAQlt9omLt2jwLY/df/qS2NjYFG1NtbLFCAoJS7McZRe/xYhc0k0Richk3xWb958SjvGo0W6ogp3wyMh0Rzvsc1mmu5vS1ckhhQxh4RFpulFTU8PYUB/fjxk7YkRNLqdSKXfW7TpKgXy50dLSZOW2gxRxc8LSzCRDfr1++4HVO45w59FzvgYlFNIvQSEp7FmZp/Q3JCycNduPcO76PT4HBBEXF0doWDiVk/VQjA1Tpg2QosFPin/AV6JjYhg5YyVjZq2WzKNjYqSCo0qalinqxs4lE1m+9QCNe46nXDE3xg/ohGOSijORL19DiI2Lw/TblGAiqu4ctjI3Vfivinx+n79IU5A/wvaDZzl46gq+Hz8TFR1DVLSiou/n/4VKyRQh6d3nQLS1NJWWA1XSwdzEiD3LJrFy2yEGTFyEtYUpI3q3kXqqfTo0wtnRltXbD7N0036a1a3E8F5t0NfVUTl8IaHhSsu4qZEhr3w+qOxPIunVF6qEOzUM9FLmFyMDvUwpsYm9+aQYG+rx6UugSu7TSvfUSB43icsQEsvrB78AXr7xpWTD3gr2YmJj+eSfIJeqdUNm5EmQKe1O6/J/hr
DtwGkWrt/LrBXb6Nm2IV1a1EEmk5E/rz1uzg4cPH2F3u0bcezcDYoVdJY6U5nJr+/9/Lnz6Dn7T1yWzOLiE5bWfPIPlMq4srWNqS2XII36URlJ1/xl1o9E0ssDqoY3kYDAIGJiYjEzUV7XBYeGS0t2kmJqbEB4ZBRR0emXu+T8SHwkr8shYe328Gkr+HtQF2JjY7lw4wHL/1G+41VNTY2GNctz8NQVmtapyI17TwkLj6RWxYSZmZDQMLS1NFOkvalxQrwntsU/k99CkUuvw6mno4NcJmPPsr+lNViJaGunvl4kuylawJnz1+9Lw+2q8s+w7rTqN4lqbYegr6tD6SKuLJ6cZCdTKhGS9Ly7sPAI2g2YSu3KpVg/Z6SkBNbuODyFOxkp/Rs8eQkhYeHMHtObvA42yGUyxs1dm6led3ISlYrJQ7pSPMk0HySs6wPV07RAvtwsnNgf34+fmbZkC+0GTOXUljkpFBdjQ33kMhlfkymyQSFhKsmcPMpVkc/UxJBHz1+r5H9qrN5+mLU7jzJvfF9Kurugrq7GwVNXmLp4s2THzNhQ6VmHAGYmhkRERhEeEZmiYlElHSChtzvmr/YM7NqctbuO0nvMPLYvnkARNycAalUsQa2KJbj75AXj565l8OQlrMrAdImJkYHSnn7A1yCMUln4nxbp1ReqhlsZX5Pl/7j4ePy/BGFhkjDakJprZekTEhauxF6I5Fd6pJXuqaGsrCdFT1ebAs65FRZ4J2JkmJAWWVk3ZOaUKS1NDbq0qEvHZrU5cvY6kxdsIDY2VjoZoGmdSuw6cp7e7Rtx9PwNOjWrreA+o/lVT1ebVvWr0rXlHyneWZqrlla/E6rkgYyE18RQHzW5PNW8aGKozxPP1ynMAwKD0dHS/K7EKckMGc3fqqAsz1UpUwQtTQ3OX79PdHQMBvq6lCnmltLiN5rVrUTTnuMJCAzm2PkbVC1bVOqYmRgZEBEZRVh4hEI7FBCYEJbEcpSRuuJH+S2mVtOjiJsTcjU5b3z9sLU2V/hlxYhIZmndoCr3nrxg/4lLGXJ388EzrMxNuPHfUi7uWsDccX0URuMSdysmZoxEkhaWZy998A8M4q+OjSW3QSFhvPv4Od3vx8fHc/XOE9o3qYlzbltp6ubZi4wPCSfuqoqI+j4FoqerjZtzbp699E6RXokbCTKapjZW5swe0xv/wCC8ff1SvFdXVyOvgw3X7j5RMH/w9GWGw6SqfAXzOfL05RvpQFdlcZN8aig5l28/ombFEpQtVgB19YTp76fJ0qFAPkfOXrundBTU2dEWDXV1zl69l+KdKumQ3H7/zk0pkN+R+98OUE5KsW87zu4peZc0zJGR0QpmpYu4cuH6AwX5Y2JiuXzrcYppoPT8UoWMhjspSQ8AB7h08yExsbG4OCVMq5mbGqUolwGBwbz3S5kH7jzyVPh/66EHkVHRuDrZp7CrjLTSPbOUdHfBy8cXbW3NFHGjr6uT6bohs2mVFmpyOQ1rlKNlvarcTZLnGtYoh5e3L2ev3sPb109agJ4cZflVmZwl3V146PEKGyuzFHGi6oaFn40qdUtqZDS8ampq5M9rz9lUjvwoU9QNDy8fPn5WPOT43PV7CuXb3MSQgGQduseZ7AhnNPyJo2wnL93i1OXb1KtaJsWmx6S45LXHxcmeU5dvc/rKXRrUKCe9K1rQGU0Ndc5fv6/g5ty1ewlLCfJkvK74UX7PXJoMU2MDujSvw8gZKxjQpRmFXPIQHh7JkxdvKOGeH3eXvKm6fenjm6Jyzp/XTukUSkapXr44LepVYcys1dx6+Jwa5Yujr6fDJ/9Anr/yYVC3Fkrdeb5+h7qaGo88XqGhro6WliZ2uSyk3WgONpbYWJqxYutBRvVpS2xsHJv2neDlG1+KF0ooGHa5zFFXV2PbwTM0r1uZ95/8WbR+n0pTiTKZjNy2Vhw6fZVC+fMQGRXN1v9O8eFzADZWaTd0ydHS1CC3rRVb9p8il4UZMlmC0jWwa3P6T1yIupoalUsXRq4m55X3ezQ11WlQvZxKabpp30ksTY2lRnT30QsYGegprElJSvfW9Zi0YAMOtlaULuLKyze+zFu9K0NrjBJRRb7q5YuRz9GOXmPmMbBbc8xNjHju5UMRNyfy5bHDJY8dG/cc5+qdx9jlssDc1DjFjkNHO2su3njIg2deGOnrcv7GA45duJEiXDsPnWXwlKW0a1QdNTU17j72pFPzOpgZG9K2UXX+/nc94RGRuOS1x9fPH3V1NaqXK5ZuOly48YB3Hz5RvFB+9HV1uPXQgxev3+H+bTRu9ortVCjpjoONJUEhYWz97zRF0tiFlj+vPYfPXKNWpZJoqKvhaGdNl5Z12X30PKNmrqJT89rExMSyctshIO3zFpX5pSrphTs1rtx5zJKN+6ldqSTvPwUwfu5aalYoLn27ZGEXJi/YyIGTV6hfoywBX4KYtGCD0rWyV24/Zunm/6hVsSSfAwKZ+O8GalcqmWr+TU5a6Z7ZxdRVyxUlv6Md3UfMpk+HRthZW/DlazC3HnrQo00D9HS1M1U3/EhaJeXdh8/sOXaByqULY2lugve7j5y8dItW347SgYQRkaplizJz+TZpAXoi6eVXZXJ2a/UHjf4cy8DJi2ndoBpG+nr4+vnz5u0HpRtxfgdUqVtSIzPh/atTEwb+vQhTE0Mqly5MSFgEb9/70aZhdSqULETxQvnpO/5fRvZqg4mRAUfOXuf05TtsnDta8qNM0QLMW72TDk1q4ersgOertyzasC9TlwMoC396NKtbia7DZhIdG6vSBoymtSuydudRIiKiFDZF6Ovq0LNdQ6Ys3JQgS14H7j15wfw1u+nVvqE0uJGRuuJHyRGKHMCwnq0xMzFi3e7jvPvwCT0dbdxd81K5dNrrNhas3ZPCbNO80Qrn2/wIU4d2o3jBfOw8fI7DZ64RExODqbEhpYq4puqmkEselm8+QJv+UyQzXR1txvXvQPO6lVFXV2PhpP5MW7KFCs37o6GhLp2jc/ryHQAszUyYMaIHC9btZeXWQzjYWjKsRyvOKBmZUcas0b34+9/1NPpzLIYGerRrXIMRPdtw6vLtjMfBsO5MnL+OOp1G0OKPyvw9uAvVyhVlyZSBLNm4n7U7j6ChoU4e+1z81amJ5C69NNXR0uTfdbvx/eiPpoY6BfI5snrm8FQrrGZ1KxEQGMTSTf8xZeEmnB1tmTioM+Nmr8lwmFSRTyaTsWr6UGYs28bI6SuIjIrG0c6aOWMS1h/Vq1aGq3ee0G/CQtTU5OxcMjFFAzegSzP8/APpMmwGcpmc6uWLsXL6UNoP/EeyY2VuwoZ5o5mzcgc9Rs9FTU1Oofx56NyiDgAje7dFW0uThev38vnLVyzNTBjyZ0uAdNPBQF+X/ScuM3fVLmJiYnCwtWLq0G7SQv2IyGhGz1qF/5eEadDyJQsyspfi2XxJ6depCcOnr6B57wk42FhyaO10zIwN2b5wPDOWbaPT4OnEE0/ZYgXYsmBsmlOryvxSFVXynzJG9GzN1btPaN1/MgA1KhRnfP/vO+Ccc9syY2RPlm35j
wnz1mJqYkinZrXR0Um5Vmr9nJGs3HaI1dsPI0OW4NeAjiqHIb10zwxqcjmrZg5j3updTF6wkcDgEEyMDKhQopA0IpyZuuFH0iopOtpaPPF8zdb/ThMSGo6luTGNapanW7KzOpvVqUTf8f/Sp0MjBfP08qsyOa0tTNm+aAKzV+6g/8SFxMTEYm1pRos/Uu7Q/V1QpW5JjcyEt1bFEkwb8Serth1mxdaDGOjp0qT291uaVvwzmNkrdzB4ylKCQkJxdXJg5fShCiNyLetV5u2HT/QZN5+vQSHkccjFsJ6tGTt7dWqfzVD408Mlrz2W5iaEhIZLy0bSomHN8sxcsZ3GtSqk6Dj179wUQ31d5q9JaJ9srMwY0LWZwjR/RuqKH0UWn5Xj9oJ0OXb+BjOXb2fZ1EHkz2uPXCYjLDyCrf+dZvHG/dw5vDJTI0gCgeDHqNC8P6P6tJWOcRH8vly88YB+ExdyZc9ipZtKBAJltPprEuVLFEx1tiwpQSFhVGzRn+X/DKF8ibSP/fnV5Ig1cv+fOH7hJjUrlsDVyUFS2HR1tCnhnp/IyCiio2N+sYQCwf8uolubM9h15Dx1KpcSSpxAZTxfveXhMy+a1q6okv0Dpy5jbmqU5qaI34UcM7X6/4WC+fOw/cAZGtYoR748dkRERPLY8w0zl2/jj2pllF7JIhAIBP/rBAWHEhgcytmrdzl79S57lk361SIJcgDevn4EBYcyYf46/qhWJs01qtExMfh+9OeVz3sWrtvLkD9bpbkp4ndBKHI/ma4t6xIREcmwf5bx3s8fNbmc3HbW1K9WJsN3tgoEAsH/ClfvPGHkjBWYmxozZ2yfFAcNCwTKGDR5Ma98PlCptDuTBndJ025AYDDNe09EXV2N9o1rpnlr1O+EWCMnEAgEAoFAkEP5/ccMBQKBQCAQCARKEYqcQCAQCAQCQQ5FKHICgUAgEAgEORShyAkEAoFAIBDkUIQiJxAIBAKBQJBDEYqcQCAQCAQCQQ7lf0qR23vsIq3+Uu0QyZdvfKnUciB+/l/StDdm9mpmLd+eFeJlK6qE3cv7PS7VO/HhU8BPkipnMHP5NrqPnJ2mnQMnr1CrwzDc63Sjx6g5P0ky1ckp+VRVKjTvz4GTV6T/dTqNkO4hzi4yUn/kBJLHYVax/eAZanVI/1LyrCa98GRXeDP7rcFTljB2Tubugc4uflYbkN310XMvH1yqd+Kx52sF88TwLd38Xwo37QZOZdzctcDvmTZp8T+lyNnbWFLYNa9Kdk2MDChawAldne9XwPSfuJBHHq8U7Lk55yavQ64slTM7SB72514+9Boz7xdK9P+HzwFfGT17FZ1b1OHI+hmM7dfhV4uUgpySTzNLqSKuWJobZ5l/m/adZM3OIwpmGak/fpS4+HhqtB/62/klyBzK2o5fza9sA5LXR8rK24+QL48dxob63H7goWB+6dZDNNTVuXD9gYJ5VHQMjzxeUbqwa5bJ8DP5n7rZoVRhF0oVdlHJrqmxAYsmDZD+R0RGceX2Y3q1a6hgr2PTWlkqY3aRPOynr9z9hdL8/+LFm3fExsbRtlGN3/Y6l5ySTzPL1KHdstS/s1fuUqFUIQWzjNQfP8q9xy/w/xL02/klyDiptR2/ml/ZBiSvj5SVtx9BJpNRqrALtx4+p1PzOpL55VuPqFetDIdOX+VrcChGBnoAPHzmRWRUNKWLCkVOKUfOXmfz/pN4vn6HDChT1I2JgzpjbmIEJAzDX771iAFdmzN29mqevnjD/PF/UbNiCb58DWba0i2cvXKPuPg4qpcvzoQBnTDU103xnSkLN/H4+Su2L56gYL5m5xE27z3JmW3z2HHoLGt2HOHk5oSpr9sPnzN39U6evvBGXU2Os6Mtg7u1oHRRN7y83/NHl5Gc3/EvQcGhdB42g5CwcJr3mQhA0zoVmTGyJ4OnLEFXR5t/hnUHEoZk3ZxzExIazr7jlwgJC8fJwYaRfdoqNAKnLt1m0YZ9vPJ5T2RUtGRep3IpFv7dP8047TVmHmYmhkwb/qdk1mnIdDxfv+XynsXIZTIAVm47xP4TlziybgbbD56Rwj5x/jq2HzwLgEv1TgCc2fq9Z/bm3UdGzVzF3ceeGBnoUadyKYb1bJ3qPbDbD57hzJW7NKxRjmVbDvDuw2eszE3o1qqewhUncfHxLN9ygN1HzuP3ORAba3O6NK9Du8Y1Ug2rqn4DHD13ncUb9vPm3UfsclkwtEcralUsIb2PjYtjycb97D5yni9fgyni5sTEQV3I52gr2fH29WPSgg3ceuCBro4WTWtXQp6GcjZn1U62HzhDfHw8BWp2AeD8jn+xtjDFw8uHWcu3cfuRJ3K5jPLFCzK6b3tsrc0l9xWa92ft7BGcuHiLjXtP4GBjqfQOye0Hz3D++gNqlC/Giq0H+fApALtcFgzq1oI6lUul61/yfApw/MJNlm0+wMs37zDU16Ne9bIM/bMl2lqaQMI0RJv+kzmzbR7j5qzl3NW7tGlUnVF92qWQ76GHF8u3HOTBUy+CQ8NwzWvP2P4dcHfJK/nVsu/frJ45jGlLtuDh5YOhgR6tG1SjX6cmyL7l2cFTllAwnyN+/oEcOn2VkLBwCrvmZcKATmleyVSheX9G9m5Lo1rlgYSrdmav3M7Zq/cIj4jEztqCmaN6UsglD5/8A1mycT+Xbj3Cz/8LuSzN6NuxMY1rVSAuPp6G3cbw4s07Lt9+xKzl27GxMufstnkKZSiR9NLYy/s9zftMZOO8Ucxcto1HHq8w0NejTuWSDO/VRmmZ2nf8EhPmrSUqOkYqn9NH9KBZ3UoZLkNp+QUQFhHBmNmrOXnxFjKZjJKFXRjXvyM2lmaSH09feDNt6RbuP3mBgb4u7RrVoG/HxlKaqUJ6ee1H0iwjZEV4MyLLcy+fVNsOADW5nBVbD7L1v9N8DQ4ln6MtI/u0paR76h2G9MrajXtP6ThkOg+OrVHIX3NW7eSRxyvWzxmZJW3Aj9QfifXRlKHdlJa38QM60n/iQi7vXoSxob70Tc/X72jQbTSH103HOff3elsZpYu4snzLQel/TEwsN+4/Y+X0oVy984RLNx9Sv3pZAG4/eo6DjSXWFqaSfVXSJj0dJSPt14+Q7Yrcuw+fqF+tLGWKuREcEsawacuZvWIHM0f1lOy8efeRvyYsoE2Dagzu3gJXJwcA+k1YSERUFKtmDEUulzN54UbGzl6tMFKWSIOa5djy3yl8/fwVCuWRs9epV71sikonIjKKnqPn0qNtfaaP6EFoWAQ3HzzDwizl9IyToy37V06lcquBbFkwlgLOuVFXTz3qlmzcT9tG1Tm45h+0tTRZvHE//Scu5PSWuejpauP56i2DJi9hwcR+VC1bhEfPX9Nt+CyG9mhFi3pV0o3TMkXd2H7orPQ/ODSMe09eoKWlySOPV9L0z60HHpQp6pbC/ei+7ZHL5fi8/8TCif0A0NHW4pXPBwAmzFtHnw6NmDiwEy/f+DJ61ioszIzp2bZBqjJdu/uEqOgYlv8zBFsrM85du0+/CQtwcbKnWAFnAKYv3cqRM9cY81d78uex496T
F0xftpXIqGi6tqz7Q35fuf2Yof8sZ1iPVlQuU5hLNx8yaPJidi+dhJtzQn5avH4f2w+dYdLgruSxs2btrqN0HzGLE5tmo62lSVx8PD1Hz8XESJ8Nc0ehrqbGpn0nOXP1bqpTav06NaGomxMD/l7ErYPLpbj0/fiZ9gP/oValEmycN5ro6BhWbT9E+0FTObB6mkJnZO6qnUTHxDBrVC9MjPSVfgfg8q2HxMXFsXHeGEyM9Nlz9AKDpyxl19KJFMznmCH/Tl66zdCpyxjYtRmVyxThvZ8/05du5a2vH8v+GSzZCwoJY9DkJTjYWLJk6iCFspWUgMBg8trnok/7RuhoazFj2VYG/L2YM1vnSmUvJCycCfPXM334n7g6OXDroQcD/l6Eob4unZP0mhdt2Ee3Vn9wbOMs4uLimLl8G91GzubU5jkKDX9a9B03n6DQMCYN6oK5qRGPn7/CLpcFADK5jLCISKYO64aNlTl7j11g9MxVFHVzIredNbuWTqR1/ynUr1aGTs1qI0tFkVc1jcPCIxg+bQXThv9JYde8vPJ5T/eRs7E0N1FaphrUKIumhjpj56zhyp5FAFIDmtEylJZfAAvX7aVd4xps+XcsoRGRTPp3A+PnrmXNzOEABAaF0GnINGpWLMG4fh14+/4TY+eswdBAT+VRXlXz2o+kmapkRXgzIkt6bcfxCzcpW6wACyb2Q09XhxVbD9Jvwve2QhmqlLX0+NE2IKvqD7lMprS8aaqrYWygz7HzN2jTsLpk/8jZa7jktU9XiQMoXdSNf5Zs4fXbDzjalzA+0wAAIABJREFUWXPn0XPi4uIo4uZEpdLuXLjxQFLkbj3woFQRxdE4VdJGFR1FlfbrR8l2Ra5HsoqqU7PabNl/SsHMw8uHOWP70LBGOcnsyu3H3H3syYlNs6XCPGVIN5r2Go+X9/sU632KFXDG1tqcY+du0K3VHwD4vPfjkccrhVGIRAK+BhMSFk6jmuWxsUroPRfIl1tpGNTkcnS0tQDQ1tRUWDenDDMTQ0b0biuNjPXv3JTV2w/j4eVD8UL5OH/jAQXzO1KjQnEAirg5Ub1CMZ699EZTI/0kKVPMjZnLt/HJPxALM2Mu3XyIU24b7KwtOH/9PoVd8xIXH8+dR540r1s5hXttLU3U1dVRk8uVhqVD01o0qV0RgDz2ubj5wIOrdx6nqchFRkUzZWhX7HNZAlCjQnEKuzlx7c4TihVwJiAwmC37TzF/fF9pBClfHjsio6NZsHYP7ZvUTDXs6fkNsGjDXprUriClvXNuW67fe8aq7YeYN64vQSFhrN9znL8HdaZ2pZIATB7SlcqtBrL/xGXaNKzG6ct38Pb1Y9O80ZJCP2NkD+p2HplquLW1NKWGMWlcrtt9nFxWZkwf0UMyK+w2gOpth7Lj4BmFcvHuw2cOrJ6Kmppaqt9JjIdJg7tIvcb2TWpy/vp91u44ytxxfTLk3+IN+2jdoJokh0tee2wszWj451juP31JETcnAOLj48llYcqEAZ3SlK1KmSJUKVNE+j/kz1Y06TkOP/9ArMxNJPMeretTyCUPAGWLFaBnu4as3n5EQZGzMjdhQJdmUqM0cWBnqrQexH8nL9O6Qfq92Is3HvDA4xWnNs+WynYJ9/zSe3MTI2aN7iX9H9C1OVv/O82DZ17ktrNGV0cbuUyGhoZ6mmU9I2nco019ihfKByTk+8a1KqRapjTU1ZXmqcyUodT8SqSEe376dW76PS66NGXA34uJi49HLpOxfvcxTIwNmTqsO2pyOS557fF578fyLQdUVuRUyWs/mmaqkhXhzYgs6bUd6upqzBnbG41vyt3fgzpTokEvqa1QhqplLS1+tA3IyvojtfLWsGY5Dpy6oqDIHT13g+bfRpPTwyWvPYYGetx64IGjnTWXbj2iZGEXNDXUqVTKnSkLNxEfH088cPexJ+P6d1Rwn17aqKqjqNJ+/Sg/fUFPLkszvgaHKphpa2lKmnEi956+wMnRVoogADdnBzTU1fHw8lbqd4Pq5Thy9rr0/+i5GzjntpVG+JJiY2lG+RIF6Tx0BvuOXyIiMupHgqWAm3NuSYmDhB6wvq4OnwICAYiIiEQuV+w5qcnlxMfHq+y/oYEetx89B+Ds1XuULuJGqSKuXLh+HwCPlz6EhIVnas6/RKH8Cv9zWZryKeBrmm4M9XWljJqImYkhn7+5u//0JbGxsVQuXVjBTrWyxQgKCUs1TVXxOy4+nofPXlG1bFEFO4Vd8/LsZYK/nq/eEhYeoWBHQ10dVycHyc7TF29wc3ZQGJWVyWQp4kMVbj/0oHIpdwUzDXV1KpQsyM1kC3Ab166QrhIHYGyorzD0Dwm9zkT5VfUvLDyCZy+9qVxGMS3y57XHxso8hXzN/0jZGUgPG6uEnndQsrKeODqaSJmirvj5fyEgMFgyc3VyUBhZ0NbSpIibU4pwpsZDj1e4Otn/X3tnHldj9gbw723fd9n3JSEVKlmyy5YZMtbRkH3IGgZjz2AYRhjrWH+WsQ/Gvu9LESWllK0klVRaVPz+SHe63VvdUoOZ8/187h/3vGdfn/c5zzmvVCAoCBWJhNKlTHid9KZgzzkoTBvXrVVF5r+ZiWGBYyo3HzOG8iKnsARQppQp6RkZ0nbzC3yIk52VjO1n/drViYl7TWx8wXZ3yva1f6rNSqK8Rc1LdtzqOTR0ejraMmuFMuQ11j6G/NaAf2L+AOju3JxbASFERMUAWQqfR8+i5GSFvJBIJDSyssDHPys/l27606xR1nht2rAe8QlJ3HvwiJDwZyQkJWNvLbt7VVDbKCujFLR+FQclrpHbeegsh05dIfJFDG/TM3ibni5nc1TKxEhG8AGIio7j4eNIGrkMl3HPyMzkZaziTu7S1pHVH/ahy5cx48jZ63TJoeXLzep549lx8DTem/bx85odDO3jwoAezoWy/VBEzj19GT7IaU4O1qzadpDz1+/QzM6KoIdPOH3lNr9MG6E4XC5Usg057wbT3smOCzfusuTH7ylX2pT5K7cRF5/IzbtB1KpaAWND/ULnP3cYiUQCBQiZeZX5/YdCJ71JRktTQ/p2mo2JUVZarxPynoQKijs27jXpGRlMXrCWqT+vlz5Pz8iQaiiev4wFoE1f2dN7KWlp0rfA2FevMVFQX/p6Orwo4Bqa3CS9SVFY9yaGBtLti2yUfYvW15W3DTXS1yXmleyEUFB8iW9SAPl2hqz2eJ2QlCs+Ezl/ubnie4+t+08S+ugZySlpeb6U6OUqg6F+VtvGvnot7Qt6utpy4QwNdJU22I+OeYWpkUGezx89i2L9H0e4FfBA2u9e5SqzMhSmjRX2YSVf3P5Or+hjKC8UjnWQtt/z6FhuBTzgwInLUj/v3r8DsmzF8qtnUL6v/VNtVhzlLa68ZOVHvl9kzbd5h1F2rH0M+a0BJTF/KKJWtYpY1qjEodNXGN6vK8fO3cC2bg2lhX3IelHcduAUCUnJBIY+ZsHkLO25gb4u9WtX4+JNfwz1dalQthRlzWXzWVDbKCujFLR+FQclKsit3/kXG3YdZcn072lkZYGamiqHTl3
Ba8X/ZPwpkpt0dbSoU6Myyz7s3+fE0EBXYXrZ2rej52/QrllD7oc+ZvnsvA8OaGqoM6BHB/p3b8+Rs9eZs2wzmZmZDO7duXAFLST1a1dj7EBXRvz4K7rampQ1N2XayH5yb9r50djGkn3HL3H3/kPepqdjZ22BupoaFcqW4tJN/yz7ONs6RcrfR8qxCjE21Cc17S3JKaly20WQd5sqQ3Z8c8YPpEFd2e0IyQfNp662NioSCXtXzZKzb9TSyrK7MjE04H6ovFYjMSm50HkyNtRX+AYf9zpBelJKmkeUq3BFb/wv415TysSwUPEZGeghkUgUCkZx8YkY5pp4CuoPtwNDGTrlF6aP7s/iqcPR1dHiRcwrnHqOkfObkPRGZsLMfis1y1EGRZqFmLjXVK2o3PUpJsYGBDx4pPBZckoqfUd70d7Jjk2LJ2NumiX0tu8/Uam4c1KYNi4OSmIMFdRXdHW06Nm5JQO/6Sj3TJnrXpTta/9Um31seYszL0ChlQZKjbU84nz1OlGhu+J85f2suOeP/Ojm3JzdR84zvF9Xjp6/gVv39oUKb2ddm/m/befSTX/MTAxlDkw1s7Pi5p0gzEwMcbCWtyUvqG2KIqOUFCW6tXrZN4C2zRrS2LYOampZWz2KFkpFNLKyIOxpJFpaGpQvYybz09ORf2PPxqWNI6cu+nDyog/WltXlVJqKUFVRwaWNI990asntwFCFfrLtTFLfFs8W7IETl1nlNZabB1dzcP08qT1CNmlv02VOs+bGwbYOD8KecvryLZrZWUlVwC0crLlyKwDfgAc42sp3zmw0NdSLdTu5IGzq1kBDXY3zH7Z+szl3zQ89HW1qVc37RGJB6OpoYVmjMkEPn8j1lWzjWmvL6qioqvA4MlrOT7YmwKJ6RYIePpHb+r8b9LDQebK3rs2F63dl3pYzMjK57HNPbntHWRIS3xD6OELG7cyVW9Suodi2My80NdSxtqwu1xYh4c+IfBFT6PxduxVI+TJm9OrSSmoEfD/0sUK/vv4PZP6fvnKL0mbGMm/3d+6HkZmZKf3/OvENtwJCsFRgIqGIujWrcP/hY4WXmgY9fEpsfAIj+38lXYQTkpKJeBEj409TQ520tLzHH5RMG+dMPz0jg3c54i7qGFIUl7I0srLAPziccqVN5caNej4HvnKmrUxfK442Kw4KKm9R8lKca4cyYy37Roic5goAgbkuxy3qGlDc80d2nIrGm0sbR8KeRHL2qh9PIqPp0MJe+iwzM5PI6Nh847WsURkDPR32HDlP04ay15s0t69PwINw7j98UiQTpKLKKCVBiQpyVSqU4YrvPe4GhfH4WRRb9p3g2IUbSoVt6WhDrSoVGDRpEccv3OTeg0dcuunPrxv28CY5Nc9wnVs7cjc4nEOnr+a7rRoRFYP3pn34BYYSGR3LtduBnLzkg80HI83caGqoU7l8abYdOEVEVAyRHzGJvE3P4HHEC2JevSYgOJx7Dx7JTWB9POYyecHaPOOoVbUCBvq67D16gdaOtlL3Fg7WnL9+h7hXCTTK53JDi6oVuH0vhKu37vH0eTQpJSzU6eloM7SvC3O9t3L03HXCnjxn37GLLP19D8P6ueR5tYmyjBnoyuY9x1n6+x58/R9wOzCUfccucvjMVSBL5T/A1ZnJC9aw4+Bp/IPDuOF3n017juEfHAZAu2YNKW1mzNg5K7gbFEZgyGNmL9vMk8joQudnwDcdiHudwA8L13Ev5BF37j9kzJwVADLGu4VBV0eLCV6ruOJ7j5DwZ3gt30pAcDjuCrQHBTHW3ZW9R8+zfudfhD6O4JJPAGPmrKCZnVWhDXCrVixDRFQMJy/58uz5S05c9GHF5gMK32h/33WEgyevEPbkOTsPnWXznuNyGvCU1DTGe63iblAY/sFheMz0Rl9Ph65KXjXRuoktNatUYNjUJZy5epu7QWHsOXKekPBnVChrhpqaKjsOnSEiKgYf/2A8ZnrLXWlUq1pF/jpzjZBHETx6FqUwnZJo42yqVynP+3fv2bT7GJHRscTGJxR5DCmKS1nce3Yk/OlzxsxZwWXfAAKCwzlx0Yd1Ow4rHYcyfa042qw4KKi8RclLca4dyoy1SuXMKWduyprth0hNe8ub5FRWbzvIw8eRMnF9zBpQnPMH5D3ejA31adnYhoWrd9CkYV2pGQHA77uO0rrP+HzrU0UioaFVLa7euicnyFl9uIkgNPwZ9taFF+SKKqOUBCW6tTp6QHeiY+MZ4LkAFYkKrZvYsnb+BPqNmVdgWFUVFdYt9GTJ+t3MWbaF+MQkjA31adqwnlS7p4iy5iY0qFuDWwEhdGrlkKc/bS1NAkMesf3P0yS9ScHczIiubZvg3qtTnmG8PAcxc+lGnN0m0aOjE7PGDSiwHIp4m56OvXVtGXsuANu6NVm3YAI62lqEPo5gQI+8r+SQSCTYW9fm5EUfmRNM9jaWpKSkYVmzcr6TS6dWDly9FcioGd6oqqqwa+XMIpWlMHh81w0DPR2W/r6HyBexlCttyuiB3QutLldEK0cbVs4dw8otB9iw6wjq6mpUrViWkW5fS/14Du2FqbEhG/ccJyLqJbraWljVroaTfVb9qaqqsm6BJ3OWbeHbsVlXx3RoYc/k4X04c7Vwl2eaGhmw03s6C1btwG3cfN7znsa2ddi2bFqRt90qljVnSJ/OzF62mWdRL6lUzpxlszzyPG2dH44N6vLb3LF4b9qH96Z96Ovq0KGFPZ5DexY6LmcnO+7eD2Pmko0kJadgbVmdhT8MZcSPS+X8zvMcxLKNewlYHI6hgR7D+7nInX78un1TdHW0GfHjUhIS32BTpwYbf56ktLAvkUhYN38CC1btYPL8NaS9TadKhTIsnjocc1NjFkwawrKN+1i7/TCVypvjOaQnZ676ycQxyu1rJs5fg+vwGVQqZ87hDfPl0imJNs6mnLkpU0b2Y/W2gyzbuJe5493p2q5JkcZQXnEpQ5lSJuxcPoNFa//AY6Y3GRmZlDE3pUchDNiV6WvF0WbFQUHlLWpeimvtUGasqamp4j3bg59WbqOpqwfq6mrSewtzfsbuY9aA4pw/IP/x1t25Od9P/5UR33aVCWOor4uxoZ6czWhu7Kxrc+7aHZo0qivjriKR0LRRPe7cf1gou7tsiiqjlASS9yVhKSnIk8zMTLoPn0nLxjYM69sFHW0tMt+9IyIqhsGTF9Gzc0ua2VnRe9QcLu1d/o+raAWfJzsPnWHDrqOc2Jr/N18/V7Iv2L6wa1m+hzHGzV2Jro52sX+pQSAQfJlcvHGXUTO9ubJ3RZ536/3X+Ty/J/Qv5nFENEEPnzCoVyepwbKqigqVyplToUwpEpKSuXP/Ic4t7IUQJ5Dh3/DOpUwZ/g3lFAgExcPuI+dxdrITQlw+/Ke+tfo5UL6MGYb6umzcdZR+3dqiq6NNdMwrDpy4hI9/MJNH9MGiWkWlLj4VCAQCgeDfRkLiG+IT33D26m3OXr2t8LOFgr8Rgtw/jKaGOhsXTWbJ77vZPnAKiW9SMDbUo0Hdmmxf9iMW+XxPUiAQCASCfztXbwUyecEazE
yMWDxtRL7fWRYIGzmBQCAQCASCLxZhIycQCAQCgUDwhSIEOYFAIBAIBIIvFCHICQQCgUAgEHyhCEFOIBAIBAKB4AtFCHICgUAgEAgEXyhCkBMIBAKBQCD4QhGCnEAgEAgEAsEXyn9WkBs3dyXTFv/+qbPxWWPl7I5Faze53w2/+wCs3XGYLu5TFIY9e9WPOm0HyMV3/PxNOb83/O4z4sdfafbNaOp3GETrvuOZvGAtkdGxxV6mT42z2ySZD1c3dfXg4Mkref7/nJi6aD0/r975j6crxmoW/2TfUCatnYfO0O5bz38kP8XBvmMX6Tny3/OFAEXlOXjyCu2+9cTK2Z0hPyz+RDkT/NOILzsI8mX84G9wsLGUcatRpXyxxb9573EW/Lad9k52TB/VHz09bZ5GRnPumh8GejrFls7ngp11bczNjD51NoqEZY3KaGtqlGgaHjO9GdbXhXoWVUs0HUHh2Lr/JG/T0xnUs9OnzkqRqVjOnPq1q33qbBQbucsTE/eaKYvWMeX7vrRwsCYz890nzJ3gn0QIcoJ8qVK+DDZ1apRI3PcePGLhqh1MGNKTwb07//2gIfR2aV0iaX5qvCa4f+osFJn+3dqVaPypaW+54nuPYX1dSjQdQeE5e+U2Te3qfepsfBR29S2wq2/xqbNRbOQuT+jjCDIz39GnaxtUVf6zm23/SUqstc9cvU3d9gOJT0iScQ95FIFFazdCH0cAEBz2lEGTfsam0xAadBnKqBnLiIiKkQlj0dqNizfuyrhdvHFXbusuJxkZmSxcvYMWvcZi5exO677jmbV0Ezm/SKaqosKa7Ydo0WssNp2G8M33s/DxD5aJJz4hiamL1tO420jqdxhEz5GzpVuLAMOmLmHqovUyYdzGz8ex+0je5Uhr7Y7DdBr4g1w+uw+bwYJV2+XcR89azpjZK6T/j1+4yddDp2Pl7E5TVw/mrdxGatpb6fMbfvexaO1G2tt0mXgWr9vFAM+F0v/j5q5k6/6TnLjoQ9t+nlh3HMyr14l51mNJsmXfCapXKc+gXkV7y49PSKJee3fOXL0t45757h1NXT3Y/udpqb/82hCg/7ifWLxul4xb2tt0LFq74ev/QC7tgOBwLFq7yWz/3rwbjEVrN/534OTfecnMxLbzUI6euw58/PZYU1cPgsOesnzzfuy+GoHriJnSZ0fPXafzwCnUa+9Oh+8mc/KSr0zYzHfvWLF5P637jqdee3da9RnP7r/OSZ/fD31C//Hzqd9hEE17eLByywGZ8ZJzi7PLoKksWb9bLn/Dpi5h0vw10vS8N+3DqecYrJzd+XbsPEIeRSgs14Owp7TqM46k5BRcR8zEorUbPyxcK32uzFh99TqRifNX08hlOA26DMXzp9UkJCXnWZcFzRH+wWGMnLGM5t+MwabTEHqPmoN/cJg0fNiT59h2HsqtgBB6jJhF/Q6DaNV7HFv3nyQyOpbvp/9Kgy5DaerqwRzvLaRnZMikX1B7KSI5NZWpi9Zj13U49l+N4Pvpv8qZIBRUD5nv3vHb1j9xGTwNm46Dceo5hoWrd8jMV9m8e/+ezgOncNk3gJ9X78SitRut+oz/u11UVTlz5RZdBk2lfodBtHebxM5DZ/Mtg6//A/qO8cK281Dsug6nz+i5MuOxoPzvPHQGj5nehDyKoOfI2Vg5u7No7R9YObuTlJwik1b2OH0SGa1wKzgw5DHuE7PWn4ZdhtHbYy4pH+bVgvqvMmtMTl7GxjNr6Sba9vOkfodBOLtN4s+Tl2X87Dlynk4Df8gag64efD/9V17EvFIYX87yLF63i1EzvHn//j112g7AorUbUS/jZPx/yjW5qPNlUfrCqQ/jqDDzz5dOiQlyLezrY6Svx7HzN2Tcj5y9hkW1itSoXJ7IFzH0GzMPczNjtiyZwrr5nmRkZtJvrFe+E7AyrNv5Fxdv3OXXGSM5vGE+M8d8R61qFZFIJFI/xy/cJDDkMctmjmL3b7OoXKEMo2Z48yY5FcjqVIMmLyIk/BlLp3/PzuUzsLexZNDkRfgFhgLgYGOJT46FPvFNMn6BoWRkviMgOFzq7nM3WG6LEqBbh+b8dea6zCSanJLK+et36NLGEYCTl3yZ4LWKzq0c2LNqNvMmDuLC9TuMm7OySHVz7uptFq/9gzHurqycOxZjQ/08/b57/47MzMy/f+/k1fUyzz/83r8vWK1/xfce7Zo1lGmTwmBkoEcrRxsOnZIVjG7eCeJVQhIdWtgr1YZFoU6tKujr6sgIeeeu3sbESJ/z1+5I3QJDH5Ockoq9grYvKr+s28WtgAf8/MMwpnv0B7LqcsK81bh2dOLAurn0dmnF2DkruB/6RBru19/3sGnPcQb36szWpVMYM7A7FtUrAVnCrtv4n6hQxozdv81izriBbN1/kv8dOKUwDy5tHKWTbTYJiW+47BMg7bcrNu1nx8HT/OjRn32r51CxnDmDJv0s8wKSTfUq5Tmw1guAbcumcfuvtcwZ/7f2sqCxCjBqhjdhT56zbsEENi6aTPjT50zL9ZKVk4LmiLj4RKpVLMsqr7HsXTUbfT0dRs9aIbNQJ6ekMuvXTXh5unPr8FpG9P8Kr+Vb6T/uJ7q2bcK1/b+x6ZcfOHbuBn8c/lvAUaa9FOG9cR/lzE3Z9us01syfQOSLWKb/skHGT0H1oKqiQsSLGEb1/5o/189jyvd92brvJPuPX5RLT0UiYfdvM6lVrSLjBvXg9l9rObJpgfR5TNxrVmw+wA8j+nJw/Tx6d2nFzKUbuZ3H2EpNe8vQKb/gZF+fA2vnsvmXKXRoYU8p07/NDZRpx8cRLxg5YxkdWtizdv4E3Lq3Q1NDQ8b+FODI2evUr12NSuXM5fISF59I/3E/oaWlwYrZo1kxZzSdWzeWmg8U1H+VWWNyIlGRkJyahpenO4c3zKdjS3umLFzH42dRANwKCMFrxf8YN+gbDm+Yj/csD+rUqIypkYHC+HIyyu1rFkwegqqKCrf/Wsvtv9ZS2sxYxs+nXJOLOl8WpS80tKoFFG7++dIpsa1VVVVVXNo6cvDUFZltsqPnbuDaoTkAG/ccp2xpU+ZPGiJ9Xt9yNK37TOCPQ2cY0qdLkdN/GhmNVe1q2NatCUDl8qXl/KipqbJ42nDU1bKqYdbY72jYZRjBYU9pUK8mZ6/6EfzwKWd3LJFONHVqVuZJxAtWbDnA+gWeONhasnD1Dl7GxlPK1IhLN/2pXrkcFcqU4vz1O9SvXY13799zKyAE1w5OcnlwaePIwlU7uH47EMcGdYGsgwLqamq0cLAGYMXm/fTq0kpaHxbVKlLO3BSXwdO4c/8h1pbVC1U3N+4EcXTTQiqULVWg37G5hEVtLU38jqyT/g95FEGddgMVhs1PvZ/57h0xcfFUKFNwHvKjW4fmjJuzkjfJqejqaAFZmo4mDetiYqTPqUu+BbZhUVCRSLCrb4HP3SBcPgguZ6/50b97e9ZsO0Rq2lu0NDXwufuAmlXKKzUZK0tEVAwH13uhqqoqdVu+eR9ft2+Ke8+OANSoXJ7rfkGs23mYJT9+z+vEN2zZdwKvCe64tG0CI
B0bAJv2HMPYyAAvz0GoqqhgUa0iT59Hs3rbQYVbqi5tHFn6+x7uPXhE3VpVADh52Rd9PW2aNqxLQlIym/YeZ9bY72jfvBEAc8YPxKnnGA6cuExvl1Yy8amqqKCtpQmAloYGOtpaMs8LGqtXfO9x+14IJ7YukvbruePd6TZsOmFPnlOtUlm5MhQ0R7RwsJaOQYDxg3vy9dAfiY6Nl1kkB/fqTO0PAnHPzi1Zsn43tnVq0KGFPQA1q5SnffNG+Nx9wLdft1OqvfKioVUtRn3XTfp/9IBujJ61gnfv36MikShdD/M8B0njqFy+NIdOX+X2vVCFc5SOthYqEgnq6mpy7ZL4JpkFk4dQq1pFANx7duTQ6StcuxWIrQKTjLjXiSQlp9C1bRPKlTYDssZjNsrmPzjsKYunjZCOPYB2zRty9Nx1vmrXVOp29PwNBvZwVliXG3YfxczEkBVzxqDyQfjKnoOV6b/KrDE5MTM25Ocpw6T/Rw90Zfufp7kbFEblCmV4EvkCM2ND2jRtgIpEQqVy5lKhpCC0NDXQ1FAHkGujbD7lmlyU+fJj+kJh558vnRLdSO/u3JxbASFStWxw2FMePYuic+vGAPj6B+NkZyUTRl1NjaaN6nLzbrBcfIXhq/ZNOXTqKtN/2UBI+DOFfurXri5dGAD0dLTR09HmZVx8Vv4CHlC7eiWZt0WAlo1t8LkTBGQZgBvo6+IbkPWmcfaqH/bWlthZ1+bC9aw3jeCHT0lKTsHeprZcHrK1SgdzaJWOnrtO++aN0FBXIzkllaCHT3ByqC8Trla1ipQrbVakerK3sVRKiAOYOLQXe1fNlv62L/tR5nmlcuYyz7N/U0f2yz/i9++zFh+VomnjsmlhXx9dbS1OXPQBsgTEExd96JLdx5Row6LiYGuJz92sdn/2/CXhT57Tt2sbtDQ1uPEh7pt3g2hsW+ej0snNV+2byghx796/xz8onJaNbWT81a9djaCHWRqekEfPSE17S5umDRTG6Rf4ECc7Kxnhu37t6sTEvSY2PkHOf7nSZtjUqcGRs9ekbkfOXqdDC3tUVVUJCX9Gckouus24AAAfrklEQVSqTJ7U1dSoXb2SNE+FoaCx6nc/lOpVysv0a8salVBXUyM4THF6yswRsmU2BbI0jzmpXb2izH8jAz2pYCN1M9ST1qMy7ZUXuRf2MqVMSc/IkOapKPUAUM7cVK5cyqCnoy1X1jKlTImJe51nOk0a1uW7CQvYf/ySnHZE2fxraWpI15FsurRx5NLNAKnmyC8wlBcv4+jUStZfNv5BYbRuYisV4nKiTP8tbP/JjYpEQulSJrxOyqr3ZnZWpGdkMGDCAi77BuS5RfsxfMo1ubDz5cf0heKefz53SvSwQ61qFbGsUYlDp68wvF9Xjp27gW3dGtI3saQ3KQq39UwMDQh/GvVRaTvYWLJr5UxWbz/IV0On42hryfTRblSpUEbqx9hQTy6cRCKBD+Mn8U2KQj8mRvqkpL3lbXoGGupqH940gmnvZMeFG3dZ8uP3lCttyvyV24iLT+Tm3SBqVa2Q5xZmN+dmTPxpDbPGDiAzM5MLN+6yet54aR6y8qqgnoz0eZ3L3kEZcqvc86NiWfN8TxBqaqgrfP4yj4k8G1VVVYwM9Ih88XFXjGS9ZTbh0KkrdHNuxg2/+ySnpNGuWdZbmLJtWBQcbOqwYNUO4hOSOHvND5u6NTAy0KNZIyvOX7tDczsrfO4GM2/i4I8qY25yt19s3GvSMzKYvGAtU3/+e9shPePvskXHxKOlKa/pyuZ5dCy3Ah5w4MTfNjvvPmyPv4yNV6hR7Nq2Cev/+IuJw3oTn5DE1VuBjOz/dVZ8L7PatU3fCTJhUtLS8sxDfhQ0VqOi43j4OJJGLsNl/GRkZvIyNl5hnAXNEVd877F1/0lCHz0jOSUtz4XVQF9Xzk1HW1Pe44fwyrRXXuSeB7K38bLzpkw9JCWn8PvOI5y77kdMXALv3r3jTXIKTjm0j8qiaF7Kapa8hZDV88az4+BpvDft4+c1Oxjax4UBPZyRSCRKt2MpEyM5AayxbR0MDXQ5dcmX7h2ac+TsdRxsLTEzMVSYj+jYVzTPJbRko0z/VWaNycmjZ1Gs/+MItwIe8DohS3h7lWP+NjM2ZO+q2azdcZjRM5dTppQJk4b3ltEKfyyfdk0u3Hz5MX2huOefz50SP7Xazbk5u4+cZ3i/rhw9fwO37u2lz4wN9RW+7ce9TsBQweSYk1evCxZg6tSsjPdMDyJfxPDTym30He3FqW2LpQ1ZkG2WsYEegSGP5PMXn4i2poZ00m1sY8m+45e4e/8hb9PTsbO2QF1NjQplS3Hppn+WfVw+WpkWDtZoaqhz/vod0tMz0NfTwcE2y0bAyEAPiURC7CsF9RSfiKHBhwUuj7IoOshQVJu04samTg3OX7/D9/2/+qh4undoTreh04mLT+TY+Ru0bGwj3WZVtg0V1V9Bh0BqV6+IgV6W3ce5a360crQFstpz+eZ9hD5qRWJSMvbW8prYj0GCbF6z+/Oc8QNpkGO7FLLscgBMjQ1ITXtLSmqadAszJ7o6WvTs3JKB33SUe5bXdSkdW9rjteJ/+AeHERT6hNJmxtjWzdpO09XWRkUiYe+qWaipyU4zWlqFv8KkoD6rq6NFnRqVWTZzlNwzQ4O855K85ojg8GcMnfIL00f3Z/HU4ejqaPEi5hVOPccUOu+5Uaa98iJ32+dGmXoYN2clSckpLJo6nGqVyqEikfDjLxuKpJErylSiqaHOgB4d6N+9PUfOXmfOss1kZmYyuHdnpdtRUbqqKip0atWYY+dv0M25Gccv3GT0gO555sPUyCDPMa5s/y1ojckmOSWVvqO9aO9kx6bFkzE3zXoZa99/oow/MxNDpo7sx5iBrmzYfZThU5ewc8WMQpvP5MenWpMLO19+TF8o7vnnc6fEzyi7tHEk7EkkZ6/68SQyWmo3AmBvXZsL1+/KvOlmZGRy2eeezBaCmbEhcbkGXMCDcJSlXGkzFk0dTmx8Ak8io5UO52BjSXDYU7lTQ+eu+8nkz8G2Dg/CnnL68i2a2VlJt4BaOFhz5VYAvgEPcLTN29g9W6t08pIPpy770qmlg3SLS1NDHWvL6py/fkcmTEj4MyJfxEjzYWac9dYZFy9bT4qEmM+FXl1a4hcYyoETl/L1l5KaRkZGZp7PLapVxKJ6RU5d9uX0ldtSY3tQvg3NjA2IyzWB3XvwKN98SSQS7K1rc/NuED53g6UTU3N7K549f8mfp65Qu0alAifAj0VXRwvLGpUJeviE8mXMZH7lzLO2A2tUKY+6mhpnr/opjKORlQX+weGUK20qF4e6muL3PWNDfZo2rMvJi76cuOhD59aNpQKXtWV1VFRVeBwZLRdfXvaC2TY+qW8Lb4zcyMqCsKeRaGlpyKWnp6NdYPjcc8S1W4GUL2NGry6tpC8F90MfFzpfilCmvYpKQfXw/v17rt4KpN/XbalRubxUkxFUwCELTQ110tLS8/VTWFRVVHBp48g3nVpKD0d8bDu6tHHkiu89rt4KJC4+gfZOjfL0W6dm
Fc5e81OoaS1s/y1ojQl6+JTY+ARG9v9KKsQlJCUT8SJGzi9k9RGP77pRp1YV7nzEoSxFfKo1ubDz5cf0haLMP18yJa6RMzbUp2VjGxau3iE1QM9mwDcd2HP0PD8sXIeba3syMjJZu+MwIHuPmIOtJdv/PE3TRvUw1Nfl4k1/GZsyRWzdfxJzEyMsPtiv7Dl6AUN9XSrnofZWRNNG9WhQrxbfT/+VycN6Y2yoz5Gz1zl9+RZbfvn7iwa1qlbAQF+XvUcvMGl4b6l7CwdrJi1YQ/zrJBrVz18r071DcwZ6LiQ9M1POAH+suytDp/xChTJmtHS0IerlK35a+T+a2VlJDYorlTOnnLkpa7Yf4ocRfcjMfMfW/Sd4+DiSBvWUM5hVxMOnkXLXb9SqVgF93Y+/rLd1kwb06NSCqT+vx8f/AW2aNEBPV5uXsfE8CH/KWPcevElOpUGXocwa+x19urbJM65u7ZuxYddRUlPfymxFKNuGDjZ1WLJ+F99+3Y7aNSoREv6M5Zv3F6gJcrCtw6qtf2JmYkjNDxclGxvqY1W7GnuPnpcxvC5Jxgx0xWOmN2qqqjjZ10dFVYXwJ8/R0FCjS2tHTI0M6NO1NbN+3URKahoW1SoSGR2LmpoqrR1tce/Zka6DpzFmzgp6dWmFoZ4ukdGxPH4Wla+Bs0ubJqzefpBnz1/iObSX1N3ESJ8Brs5MXrCG0QO6U8+iKikpaQSGPqahVS2sLOQvZtXUUKdy+dJsO3CKsqVMkUiQbvkUREtHG2pVqcCgSYsY8W1XKpQpxavXifj4BzOkdxepMJaT/OaIqhXLEBEVw8lLvlhWr0Rg6GPWbj9cbNrsgtqrqChTD5XLl+bw6avUq1WVtLfpbP/zFFExcVIbQEXUqlaRv85co13zRqirqea5fVgQEVEx7D12ASf7+pibGfMk4gUnL/nQs3NLpfOfH/VrV6NcaVMWrd1JCwfrfOepQb06sevwWcbN/Y2+XVujqqrK7XshuLk6K9V/C7PGVChrhpqaKjsOncG1gxPPX8ayfNN+mUvPL9y4S0TUSxrUq4WejjY+/sGEPorAqhi1cfDp1uSscMrPlx/TF4oy/3zJ/CMXAnd3bs73039lxLddZdxNjQzY6T2dBat24DZuPu95T2PbOmxbNk1GizF1ZD/mem+l88ApZGRk0qBeTRZPG8HQfD5Boq2pwa8b9xD5IhYNdTXq1KzC+oUTC3UzvUQiYc28cSxa+wfj5v5GQtIbalevJHPEOdufvXVtTl70kREi7G0sSUlJw7Jm5QK/UmBRrSLmZsYkvUmRU6M7NqjLb3PH4r1pH96b9qGvq0OHFvZ4Du0p9aOmpor3bA9+WrmNpq4eqKur4ezUiInDessdyS8MyzbslXPbumRKsV2n4TXBnQZ1a7Lrr3P8deYaGRkZmBgZYPdBvR4c9hSAlo1t843HpW0TFq7ZyVftmsrYGSnbht90cuJZ1EtG/LiU1wlJVK1UFs+hvfK9vgKyttW9lm+lfyvZk50tG9uwbONeHGyK96BDXrRytGHl3DGs3HKADbuOoK6uRtWKZRnp9rXUz+ThfdDS1MB70z5iXr3G3NSY8YO/AaBMKRN2Lp/BorV/4DHTm4yMTMqYm9Kjo/wpxpy0bdaAGUs2ULGsORa5jN49h/bC1NiQjXuOExH1El1tLaxqV8PJPm+bHy/PQcxcuhFnt0n06OjErHEDlCq/qooK6xZ6smT9buYs20J8YtIHjWE91NRUFYbJb45wdrLj7v0wZi7ZSFJy1phc+MNQRvy4VKn8FIQy7VUUlKmHn6cMY9avm+g6eBoG+rr0/aoNk4b25tTlvO+xG+X2NRPnr8F1+AwqlTPn8Ib5RcqftpYmgSGP2P7naZLepGBuZkTXtk1w/3CXZFHaMTdd2jiycssBhvftmq+/0mbGbF4yhcVr/2DIlF9QVVWhXq2qfPfhlGtB/bcwa4y5qTELJg1h2cZ9rN1+mErlzfEc0pMzOTTk+no6HDhxmV/W7SYjI4NK5UvjNcFd4enfj+VTrMlQuPnyY/tCUeafLxXJ+5I4GpOLizfuMmqmN1f2rijwjeq/TM+Rs2nSsC5j3Xt86qx8NmzZe5xjF27KnZbNTUJSMs16eLB63niaNKz7D+VOIBAIvjzEmvzv4h/5jsfuI+dxdrITHSYfQsKf4R8URrf2zT51Vj4r7twPw7UArRDAwVOXMTMxlB4SEQgEAoFixJr876LEtlYTEt8Qn/iGs1dvc/bqbfauml1SSX3RPImMJiHxDTOWbqRjK4dC2fD9F/jlxxF5PkvPyCDyRSzhT5/jvXEf4wf3FN8YFAgEAgWINfnfS4kJcldvBTJ5wRrMTIxYPG2E3KWRgizGzllB+NMomttbMVtJeyBBFnHxibgOn4mamir9vmr7r7utWyAQCIoLsSb/e/lHbOQEAoFAIBAIBMWP2IcSCAQCgUAg+EIRgpxAIBAIBALBF4oQ5AQCgUAgEAi+UIQgJxAIBAKBQPCFIgQ5gUAgEAgEgi8UIcgJBAKBQCAQfKGUqCD38HEkzb8ZQ3Tsq5JM5h+j+/AZ0g8IK8LZbdJHfdc0P8bNXcm0xb+XSNyC/yaF6a9TF63n59U78/XzbxvvAoFA8CVQooKcsaE+NnWqo6P93/gMiJ11bczNjD46Ho+Z3gQEhxdDjr483r1/T5t+Ez51Nv4TFKa/WtaoTLVKZaX/H4Q9ZdjUJTJ+/mvjXSAQCD4HSuzLDgAmRvosnz26JJP4rPCa4P7RcaSmveWK7z2G9XUphhx9efjdCyX2VcKnzsZ/gsL01/7d2sn8P33ltpyf/9p4FwgEgs+BEtXIhT15jkVrN6Jexkn/23Yein9wGN+OnYdNx8E0/2YMXsu3kvY2XRrO1/8Bfcd4Ydt5KHZdh9Nn9Fxu+N0H4IbffSxau8n4B1i8bhcDPBdK/4+bu5Kt+09y4qIPbft5Yt1xMK9eJwLw6nUiE+evppHLcBp0GYrnT6tJSEqWie+STwBfDfmReu3dafetJ/uPX0JDXT3f8jZ19eDgySsy/89e9WOC1yrsug7H7qsRjPjxVyKjYxWGfxD2lFZ9xpGUnILriJlYtHbjh4Vrpc9VVVRYs/0QLXqNxabTEL75fhY+/sEycShTttwEhjzGfeLP2HQaQsMuw+jtMZeUtLdAlobst//9Seu+46nX3p32bpPY/udpmfDLN++n50j57/b1HDmb3/73p9L1sf/4Jb6bMJ+U1DQsWrth0dqNfccuSsMGhz1l+eb92H01AtcRM1mwajvdh82QS/d/B07StIcHme/eybh3HzaDBau2y/kfPWs5Y2avkP4/fuEmXw+djpWzO01dPZi3chupH+oDlO+DuXkZG8+spZto28+T+h0G4ew2iT9PXpbxo6ic2Rw9d53OA6dQr707Hb6bzMlLvnmm1WXQVJas3y3nPmzqEibNXyNNK2d/3XPkPJ0G/kD9DoNo6urB99N/5UVM1jZpzq39mUs38uuGPZy75idtp4iomCKP99eJb5jy8zqa9vCgdpvvpHFatHYjJPxZnmUUCAQCQQl
r5BSRnJLKxJ/W8NPEwdSvXY3wp88ZNHkR5mbGDO3ThdS0twyd8gtD+nRm/qQhvElO5ebdIEqZFn7L8tzV2zx9/pIx7q4YG+pjbKgPwKgZ3qS+fcu6BRNQUVFhjvcWpi1aL9UmPHv+kuFTl+DasTmLpgzjVUISi9b8weOIKFo3sS1UHib+tJrpo/vjNXEQKSlpTJj3G9N/2cDvCyfK+a1epTwH1nrh1HMM25ZNo06Nyqip/d1Exy/cpLFtHZbNHIWujjZrth9i1AxvTm/7BV0dLaXKlpu4+ET6j/sJB1tLVswejaqqCqGPI9HW1ABg/m/bOXLmGlNH9qNW1Qr4BYYyf9V20t6mM/CbDoWqi4Lqo0ubxmioqzFt8e9c2bscAE2Nv4XnX9btIj0jg59/GIaxoR46Wpps3H2MsCfPZbb9jp69QaeWDqiqyL6ndOvQnLXbDzNpeB9UJBIgqz+ev36HxdNGAHDyki8TvFYxZmB3nByseR4dy/zftvMsMppV88YVurw5kahISE5Nw8vTnXKlzdh37AJTFq7DxrI6lSuUybOcAFd87zFh3mo8h/TEyaE+l276M3bOCvb8NhvLGpXk0nJp48ieI+cZP/gbqVtC4hsu+wTwm9dYOf+3AkLwWvE/Fk0djkW1iryMjefqrXuYGhnI+Z3yfT9UVFR4+vwl3jNHAaCtpUn40yg5vwWNd4BZv24iPiGJw7//hKaGOl7L/4dPwAO2L5uGmbFhIWtZIBAI/lv844IcwJDenWlQryYANatW4Kt2Tbl66x5D+3Qh7nUiSckpdG3bhHKlzQCoU7NykdK5cSeIo5sWUqFsKanbFd973L4Xwomti6Tuc8e7023YdKlAsHnvcapUKM3scQOl4ZbPHk2rPoVfyFs3seWrdk0B0NbUYFDPToz4cSnv3r+XChPZqKqooK2lCYCWhoacrZGamiqLpw1H/YNwN2vsdzTsMozgsKc0qFdTqbLlZsPuo5iZGLJizhhpfhwb1AWyhLxtB06xdPr3ODvZAVntlZaezrINe+n3dVs01AvXhfKrD3U1NangpsjOKiIqhoPrvVBVVZW61a1ZhUOnrjDG3RWA6NhX+AY8YNLw3nLhXdo4snDVDq7fDpSW8exVP9TV1GjhYA3Ais376dWlFUM+CBkW1SpSztwUl8HTuHP/IdaW1QtV3pyYGRvy85Rh0v+jB7qy/c/T3A0KkxHkFJVz+eZ9fN2+Ke49OwJQo3J5rvsFsW7nYZb8+L3Csi79fQ/3Hjyibq0qAJy87Iu+njZNG9aV8/8k8gVmxoa0adoAFYmESuXMaWhVS2E5tDQ1UFNTQ1VFRSl7uPzGO8C5a3dYPstD+qI1/NuutPvWk/fvxGegBQKBoCA+yfUj2QtLNmYmhryMew1AOXNTmjSsy3cTFrD/+CWZLa3CYm9jKSPEAfjdD6V6lfIy7pY1KqGupkZw2BMA7oc+xsneWiZcWXMTypcxK3Qe6uUqq6mxIW/TM0hIfFPouOrXri4V4gD0dLTR09HmZVw8oFzZcuMfFEbrJrZyQiXAnfsPyczMxMm+vox7q8a2JCQl5xlnfnxMfXzVvqmMcANZWraDp//eHjx2/iYVypRSKHAZGejRytGGg6f+9n/03HXaN2+EhroaySmpBD18gpODbHlrVatIudJm3LwbnDvKj0JFIqF0KRNeJ8mWPXc5371/j39QOC0b28j4q1+7GkEPFbdBudJm2NSpwZGz16RuR85ep0MLe7k6BGhmZ0V6RgYDJizgsm8A798XnxCV33jPfPeOtLfpqOTQnmZrUt8jBDmBQCAoiE+ikTMy0JN3zLFwrJ43nh0HT+O9aR8/r9nB0D4uDOjhjESBsJEfpc2M5dyiouN4+DiSRi7DZdwzMjN5GZslEMW8SsDESF8urIGeTqHSBzAykI8HKNJCmb3NlhOJREL2eqdM2XITHfuK5nZWCp8lvUlGS1NDqiXMJrtuXicUXhj9mPpQ1J7ZWrbb90KwrVuTY+dv0KVN4zzj6ObcjIk/rWHW2AFkZmZy4cZdVs8bD0DimxQAqWYoJyZG+rxOSCowj/nx6FkU6/84wq2AB9K6e6UgztzljI17TXpGBpMXrGXqz+ul7ukZGflqRLu2bcL6P/5i4rDexCckcfVWICP7f63Qr5mxIXtXzWbtjsOMnrmcMqVMmDS8t1RT+THkN95VVVRobmfFxt1HqVOzMpqaGqzdcQhry+qYm8q3t0AgEAhk+SSCXEFoaqgzoEcH+ndvz5Gz15mzbDOZmZkM7t0Z8hDmsg8y5ESR4Kero0WdGpVZ9sG2JyeGBrpA1qIdr0BDVNChgZKmIEFWmbLlxtTIQGHdQZZAk5r2luSUVJkttLj4RJk488pVXvEWFYmClIwM9GjZ2Jq/zlyjfBkzbgWEMGf8QAWhs2jhYI2mhjrnr98hPT0DfT0dHGwtpXFJJBKFp2bj4hMxzBZICtEHs0lOSaXvaC/aO9mxafFkqZDSvr+8rWTucmbX/ZzxA2lQt6asX5W8+0THlvZ4rfgf/sFhBIU+obSZMbZ1a+Tp38zEkKkj+zFmoCsbdh9l+NQl7Fwx46O2k5Vhnucgeo6aTas+49HT0cbeujYr5ojTrwKBQKAMn6Ugl42qigoubRy5H/KY24GhAFLj57j4RMqam0j9BoY8wlBfwZt/LhpZWfDH4bNoaWkoNOSGLLuoa7cCZdyiY1/xPDquqEVRmmwbsdS3hd9SVqZsualTswpnr/nhObSXnKBoU7cGGupqnL9+h44tHaTu5675oaejTa2qFYEsASBbuMsmLj6xSPWlqaFOekaGQhvCvOjeoTmzl22hRpXy1KpagRqVy+fpV1VVFZe2TTh5yYeMjEyZQxGaGupYW1bn/PU7tHL8exszJPwZkS9ipDZjRemDQQ+fEhufwMj+X0kP7iQkJRPxIqbA8unqaGFZozJBD5/g0saxQP/ZGBvq07RhXU5e9OV+6GM6t26slFZbV0cLj++6ce6aH3cCQxUKcpoa6h9l9pCTm3eDKG1mzPEtP8uYDuTkZWw8+no6aH04hCMQCASCLD67T3RFRMXgvWkffoGhREbHcu12ICcv+WDzYTGpVM6ccuamrNl+iNS0t7xJTmX1toM8fBypVPwtHW2oVaUCgyYt4viFm9x78IhLN/35dcMe3iSnAjDA1ZnA0McsWLWdkPBn+Po/YPzc31BTLfnq0tRQp3L50mw7cIqIqBgilVjos1GmbLkZ1KsTkVExjJv7Gzf87uPr/4D1O//ibXoGejraDO3rwlzvrRw9d52wJ8/Zd+wiS3/fw7B+LlKhs1F9CyJfxHDw5BUy373jZWw8M5ZsQCUfbVFeVK9Snvfv3rNp9zEio2OJjS/4TrkWDtakp2ewac9xpQSd7h2ac/lmAJd8AuiSy/9Yd1f2Hj3P+p1/Efo4gks+AYyZs4JmdlbY1snSZhWlD1Yoa4aamio7Dp0hIioGH/9gPGZ6K71dP2agK5v3HGfp73vw9X/A7cBQ9h27yOEzV/MN59KmCacu+3Ld736+dXPhxl12HDxNcNhTIqJi+P
PkZUIfRWCVhzbOomoFbt8L4eqtezx9Hi29rqYohDyKQE1VlYDgcAKCwwl5FCETX3pGBs7fTWLOsi1FTkMgEAj+rXx2GjltLU0CQx6x/c/TJL1JwdzMiK5tm+DeqxOQdXLTe7YHP63cRlNXD9TV1XB2asTEYb2V+tyQqooK6xZ6smT9buYs20J8YtIHzUU91NSyjMArVyjDKq+xLF63i637T2JuYsR3PZwpX6ZUAbEXD16eg5i5dCPObpPo0dGJWeMGKBVOmbLlprSZMZuXTGHx2j8YMuUXVFVVqFerKt/1cAbA47tuGOjpsPT3PUS+iKVcaVNGD+yOW/f20jhqVC7PgslDWbXtT2Ys2YCJsQFu3dujra2pMM38KGduypSR/Vi97SDLNu5l7nh3urZrkn+5VVXp0saRLftO0FkJQc6iWkXMzYxJepMip21ybFCX3+aOxXvTPrw37UNfV4cOLezxHNpT6qcofdDc1JgFk4awbOM+1m4/TKXy5ngO6cmZq35K1Aq0crRh5dwxrNxygA27jqCurkbVimUZ6abY5i2bts0aMGPJBiqWNceiWsU8/enr6XDgxGV+WbebjIwMKpUvjdcEd6nwmptOrRy4eiuQUTO8UVVVYdfKmQr9KUM9i6qs/t9BenvMlbrpaGvxo8e3uHZwAsDMxKhYvpoiEAgE/zYk74vzeJpA8IlYvG4Xvv7B7PCerpT/niNn06RhXca69yjhnAny49j5GyxcvZNVXmOpVa0iKhIJySmpbP/zNCu2HODWX2uV3mIXCASC/yKf3daqQFBY0jMy+PPEZXp0bKGU/5DwZ/gHhdGtfbMSzpmgII5fuEnbZg2pXb2SVGDT0daioVUt0tLekp6e8YlzKBAIBJ83n93WqkCgLC9iXpGcksrqbYdQV886xJAfTyKjSUh8w4ylG+nYykHmEl7Bp6FurarsPHgGlzaO1KxagdTUNO6FPGbh6h10bOUg82UPgUAgEMgjBDnBF8vvfxxh56EzWNaozGqvcQV+ZWLsnBWEP42iub0Vs5W0OxSULAO/6UBqahqe81bxPDoWVRUVKlcoQ+dWDgz6YBcrEAgEgrwRNnICgUAgEAgEXyjCRk4gEAgEAoHgC0UIcgKBQCAQCARfKEKQEwgEAoFAIPhCEYKcQCAQCAQCwReKEOQEAoFAIBAIvlCEICcQCAQCgUDwhSIEOYFAIBAIBIIvFCHICQQCgUAgEHyh/B8I3d6jKBATewAAAABJRU5ErkJggg==)",
"_____no_output_____"
],
[
"After requesting it, I had to wait for two weeks for it to arrive in the mail.",
"_____no_output_____"
],
[
"## Declaring a *médecin traitant*\n\nA *médecin traitant* is just your general practitioner doctor. In order to have the reimbursements work smoothly, you should declare one. \n\nIn the same letter where my definitive number came, I got a form to declare a *médecin traitant*. I had to look for a doctor who spoke English or Spanish in *Doctolib* (see below). \n\nOnce in the appointment, I had to ask the doctor to become my *médecin traitant*, they accepted and registered it in the system in just a couple of seconds.",
"_____no_output_____"
],
[
"## Optional: Getting a *mutuelle*",
"_____no_output_____"
],
[
"The coverage of the French social security system is *partial*: you will still have to pay for doctor appointments, medicine, etc. To cover the remaining part of the cost, the French use a *mutuelle*, which is a kind of personal insurance, usually pay monthly.\n\nAs an LCT student, I had an insurance provided by the programme, so I did **not** have to subscribe to a *mutuelle*. I am including it here for completeness.\n",
"_____no_output_____"
],
[
"## Optional: Applying for the *Complementaire*",
"_____no_output_____"
],
[
"Again, the coverage of the French social security sytem is *partial*. Of course, each person has different economic circumsntances. The *Complementaire* exists for people who do not have enough ressources for covering all their health expenses: it entitles the holder for **total** coverage in some cases.\n\nIn general, one should apply for the *Complementaire* online, but this service does not work for foreing residents in France: One has to deposit the necessary documents in the *CPAM* in person.\n\nI first had to go to the *CPAM* to ask for the necessary documents, and they gave me the following list for my particular situation (foreign student):\n- Passport photocopy\n- Student Visa photocopy\n- Certificate of enrolment in a French university\n- Certificate of *CAF* benefits (student housing subsidy) (to be explained in a future post)\n- Tax history in France *or* a sworn declaration of having no tax history in France\n- Residence Certificate (recent: < 3 months) : I got it at the reception in my residence\n- Proof of scholarship : I used the Erasmus Mundus scholarship letter\n- Student card photocopy\n- Proof of coverage by the public health system : *Attestation de droits* that I downloaded from my *Ameli.fr* account.\n- The application form for the *Complementaire*\n\n\n\n",
"_____no_output_____"
],
[
"The application form for the *Complementaire* can be found online, in the *Ameli.fr* website. It is a little complicated to understand, so I recommend filling it with the help of a French speaker. \n\nNote:\n- One has to declare income of the last year. As I had not been working for more than a year by the time I applied to the *Complementaire*, the Erasmus scholarship was my only income.\n",
"_____no_output_____"
],
[
"- Since I was a foreign student in France, I had not payed any taxes ever in France, so I had to write a sworn declaration stating that fact. I used a simple model:",
"_____no_output_____"
]
],
[
[
"#collapse-hide\n\"\"\"\nAttestation sur l’honneur de non-imposition\n\nJe soussigné <M./Mme MY LAST NAME my name> demeurant <my address>,\nAtteste sur l’honneur ne pas être soumis à l’impôt au titre des revenus de l’année 2020/2021.\nPour faire valoir ce que de droit\n\nA Nancy, le ________________\n\n\"\"\";",
"_____no_output_____"
]
],
[
[
"It roughly says \"I declare under oath that I have not been subject to taxation during the years 2020/2021\"",
"_____no_output_____"
],
[
"After having all the documents ready, I put them in an envelope and went with a francophone friend to the *CPAM*. There they told me to just deposit it on a box they had outside the building. Since I did not have an appointment, they told me they could not help me verify if my application was complete. \n\nLuckily, two months after this, I got a notification on my *Ameli.fr* account showing that my application had been accepted!\n\nTo start using the *Complementaire* it is necessary to go to a *Pharmacie* (see below) and ask for them to update your *Carte Vitale*. This only takes a few seconds.\n\nFinally, when I went to another medical appointment, I had to pay from my own money but was reimboursed within a week!",
"_____no_output_____"
],
[
"# Using the French healthcare system\n\nAs I have stated above, you can start getting coverage from the French social security system as soon as you have your provisional number, though it may be complicated until you get a *Carte Vitale*, after which it is really simple.",
"_____no_output_____"
],
[
"## Doctolib",
"_____no_output_____"
],
[
"Most people use [*Doctolib*](https://www.doctolib.fr/) to book doctor appointments in France. They have a website and mobile apps. Here, you can look up general practitioners and specialists. You can even filter doctors by which language they speak.\n\n*Doctolib* is really easy to use, I'd recommend it!\n\n\nThe alternative to *Doctolib* is to call into a doctor's office and request an appointment.\n\n\nNote: you can use *Doctolib* without being registered in the social security system. In fact, this is what I did to reserve appointments for vaccination agains COVID (to be explained in a future post)",
"_____no_output_____"
],
[
"## Pharmacies\n\nIn France, unlike many countries, pharmacies are not a business, they are all about medicine and treaments. \n\n\nYou come in with your *ordonnance* (prescription), hand it in, show the *Carte Vitale*, and in a couple of minutes you will be walking out of the pharmacy with all you need.\n\n\nIn other countries I have seen pharmacies being run as convenience stores, so, for example, the clerks would offer you extra products to buy, or only give you information about cheaper medicine if you ask specifically ask for it. Not in France. \n\nThey are not difficult to find, however, they have somewhat inconvenient openning hours. Don't worry, at any time outside of business hours it is guaranteed that at least one pharmacy will be open in the city. You can check which pharmacy is currently \"standing guard\" (*Pharmacie de Garde*) in the [3237 Pharmacie hotline website](https://www.3237.fr/). These pharmacies are the only ones where the waiting line may be really long, though.\n\nThere is a pharmacy really close to my Student Residence:",
"_____no_output_____"
],
[
"<iframe src=\"https://www.google.com/maps/embed?pb=!3m2!1sen!2sfr!4v1649066967422!5m2!1sen!2sfr!6m8!1m7!1s7hrDS5lviE34rP7HtGXqRQ!2m2!1d48.70804283702478!2d6.166641784880013!3f221.5751746336359!4f3.8262345712196293!5f0.7820865974627469\" width=\"600\" height=\"450\" style=\"border:0;\" allowfullscreen=\"\" loading=\"lazy\" referrerpolicy=\"no-referrer-when-downgrade\"></iframe>",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
]
] |
e7fcff5f30e776f388a7215fa7b94d517c252e16 | 282,884 | ipynb | Jupyter Notebook | 2016/tutorial_final/79/Tutorial.ipynb | zeromtmu/practicaldatascience.github.io | 62950a3a3e7833552b0f2269cc3ee5c34a1d6d7b | [
"MIT"
] | 1 | 2021-07-06T17:36:24.000Z | 2021-07-06T17:36:24.000Z | 2016/tutorial_final/79/Tutorial.ipynb | zeromtmu/practicaldatascience.github.io | 62950a3a3e7833552b0f2269cc3ee5c34a1d6d7b | [
"MIT"
] | null | null | null | 2016/tutorial_final/79/Tutorial.ipynb | zeromtmu/practicaldatascience.github.io | 62950a3a3e7833552b0f2269cc3ee5c34a1d6d7b | [
"MIT"
] | 1 | 2021-07-06T17:36:34.000Z | 2021-07-06T17:36:34.000Z | 198.654494 | 64,632 | 0.85219 | [
[
[
"# Principal Component Analysis (PCA)",
"_____no_output_____"
],
[
"### Introduction",
"_____no_output_____"
],
[
"<p> The purpose of this tutorial is to provide the reader with an intuitive understanding for principal component analysis (PCA). PCA is a multivariate data analysis technique mainly used for dimensionality reduction of large data sets. Working with large data sets is usually very complex as they contain large amount of information, which is difficult to comprehend. In order to find the patterns and correlations hidden on this data, PCA projects the original data into a variance space that contains a smaller number of latent variables called principal components. Each of these principal components, explain the amount of variation of the original data and it allows to explore how the instances differ from, and are similar to one another. </p>",
"_____no_output_____"
],
[
"<p> For this tutorial, you will use the libraries listed below, which are already installed in Anaconda:</p>",
"_____no_output_____"
]
],
[
[
"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn\nseaborn.set_style(\"white\")\nimport sklearn\nfrom sklearn.decomposition import PCA\n\n%matplotlib inline",
"_____no_output_____"
]
],
[
[
"<p>On this tutorial, we will go step by step on how to implement principal component analysis (PCA) and then, we will use the sklearn library to compare the results. This tutorial is composed by the following sections:</p>",
"_____no_output_____"
],
[
"<ol type=\"i\" style=\"margin-top:-10px\">\n<li><a href=\"#Data Loading\"> Data Loading</a></li>\n<li><a href=\"#Data Processing\"> Data Processing </a></li>\n<ul padding=0px; display=inline; style=\"margin-top:0px\" >\n <li type = \"disc\"> <a href=\"#Mean-Centering Data\"> Mean-Centering Data </a></li>\n <li type = \"disc\"><a href=\"#Scaling Data\"> Scaling Data </a></li>\n</ul>\n<li><a href=\"#Mathematical Computation\"> Mathematical Computation </a></li>\n<li><a href=\"#PCA with sklearn\"> PCA with sklearn </a></li>\n<li><a href=\"#Summary\"> Summary </a></li>\n<li><a href= \"#References\"> References </a></li>\n</ol>",
"_____no_output_____"
],
[
"<a id=\"Data Loading\"></a>",
"_____no_output_____"
],
[
"### Data Loading ",
"_____no_output_____"
],
[
"<p> The dataset that we will be exploring in this tutorial comes from the U.S Energy Information Administration website: <a href=\"http://www.eia.gov/\"> www.eia.gov </a>. The data is provided in a weekly basis from February of 1991 to September of 2015 for the following variables with their respective units: </p>\n\n<ul style=\"margin-left: 50px;\">\n <li type = \"disc\"> Exports of Crude Oil (Thousand Barrels per Day) </li>\n <li type = \"disc\"> Commercial Crude Oil Imports Excluding SPR (Thousand Barrels per Day) </li>\n <li type = \"disc\"> Ending Stocks of Crude Oil and Petroleum Products (Thousand Barrels) </li>\n <li type = \"disc\"> Field Production of Crude Oil (Thousand Barrels per Day) </li>\n <li type = \"disc\"> Ending Stocks excluding SPR of Crude Oil (Thousand Barrels)</li>\n <li type = \"disc\"> Refiner Net Input of Crude Oil (Thousand Barrels per Day) </li>\n <li type = \"disc\"> Cushing OK WTI Spot Price FOB (Dollars per Barrel) </li>\n <li type = \"disc\"> Europe Brent Spot Price FOB (Dollars per Barrel)</li>\n</ul>",
"_____no_output_____"
]
],
[
[
"dataset = pd.read_csv(\"tutorial_data1.csv\") \ndataset.head()",
"_____no_output_____"
]
],
[
[
"<p> Since the variables contain different dimensions, this will require some data processing that is described in the following section. </p>",
"_____no_output_____"
],
[
"<a id=\"Data Processing\" style=\"margin-top:0px\"></a>",
"_____no_output_____"
],
[
"### Data Processing",
"_____no_output_____"
],
[
"<a id=\"Mean-Centering Data\"></a>",
"_____no_output_____"
],
[
"#### Mean-Centering Data",
"_____no_output_____"
],
[
"<p> The first step before applying PCA to a data matrix $X$ is to mean-center the data by making the mean of each variable to be zero. This is accomplished by finding the mean of each variable and substract it off to each of the instances belonging to that variable. </p>",
"_____no_output_____"
]
],
[
[
"dataset_center = (dataset - dataset.mean())\ndataset_center.round(2).head()",
"_____no_output_____"
]
],
[
[
"<p> Now, we need to find the covariance matrix of our dataset to see how the variables are correlated to each other. This is described by the following equation:</p>",
"_____no_output_____"
],
[
"$$ Cov(X) = \\dfrac{\\sum_{i=1}^{N}(x_{i} - \\bar{x})(y_{i} - \\bar{y})}{N-1}$$",
"_____no_output_____"
],
[
"<p> where $N$ is the total number of rows, $\\bar{x}$ and $\\bar{y}$ are the means for the variables $x$ and $y$, respectively.</p>",
"_____no_output_____"
]
],
[
[
"data_cov = dataset_center.cov()\ndata_cov = data_cov - data_cov.mean()\ndata_cov.round(2).head()",
"_____no_output_____"
]
],
[
[
"<p> The problem with the above matrix is that it depends on the units of the variables, so it is difficult to see the relationship between variables with different units. This requires you to scale the data and find the correlation matrix; which not only provides you with how the variables are related (positively/negatively), but also the degree to which they are related to each other.</p>",
"_____no_output_____"
],
[
"<p> <b>Note:</b> If the variables in your data set have the same units, you don't need to scale the data. This means that you can work with the covariance matrix.</p>",
"_____no_output_____"
],
[
"<a id=\"Scaling Data\"></a>",
"_____no_output_____"
],
[
"#### Scaling Data",
"_____no_output_____"
],
[
"<p>Scaling the data to unit variance removes the problem of having variables with different dimensions. In addition, this ensures that we account for the variation of the instances, not for their magnitude. Below is the equation that describes this step:</p>",
"_____no_output_____"
],
[
"$$ x_{i,j} = \\dfrac{{x_{i,j} - \\bar{x_{j}}}}{\\sqrt{\\sum_{i=1}^{N}\\dfrac{(x_{i,j} - \\bar{x_{j}})^2}{N}}} $$",
"_____no_output_____"
],
[
"<p> where ${N}$ is the total number of rows, $\\bar{x}$ is the mean of column $j$, and $i$ and $j$ represent the row and column number, respectively. </p>",
"_____no_output_____"
]
],
[
[
"# Scale data, ddof is set to 0 since we are using N not (N-1); which is the default for std in pandas\ndataset_scale = (dataset_center)/(dataset_center.std(ddof = 1))\n# Correlation matrix\ndata_corr = dataset_scale.corr()\n\n#Changing the names for the columns and indexes\ndata_corr.columns = ['Exports of Crude Oil', 'Imports Excluding SPR', 'Ending Stocks of Crude Oil',\n 'Field Production', 'Ending Stocks excluding SPR', 'Refiner Net Input', 'WTI Spot', 'Brent Spot' ]\ndata_corr.index = ['Exports of Crude Oil', 'Imports Excluding SPR', 'Ending Stocks of Crude Oil',\n 'Field Production', 'Ending Stocks excluding SPR', 'Refiner Net Input', 'WTI Spot', 'Brent Spot' ]\n\ndata_corr.round(2)",
"_____no_output_____"
]
],
[
[
"<a id=\"Mathematical Computation\"></a>",
"_____no_output_____"
],
[
"### Mathematical Computation",
"_____no_output_____"
],
[
"<p> The next step is to find the eigenvalues and eigenvectors of our matrix. But before doing so, we will need to go over some linear algebra concepts that are essential for understanding PCA. As mentioned in the introduction, PCA is a projection method that maps the data into a different space whose axes are called principal components (PC). In order to do so, eigendecomposition is applied to the covariance matrix (in our case, the correlation matrix):</p> ",
"_____no_output_____"
],
[
"$$Cov(X) = V\\Lambda V^{-1} $$",
"_____no_output_____"
],
[
"<p>where $V$ is a square matrix whose rows represent the eigenvectors of $Cov(X)$, and $\\Lambda$ is a diagonal matrix whose diagonal elements are the eigenvalues of $Cov(X)$. The eigenvectors $v_{1}, v_{2},..., v_{n}$ are produced by the linear combination of the original variables $X_{1}, X_{2},..., X_{p}$ as shown below:</p>",
"_____no_output_____"
],
[
"$$v_{n} = w_{n1}X_{1} + w_{n2}X_{2} + ... + w_{np}X_{p}$$",
"_____no_output_____"
],
[
"<p> where $w_{np}$ represents the weight of each variable and the eigenvectors are the actual principal components.</p>",
"_____no_output_____"
]
],
[
[
"# Finding eigenvalues(w) and eigenvectors(v)\nw, v = np.linalg.eig(data_corr)\nprint \"Eigenvalues:\" \nprint w.round(3)\nprint \"Eigenvectors:\"\nprint v.round(3)",
"Eigenvalues:\n[ 3.222 2.806 1.14 0.008 0.08 0.309 0.187 0.249]\nEigenvectors:\n[[-0.104 0.492 -0.347 -0.042 0.192 -0.114 0.18 0.736]\n [-0.081 -0.5 -0.401 -0.018 -0.594 -0.412 0.162 0.184]\n [-0.498 0.044 0.234 0.013 -0.161 -0.159 -0.772 0.218]\n [-0.142 0.539 -0.133 0.03 -0.673 0.403 0.077 -0.222]\n [-0.387 0.335 -0.034 0.032 0.154 -0.652 0.223 -0.488]\n [-0.254 -0.149 -0.763 0.01 0.315 0.293 -0.276 -0.265]\n [-0.495 -0.214 0.172 0.693 0.073 0.246 0.341 0.129]\n [-0.505 -0.187 0.183 -0.718 0.054 0.244 0.311 0.047]]\n"
]
],
[
[
"<p> The eigenvalues represent the amount of variation contained by each of the principal components. The purpose of finding these values is to obtain the number of components that provide a reasonable generalization of the entire data set; which would be less than the total number of variables in the data. In order to determine the number of components that account for the largest variation in our data set, the scree plot is implemented as shown in the chart below: </p>",
"_____no_output_____"
]
],
[
[
"plt.figure(figsize=(16,10))\n# Scree Plot\nplt.subplot(221)\nplt.plot([1,2,3,4,5,6,7,8], w, 'ro-')\nplt.title('Scree Plot', fontsize=16, fontweight='bold')\nplt.xlabel('Principal Components', fontsize=14, fontweight='bold')\nplt.ylabel('Eigenvalues', fontsize=14, fontweight='bold')\n\n# Cumulative Explained Variation \nplt.subplot(222)\nplt.plot([1,2,3,4,5,6,7,8], (w*100/sum(w)).cumsum(), 'bo-')\nplt.title('Cumulative Explained Variation', fontsize=16, fontweight='bold')\nplt.xlabel('Principal Components', fontsize=14, fontweight='bold')\nplt.ylabel('Amount of Variation (%)', fontsize=14, fontweight='bold')",
"_____no_output_____"
]
],
[
[
"<p> As you can see in the scree plot, the value of the eigenvalues drop significantly between PC3 and PC4 and it is not very clear how many components represent the data set. As a result, the cumulative explained variation is plotted and it clearly shows that the first 3 components account for approximately 90% of the variation of the data. <br>\nNow that we have determined the number of PCs, we can map the scale data into the new coordinate system governed by the principal components. This mapped dataset is called in PCA, the scores:</p>",
"_____no_output_____"
],
[
"$$ scores = XV^{T} $$",
"_____no_output_____"
],
[
"<p>where $V$ is a matrix with the number of PCs that explain most of the variation of the data. For visualization purposes, we will only use the first two components which account for ~ 75% of the total variation.</p>",
"_____no_output_____"
]
],
[
[
"# Scores\nscores = (dataset_scale.as_matrix()).dot(v[:,:2])\nscores",
"_____no_output_____"
]
],
[
[
"<p>Having the scores allows us to observe the direction of the principal components in the x-y axis as shown in the following graph:</p>",
"_____no_output_____"
]
],
[
[
"plt.figure(figsize=(10,8))\n\n# Principal Components\npc1 = v[:, 0]\npc2 = v[:, 1]\n\npc1x = []\npc2x = []\npc1y = []\npc2y = []\n\n# Plotting the scaled data and getting the values of the PCs\nfor ii, jj in zip(scores, dataset_scale.as_matrix()):\n pc1x.append(pc1[0]*ii[0]), pc1y.append(pc1[1]*ii[0])\n pc2x.append(pc2[0]*ii[1]), pc2y.append(pc2[1]*ii[1])\n plt.scatter(jj[0], jj[1], color = 'b', marker = 'o')\n\n# Fitting PC1 as a line\nfit_pc1 = np.polyfit(pc1x, pc1y, 1)\nline_pc1 = np.poly1d(fit_pc1)\nnew_points_pc1 = np.arange(-2,4)\nplt.plot(new_points_pc1, line_pc1(new_points_pc1), 'g-', label= \"PC1\")\n\n# Fitting PC2 as a line\nfit_pc2 = np.polyfit(pc2x, pc2y, 1)\nline_pc2 = np.poly1d(fit_pc2)\nnew_points_pc2 = np.arange(-2,4)\nplt.plot(new_points_pc2, line_pc2(new_points_pc2), 'r-', label= \"PC2\")\n\nplt.xlim([-2, 3])\nplt.ylim([-3, 2])\nplt.title('Scaled Data with the Principal Components', fontsize=16, fontweight='bold')\nplt.xlabel('Exports of Crude Oil', fontsize=14, fontweight='bold')\nplt.ylabel('Imports Excluding SPR', fontsize=14, fontweight='bold')\nplt.legend(prop={'size':13, 'weight':'bold'}, frameon=True)",
"_____no_output_____"
]
],
[
[
"<p>As you can see above, the principal components are orthogonal (perpendicular) to each other, which is expected as this is an attribute of eigenvectors. Visually, PC1 and PC2 seem to capture a similar amount of variation and this correlates with the eigenvalues obtained whose values are 3.222 and 2.806 for PC1 and PC2, respectively.</p>",
"_____no_output_____"
],
[
"<p>Now we will use the most important visualization plot of PCA, called the biplot. Here we will plot the scores along with the weights of the principal components, which are also called the loadings and represent the direction of the variables of $X$. The scores are plotted as data points, while the loadings will be plotted as vectors</p>",
"_____no_output_____"
]
],
[
[
"plt.figure(figsize=(10,8))\nfor i in scores:\n plt.scatter(i[0], i[1], color = 'b')\n\nplt.xlim([-5, 3])\nplt.ylim([-6, 4])\nplt.title('Biplot', fontsize=16, fontweight='bold')\nplt.xlabel('PC1 (40.3%)', fontsize=14, fontweight='bold')\nplt.ylabel('PC2 (35.1%)', fontsize=14, fontweight='bold')\n\n# Labels for the loadings\nnames = list(data_corr.columns)\n#Transposing the eigenvectors\nv_t = v[:,:2]\npc_1 = v_t[:, 0]\npc_2 = v_t[:, 1]\n\n# Plotting the loadings as vectors\n# The vectors are multiply by 4 due to visualization purposes\nfor i in range(len(pc_1)):\n plt.arrow(0, 0, pc_1[i]*5, pc_2[i]*5, color='r', width=0.002, head_width=0.045)\n plt.text(pc_1[i]*5, pc_2[i]*5, names[i], color='r')",
"_____no_output_____"
]
],
[
[
"<p>As you can see above, some of the variables are close to each other and this means that they are strongly related to one another. The actual correlation can be obtained by calculating the cosine of the angle between the variables. In addition, you can look back at the correlation matrix to see if those variables have a strong correlation. For example, the WTI and Brent spot prices vectors are very close to each other and their correlation value in the correlation matrix is 99%. The data points on the right-hand side of the plot, will certainly have low values as they are in the opposite direction of the variables.</p>",
"_____no_output_____"
],
[
"<a id=\"PCA with sklearn\"></a>",
"_____no_output_____"
],
[
"### PCA with sklearn",
"_____no_output_____"
],
[
"<p>On this section, we will apply PCA directly to the scaled data using the sklearn decomposition library. First, define the number of components that you want (from the previous section, we found that 3 components explained most of the variation of the data, but 2 were chosen for visualization purposes).</p>",
"_____no_output_____"
]
],
[
[
"pca = PCA(n_components=2)",
"_____no_output_____"
]
],
[
[
"<p>Second, fit the scaled data to PCA to find the eigenvalues and eigenvectors</p>",
"_____no_output_____"
]
],
[
[
"pca.fit(dataset_scale.as_matrix())\n# Eigenvectors\nprint \"Eigenvectors:\"\nprint pca.components_\nprint \n# Eigenvalues\nprint \"Eigenvalues:\"\nprint pca.explained_variance_\nprint \n# Variance explained\nprint \"Amount of variance explained per component (%)\"\nprint pca.explained_variance_ratio_*100",
"Eigenvectors:\n[[-0.10390372 -0.08094858 -0.4979827 -0.14178406 -0.38709197 -0.25433889\n -0.4946933 -0.50528402]\n [-0.49233521 0.49995417 -0.04365996 -0.53907923 -0.33467661 0.14923826\n 0.21430968 0.18689607]]\n\nEigenvalues:\n[ 3.21945411 2.80348497]\n\nAmount of variance explained per component (%)\n[ 40.27451837 35.07085462]\n"
]
],
[
[
"<p> This is also a good way to check if our procedure was correct. If you look back into the <a href=\"#Mathematical Computation\"> Mathematical Computation </a>section, you will see that the values match! Next, apply dimensionality reduction to the data to obtain the scores. </p>",
"_____no_output_____"
]
],
[
[
"# Apply dimensionality reduction (Finding the scores)\nscores_sk = pca.transform(dataset_scale.as_matrix())\nprint scores_sk",
"[[ 1.28881571 -2.58539236]\n [ 1.23529003 -2.63038672]\n [ 1.29608702 -3.35400166]\n ..., \n [-3.39489928 -4.92057914]\n [-3.45961704 -4.93657062]\n [-3.35244805 -4.98731342]]\n"
]
],
[
[
"<p>Last, get the biplot with the values obtained in this section. </p>",
"_____no_output_____"
]
],
[
[
"plt.figure(figsize=(10,8))\nfor i in scores_sk:\n plt.scatter(i[0], i[1], color = 'b')\n\n# Assigning the PCs \npc1_sk = pca.components_[0]\npc2_sk = pca.components_[1]\n \nplt.title('Biplot', fontsize=16, fontweight='bold')\nplt.xlabel('PC1 (40.3%)', fontsize=14, fontweight='bold')\nplt.ylabel('PC2 (35.1%)', fontsize=14, fontweight='bold')\nplt.xlim([-5, 3])\nplt.ylim([-6, 4])\n\n# Labels for the loadings\nnames = list(data_corr.columns)\n\n# Plotting the loadings as vectors \nfor i in range(len(pc1_sk)):\n plt.arrow(0, 0, pc1_sk[i]*5, pc2_sk[i]*5, color='r', width=0.002, head_width=0.025)\n plt.text(pc1_sk[i]*5, pc2_sk[i]*5, names[i], color='r')",
"_____no_output_____"
]
],
[
[
"<a id=\"Summary\"></a>",
"_____no_output_____"
],
[
"### Summary",
"_____no_output_____"
],
[
"<p>Principal component analysis is a powerful technique that allows you to find the hidden patterns and correlations of a data set through dimensionality reduction. Dimensionality reduction is accomplished by finding the eigenvectors that explain the largest variance of the data. In practice, the first two components usually account for most of the variation of the data. To visualize the data in the coordinate system governed by the principal components, the biplot is implemented. This visualization plot allows to see the correlations between the new data (or rotated data) as data points along with the variables of the original data set as vectors. \nOn this tutorial, PCA was implemented step by step with the only purpose of given the reader a deep understanding of this technique. Then, the sklearn library for PCA was used to compare the results.</p>",
"_____no_output_____"
],
[
"<a id=\"References\"></a>",
"_____no_output_____"
],
[
"### References",
"_____no_output_____"
],
[
"<ol style=\"1\" margin-top=\"-10px\">\n <li> <a href=https://datajobs.com/data-science-repo/PCA-Tutorial-[Shlens].pdf> A Tutorial on Principal Component Analysis</a></li>\n <li><a href=https://www.researchgate.net/profile/Kim_Esbensen/publication/222347483_Principal_Component_Analysis/links/00b4952c66e796fc2d000000.pdf> Principal Component Analysis </a></li>\n <li> <a href=http://scikit-learn.org/stable/modules/generated/sklearn.decomposition.PCA.html> sklearn library for PCA</a></li>\n <li><a href=http://www.wiley.com/WileyCDA/WileyTitle/productCd-0471489786.html>\n Chemometrics: Data Analysis for the Laboratory and Chemical Plant</a></li>\n <li><a href=https://www.youtube.com/watch?v=5zk93CpKYhg> PCA tutorial in R </a></li>\n</ol>",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown",
"markdown",
"markdown"
]
] |
e7fd0542a5139d058958c8799909f40461e46d6c | 8,260 | ipynb | Jupyter Notebook | notebooks/B1. Logistic Regression - Overview.ipynb | vitorpq/LearnDataScience | 23b06019d9a0da445d713cd3056d307bd0df477f | [
"BSD-2-Clause"
] | 21 | 2015-02-16T18:14:20.000Z | 2021-04-15T19:27:39.000Z | notebooks/B1. Logistic Regression - Overview.ipynb | isayev/LearnDataScience | 8827b954575b5276017d546562379f55ef3f1ee4 | [
"BSD-2-Clause"
] | null | null | null | notebooks/B1. Logistic Regression - Overview.ipynb | isayev/LearnDataScience | 8827b954575b5276017d546562379f55ef3f1ee4 | [
"BSD-2-Clause"
] | 18 | 2015-01-17T00:42:33.000Z | 2020-12-11T01:10:22.000Z | 34.132231 | 265 | 0.482567 | [
[
[
"empty"
]
]
] | [
"empty"
] | [
[
"empty"
]
] |
e7fd08b8923d00894bfe53ad1d5769ee642d3c64 | 51,910 | ipynb | Jupyter Notebook | docs/notebooks/analysis/example_fit_ramsey.ipynb | dpfranke/qtt | f60e812fe8b329e67f7b38d02eef552daf08d7c9 | [
"MIT"
] | null | null | null | docs/notebooks/analysis/example_fit_ramsey.ipynb | dpfranke/qtt | f60e812fe8b329e67f7b38d02eef552daf08d7c9 | [
"MIT"
] | null | null | null | docs/notebooks/analysis/example_fit_ramsey.ipynb | dpfranke/qtt | f60e812fe8b329e67f7b38d02eef552daf08d7c9 | [
"MIT"
] | null | null | null | 300.057803 | 25,496 | 0.931401 | [
[
[
"# Fitting the data from a Ramsey experiment",
"_____no_output_____"
],
[
"In this notebook we analyse data from a Ramsey experiment. Using the method and data from:\n\nWatson, T. F., Philips, S. G. J., Kawakami, E., Ward, D. R., Scarlino, P., Veldhorst, M., … Vandersypen, L. M. K. (2018). A programmable two-qubit quantum processor in silicon. Nature, 555(7698), 633–637. https://doi.org/10.1038/nature25766\n\nThe signal that results from a Ramsey experiment oscillates at a frequency corresponding to the difference between the qubit frequency and the MW source frequency. Therefore, it can be used to accurately calibrate the MW source to be on-resonance with the qubit. Additionally, the decay time of the Ramsey signal corresponds to the free-induction decay or T2* of the qubit.\n\nThis example takes a Ramsey dataset and uses the core function `qtt.algorithms.functions.fit_gauss_ramsey` to fit it, returning the frequency and decay of the signal.",
"_____no_output_____"
]
],
[
[
"import numpy as np\nimport matplotlib.pyplot as plt\nfrom qtt.algorithms.functions import gauss_ramsey, fit_gauss_ramsey",
"_____no_output_____"
]
],
[
[
"Test data, based on the data acquired by Watson et all.",
"_____no_output_____"
]
],
[
[
"y_data = np.array([0.6019, 0.5242, 0.3619, 0.1888, 0.1969, 0.3461, 0.5276, 0.5361,\n 0.4261, 0.28 , 0.2323, 0.2992, 0.4373, 0.4803, 0.4438, 0.3392,\n 0.3061, 0.3161, 0.3976, 0.4246, 0.398 , 0.3757, 0.3615, 0.3723,\n 0.3803, 0.3873, 0.3873, 0.3561, 0.37 , 0.3819, 0.3834, 0.3838,\n 0.37 , 0.383 , 0.3573, 0.3869, 0.3838, 0.3792, 0.3757, 0.3815])\n\ntotal_wait_time = 1.6e-6\nx_data = np.linspace(0, total_wait_time, len(y_data))",
"_____no_output_____"
]
],
[
[
"Plotting the data:",
"_____no_output_____"
]
],
[
[
"plt.figure()\nplt.plot(x_data * 1e6,y_data, '--o')\nplt.xlabel(r'time ($\\mu$s)')\nplt.ylabel('Q1 spin-up probability')\nplt.show()",
"_____no_output_____"
]
],
[
[
"Applying the `fit_gauss_ramsey` function to fit the data:",
"_____no_output_____"
]
],
[
[
"par_fit_test, _ = fit_gauss_ramsey(x_data, y_data)\nfreq_fit = abs(par_fit_test[2] * 1e-6)\nt2star_fit = par_fit_test[1] * 1e6",
"_____no_output_____"
]
],
[
[
"Plotting the data and the fit:",
"_____no_output_____"
]
],
[
[
"test_x = np.linspace(0, total_wait_time, 200)\nplt.figure()\nplt.plot(x_data * 1e6, y_data, 'o', label='Data')\nplt.plot(test_x * 1e6, gauss_ramsey(test_x, par_fit_test), label='Fit')\nplt.title('Frequency detuning: %.1f MHz / $T_2^*$: %.1f $\\mu$s' % (freq_fit, t2star_fit))\nplt.xlabel('time ($\\mu$s)')\nplt.ylabel('Spin-up probability')\nplt.legend()\nplt.show()",
"_____no_output_____"
]
],
[
[
"Note that for the Ramsey experiment, the frequency is of the MW source is offset by 4 MHz. Therefore, this experiment shows that the qubit was off-resonance from the source by -200 kHz.",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |
e7fd2620e0454d771cb35c6e7be40f2a916cb9c5 | 10,966 | ipynb | Jupyter Notebook | Lesson05/Activity27/activity27.ipynb | webobite/Data-Visualization-with-Python | 1c0ef63da951c9f77351ba621e71cba2600dce00 | [
"MIT"
] | 74 | 2019-03-22T11:25:01.000Z | 2022-03-16T16:09:02.000Z | Lesson05/Activity27/activity27.ipynb | zwfengineer/Data-Visualization-with-Python | 18f79fcfefe53a2cbd2309c8648bbe16c33150c2 | [
"MIT"
] | 1 | 2019-06-17T02:04:23.000Z | 2019-06-17T03:20:30.000Z | Lesson05/Activity27/activity27.ipynb | zwfengineer/Data-Visualization-with-Python | 18f79fcfefe53a2cbd2309c8648bbe16c33150c2 | [
"MIT"
] | 84 | 2018-11-29T12:59:44.000Z | 2022-03-22T04:04:57.000Z | 26.616505 | 214 | 0.591373 | [
[
[
"## Plotting geospatial data on a map",
"_____no_output_____"
],
[
"In this first activity for geoplotlib, you'll combine methodologies learned in the previous exercise and use theoretical knowledge from previous lessons. \nBesides from wrangling data you need to find the area with given attributes. \n\nBefore we can start, however, we need to import our dataset. \nFor this activity, we'll work with geo-spatial data that contains all cities with their coordinates and their population.\n\n**Note:** \nThis time the dataset is not yet added into the data folder. You have to download it from here: \nhttps://www.kaggle.com/max-mind/world-cities-database#worldcitiespop.csv",
"_____no_output_____"
],
[
"#### Loading the dataset",
"_____no_output_____"
]
],
[
[
"# importing the necessary dependencies\nimport numpy as np\nimport pandas as pd\nimport geoplotlib",
"_____no_output_____"
],
[
"# loading the Dataset (make sure to have the dataset downloaded)\n",
"_____no_output_____"
]
],
[
[
"**Note:** \nIf we import our dataset without defining the dtype of column *Region* as String, we will get a warning telling out the it has a mixed datatype. \nWe can get rid of this warning by explicitly defining the type of the values in this column by using the `dtype` parameter. \n`dtype={'Region': np.str}`",
"_____no_output_____"
]
],
[
[
"# looking at the data types of each column\n",
"_____no_output_____"
]
],
[
[
"**Note:** \nHere we can see the dtypes of each column. \nSince the String type is no primitive datatype, it's displayed as `object` here.",
"_____no_output_____"
]
],
[
[
"# showing the first 5 entries of the dataset\n",
"_____no_output_____"
]
],
[
[
"---",
"_____no_output_____"
],
[
"#### Mapping `Latitude` and `Longitude` to `lat` and `lon`",
"_____no_output_____"
],
[
"Most datasets won't be in the format that you want to have. Some of them might have their latitude and longitude values hidden in a different column. \nThis is where the data wrangling skills of lesson 1 are needed. \n\nFor the given dataset, the transformations are easy, we simply need to map the `Latitude` and `Longitude` columns into `lat` and `lon` columns which are used by geoplotlib.",
"_____no_output_____"
]
],
[
[
"# mapping Latitude to lat and Longitude to lon\n",
"_____no_output_____"
]
],
[
[
"**Note:** \nGeoplotlibs methods expect dataset columns `lat` and `lon` for plotting. This means your dataframe has to be tranfsormed to resemble this structure. ",
"_____no_output_____"
],
[
"---",
"_____no_output_____"
],
[
"#### Understanding our data",
"_____no_output_____"
],
[
"It's your first day at work, your boss hands you this dataset and wants you to dig into it and find the areas with the most adjacent cities that have a population of more than 100k. \nHe needs this information to figure out where to expand next. \n\nTo get a feeling for how many datapoints the dataset contains, we'll plot the whole dataset using dots.",
"_____no_output_____"
]
],
[
[
"# plotting the whole dataset with dots\n",
"_____no_output_____"
]
],
[
[
"Other than seeing the density of our datapoints, we also need to get some information about how the data is distributed.",
"_____no_output_____"
]
],
[
[
"# amount of countries and cities\n",
"_____no_output_____"
],
[
"# amount of cities per country (first 20 entries)\n",
"_____no_output_____"
],
[
"# average num of cities per country\n",
"_____no_output_____"
]
],
[
[
"Since we are only interested in areas with densely placed cities and high population, we can filter out cities without a population. ",
"_____no_output_____"
],
[
"#### Reducing our data",
"_____no_output_____"
],
[
"Our dataset has more than 3Mio cities listed. Many of them are really small and can be ignored, given our objective for this activity. \nWe only want to look at those cities that have a value given for their population density.\n\n**Note:** \nIf you're having trouble filtering your dataset, you can always check back with the activities in lesson1.",
"_____no_output_____"
]
],
[
[
"# filter for countries with a population entry (Population > 0)\n",
"_____no_output_____"
],
[
"# displaying the first 5 items from dataset_with_pop\n",
"_____no_output_____"
],
[
"# showing all cities with a defined population with a dot density plot\n",
"_____no_output_____"
]
],
[
[
"**Note:** \nNot only the execution time of the visualization has been decreased but we already can see where the areas with more cities are. \n\nFollowing the request from our boss, we shall only consider areas that have a high density of adjacent cities with a population of more than 100k.",
"_____no_output_____"
]
],
[
[
"# dataset with cities with population of >= 100k\n",
"_____no_output_____"
],
[
"# displaying all cities >= 100k population with a fixed bounding box (WORLD) in a dot density plot\nfrom geoplotlib.utils import BoundingBox\n",
"_____no_output_____"
]
],
[
[
"**Note:** \nIn order to get the same view on our map every time, we can set the bounding box to the constant viewport declared in the geoplotlib library. \nWe can also instantiate the BoundingBox class with values for north, west, south, and east.",
"_____no_output_____"
],
[
"---",
"_____no_output_____"
],
[
"#### Finding the best area",
"_____no_output_____"
],
[
"After reducing our data, we can now use more complex plots to filter down our data even more. \nThinking back to the first exercise, we've seen that histograms and voronoi plots can give us a quick visual representation of the density of data.\n\n**Note:** \nTry playing around with the different color maps of the plotting methods, sometimes using other colors does not only improve the visuals but also the amount of information you can take from the visualization.",
"_____no_output_____"
]
],
[
[
"# using filled voronoi to find dense areas\n",
"_____no_output_____"
]
],
[
[
"In the voronoi plot we can see tendencies. \nGermany, Great Britain, Nigeria, India, Japan, Java, the East Coast of the USA, and Brasil stick out. \nWe can now again filter our data and only look at those countries to find the best suited. ",
"_____no_output_____"
],
[
"---",
"_____no_output_____"
],
[
"#### Final call",
"_____no_output_____"
],
[
"After meeting with your boss, he tells you that we want to stick to Europe when it comes to expanding. \nFilter your data for Germany and Great Britain only and decide which area is your final proposal.",
"_____no_output_____"
]
],
[
[
"# filter 100k dataset for cities in Germany and GB\n",
"_____no_output_____"
],
[
"# using Delaunay triangulation to find the most dense aree\n",
"_____no_output_____"
]
],
[
[
"Looking at our delaunay visualization, we can quickly see that area around Cologne and Düsseldorf stick out. \nWith those insights, we can now get back to our boss and talk about what we found out.",
"_____no_output_____"
],
[
"**Note:** \nAs mentioned before, it's important to know which visualization type helps you achieve the best insights. \nWe e.g. could've simply used a dot density map in the final call which would have also given us an idea about where there are most cities. \nHowever delaunay triangulation is a good approach here that makes details pop nearly instantly.",
"_____no_output_____"
]
]
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown",
"markdown",
"markdown"
],
[
"code",
"code",
"code"
],
[
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown",
"markdown",
"markdown"
],
[
"code",
"code"
],
[
"markdown",
"markdown"
]
] |
e7fd281607286cbfde86ef0099ef58722519fef2 | 13,152 | ipynb | Jupyter Notebook | BRL/vehicleload/Preprocessing_readedge.ipynb | kidrabit/Data-Visualization-Lab-RND | baa19ee4e9f3422a052794e50791495632290b36 | [
"Apache-2.0"
] | 1 | 2022-01-18T01:53:34.000Z | 2022-01-18T01:53:34.000Z | BRL/vehicleload/Preprocessing_readedge.ipynb | kidrabit/Data-Visualization-Lab-RND | baa19ee4e9f3422a052794e50791495632290b36 | [
"Apache-2.0"
] | null | null | null | BRL/vehicleload/Preprocessing_readedge.ipynb | kidrabit/Data-Visualization-Lab-RND | baa19ee4e9f3422a052794e50791495632290b36 | [
"Apache-2.0"
] | null | null | null | 35.072 | 190 | 0.502737 | [
[
[
"import numpy as np\nimport pandas as pd\nimport pickle\nimport math\nimport pandas as pd\nfrom pandas import HDFStore \nimport argparse\n\n###################################################################################\n#location\nnode_ids_filename = 'data/node_locate.txt'\nwith open(node_ids_filename) as f:\n _node_ids = f.read().strip()\n _node_ids = _node_ids.replace('\\n', ' ')\n _node_ids = _node_ids.split(' ')\n\nnode_id = []\nnode_ids = []\nnode_name = []\nnode_loc = []\nfor i in range(len(_node_ids)):\n if(_node_ids[i] == ''):\n continue\n node_id.append(_node_ids[i])\n\nfor i in range(len(node_id)):\n if(i <= 4):\n continue\n if(node_id[i] == 'C'):\n continue\n node_ids.append(node_id[i])\nfor i in range(len(node_ids)):\n if(i % 4 == 0):\n line = []\n node_name.append(node_ids[i])\n line.append(node_ids[i+1])\n line.append(node_ids[i+2])\n line.append(node_ids[i+3])\n node_loc.append(line)\nnode_loc=np.array(node_loc).astype('float32')\nprint(\"node_loc\",node_loc.shape)\n\n###################################################################################\n#save to npy file\nnp.save('data/node_signal', _node_signal)\nnp.save('data/node_loc', node_loc)",
"_node_signal (135, 106, 7)\nnode_loc (106, 3)\n-118.45 139.457\n-31.3102 21.6857\n-563.119 91.0359\n-0.0160272 0.000982589\n-0.0147581 0.0163793\n-0.00474041 0.0034676\n0.0 0.0\n"
],
[
"import numpy as np\nimport pandas as pd\nimport pickle\nimport math\nimport pandas as pd\nfrom pandas import HDFStore \nimport argparse\nimport matplotlib.pyplot as plt\n\n####################################################################################\nnode_loc=np.load('data/node_loc.npy')\nn_nodes=node_loc.shape[0]\nnode_id = [str(i) for i in range(1,1+n_nodes)]\nnode_id_to_ind = {}\nfor i, node in enumerate(node_id, 1):\n node_id_to_ind[node] = i\nnum_node = len(node_id)\n\n#element 1################\nnode_link_filename = 'data/raw_data/element1.txt'\nlink_mx = np.zeros((num_node, num_node), dtype=np.float32)\nwith open(node_link_filename) as f:\n _node_link = f.read().strip()\n _node_link = _node_link.replace('\\n', ' ')\n _node_link = _node_link.split(' ')\n\nnode_link = []\nfor i in range(len(_node_link)):\n if(_node_link[i] == ''):\n continue\n node_link.append(_node_link[i])\n\nelement_id = []\n\nnode_link = np.reshape(node_link, (-1, 6))\nnode_link = node_link.astype('int32')\n\nlink_mx = np.zeros((num_node, num_node), dtype=np.float32)\nfor i in range(39):\n link_mx[node_link[i][2]-1][node_link[i][3]-1] = node_link[i][1]\n link_mx[node_link[i][3]-1][node_link[i][2]-1] = node_link[i][1]\nlink_mx1_1=link_mx\n\nlink_mx = np.zeros((num_node, num_node), dtype=np.float32)\nfor i in range(39,node_link.shape[0]):\n link_mx[node_link[i][2]-1][node_link[i][3]-1] = node_link[i][1]\n link_mx[node_link[i][3]-1][node_link[i][2]-1] = node_link[i][1] \nlink_mx1_2=link_mx\n\n#element 2################\nnode_link_filename = 'data/raw_data/element2.txt'\nlink_mx = np.zeros((num_node, num_node), dtype=np.float32)\nwith open(node_link_filename) as f:\n _node_link = f.read().strip()\n _node_link = _node_link.replace('\\n', ' ')\n _node_link = _node_link.split(' ')\n\nnode_link = []\nfor i in range(len(_node_link)):\n if(_node_link[i] == ''):\n continue\n node_link.append(_node_link[i])\n\nelement_id = []\nnode_link=[i for i in node_link if i.isdigit()]\nnode_link = np.reshape(node_link, (-1, 5))\nnode_link = node_link.astype('int32')\nfor i in range(node_link.shape[0]):\n link_mx[node_link[i][1]-1][node_link[i][2]-1] = node_link[i][1]\n link_mx[node_link[i][2]-1][node_link[i][1]-1] = node_link[i][1]\nlink_mx2=link_mx\n\n#element 3################\nnode_link_filename = 'data/raw_data/element3.txt'\nlink_mx = np.zeros((num_node, num_node), dtype=np.float32)\nwith open(node_link_filename) as f:\n _node_link = f.read().strip()\n _node_link = _node_link.replace('\\n', ' ')\n _node_link = _node_link.split(' ')\n\nnode_link = []\nfor i in range(len(_node_link)):\n if(_node_link[i] == ''):\n continue\n node_link.append(_node_link[i])\n\nelement_id = []\n\nnode_link = np.reshape(node_link, (-1, 6))\nnode_link = node_link.astype('int32')\nfor i in range(node_link.shape[0]):\n link_mx[node_link[i][2]-1][node_link[i][3]-1] = node_link[i][1]\n link_mx[node_link[i][3]-1][node_link[i][2]-1] = node_link[i][1]\nlink_mx3=link_mx\n\n####################################################################################\nlink_mx=link_mx1_1+link_mx1_2+link_mx2+link_mx3\n#np.save('data/link_mx', link_mx)\nnp.sum(link_mx1_1!=0),np.sum(link_mx1_2!=0),np.sum(link_mx2!=0),np.sum(link_mx3!=0)\n\n\n####################################################################################\n#adj matrix\n#link_mx=np.load('data/link_mx.npy')\nlink_mx0=link_mx1_1+link_mx1_2+link_mx2+link_mx3\nlink_mx0=link_mx0+np.eye(106)\nlink_mx0[link_mx0!=0]=1\n\nlink_mx1_1=link_mx1_1+np.eye(106)#cross 
beam\nlink_mx1_1[link_mx1_1!=0]=1\n\nlink_mx1_2=link_mx1_2+np.eye(106)#girder\nlink_mx1_2[link_mx1_2!=0]=1\n\nlink_mx2=link_mx2+np.eye(106)#cable\nlink_mx2[link_mx2!=0]=1\n\nlink_mx3=link_mx3+np.eye(106)#tower\nlink_mx3[link_mx3!=0]=1\n\n####################################################################################\n#getting distance matrix\nnode_loc=np.load('data/node_loc.npy')\nnum_node=len(node_loc)\nn_nodes=node_loc.shape[0]\ndist_mx = np.zeros((num_node, num_node), dtype=np.float32)\ndist_mx[:] = np.inf\nfor i in range(num_node):\n for j in range(num_node):\n x = float(node_loc[i][0]) - float(node_loc[j][0])\n y = float(node_loc[i][1]) - float(node_loc[j][1])\n z = float(node_loc[i][2]) - float(node_loc[j][2])\n x = math.pow(x,2)\n y = math.pow(y,2)\n z = math.pow(z,2)\n dis = math.sqrt(x+y+z)\n dist_mx[i, j] = dis\n#distances = dist_mx[~np.isinf(dist_mx)].flatten()\n# std = distances.std()\n# adj_mx = np.exp(-np.square(dist_mx / std))\nadj_mx=dist_mx\n\nadj_mx0=adj_mx*link_mx0\nprint(adj_mx)\n\n################################################\nimport collections\nprint(collections.Counter(adj_mx0.flatten()))\n\n#cross beam-#girder-#cable-#tower\ntype_e=np.array([200000,200000,15800,200000])#type_e=np.array([210000,180000,140000,200000])\ntype_e=(type_e)#/np.max(type_e)\nprint(type_e,type_e.std(),adj_mx0.std())\n\nlink_mx1_1_e=link_mx1_1-np.eye(106)\nlink_mx1_1_e=link_mx1_1_e*type_e[0]#cross beam\n\nlink_mx1_2_e=link_mx1_2-np.eye(106)\nlink_mx1_2_e=link_mx1_2_e*type_e[1]#girder\n\nlink_mx2_e=link_mx2-np.eye(106)\nlink_mx2_e=link_mx2_e*type_e[2]#cable\n\nlink_mx3_e=link_mx3-np.eye(106)\nlink_mx3_e=link_mx3_e*type_e[3]#tower\n\nadj_mx_e=link_mx1_1_e+link_mx1_2_e+link_mx2_e+link_mx3_e\nprint(adj_mx_e)\n\nprint(collections.Counter(adj_mx_e.flatten()))",
"[[ 0. 6100. 12200. ... 178678.1 179105.72 179582.45]\n [ 6100. 0. 6100. ... 172640.92 173083.48 173576.75]\n [ 12200. 6100. 0. ... 166608.34 167066.88 167577.86]\n ...\n [178678.1 172640.92 166608.34 ... 0. 3000. 6000. ]\n [179105.72 173083.48 167066.88 ... 3000. 0. 3000. ]\n [179582.45 173576.75 167577.86 ... 6000. 3000. 0. ]]\nCounter({0.0: 10866, 6100.0: 152, 7500.0: 82, 3000.0: 32, 62562.05078125: 16, 50520.1953125: 16, 38810.4375: 16, 27854.802734375: 16, 19005.525390625: 16, 9000.0: 16, 10000.0: 8})\n[200000 200000 15800 200000] 79760.9396885468 3727.836796625876\n[[ 0. 200000. 0. ... 0. 0. 0.]\n [200000. 0. 200000. ... 0. 0. 0.]\n [ 0. 200000. 0. ... 0. 0. 0.]\n ...\n [ 0. 0. 0. ... 0. 200000. 0.]\n [ 0. 0. 0. ... 200000. 0. 200000.]\n [ 0. 0. 0. ... 0. 200000. 0.]]\nCounter({0.0: 10866, 200000.0: 290, 15800.0: 80})\n"
],
[
"adj_mx=[adj_mx0,adj_mx_e]\nprint(np.sum(adj_mx0==0)/(106*106))\nprint(np.sum(adj_mx_e==0)/(106*106))\n########################################\noutput_pkl_filename='data/sensor_graph/adj_mx_type_e.pkl'\n########################################\nwith open(output_pkl_filename, 'wb') as f:\n pickle.dump([[], [], adj_mx], f, protocol=2)",
"0.9670701317194731\n0.9670701317194731\n"
],
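[
"# Sanity check (added sketch): reload the pickle just written to confirm the layout.\n# The dump is a 3-slot list whose first two slots (presumably id bookkeeping) were\n# left empty above; the third slot is the pair [adj_mx0, adj_mx_e].\nwith open(output_pkl_filename, 'rb') as f:\n    _, _, adj_mx_loaded = pickle.load(f)\nprint(len(adj_mx_loaded))\nprint(adj_mx_loaded[0].shape, adj_mx_loaded[1].shape)",
"_____no_output_____"
],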
[
"#####area\n#cross beam\nw=165.10;d=525.78;wt=8.89;ft=11.43;cb_a=(w*d)-(w-wt)*(d-2*ft);print(cb_a)#8245.14480000001\n#girder\nb=1000;h=1000;t=50;g_a=g_a=(b*h)-((b-t)*(h-t));print(g_a)#97500\n#cable\nc_a=3848.45#0\n#tower\nb=1000;h=2000;t=50;t_a=(b*h)-((b-t)*(h-t));print(t_a)#147500\n\n#cross beam-#girder-#cable-#tower\ntype_a=np.array([cb_a,g_a,c_a,t_a])#type_e=np.array([210000,180000,140000,200000])\ntype_a=(type_a)#/np.max(type_a)\nprint(type_a,type_a.std())\n\nlink_mx1_1_a=link_mx1_1-np.eye(106)\nlink_mx1_1_a=link_mx1_1_a*type_a[0]#cross beam\n\nlink_mx1_2_a=link_mx1_2-np.eye(106)\nlink_mx1_2_a=link_mx1_2_a*type_a[1]#girder\n\nlink_mx2_a=link_mx2-np.eye(106)\nlink_mx2_a=link_mx2_a*type_a[2]#cable\n\nlink_mx3_a=link_mx3-np.eye(106)\nlink_mx3_a=link_mx3_a*type_a[3]#tower\n\nadj_mx_a=link_mx1_1_a+link_mx1_2_a+link_mx2_a+link_mx3_a\nprint(collections.Counter(adj_mx_a.flatten()))\n\nwith open('data/sensor_graph/adj_mx_type_a.pkl', 'wb') as f:\n pickle.dump(adj_mx_a, f, protocol=2)",
"8245.14480000001\n97500\n147500\n[ 8245.1448 97500. 3848.45 147500. ] 60870.793198336956\nCounter({0.0: 10866, 97500.0: 152, 3848.45: 80, 8245.14480000001: 78, 147500.0: 60})\n"
]
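,
[
"# Sanity check (added sketch): reload the area-weighted pickle and confirm shape and\n# symmetry. The two files use different layouts -- adj_mx_type_e.pkl holds a 3-slot\n# list, adj_mx_type_a.pkl the bare matrix -- so downstream loaders must match.\nwith open('data/sensor_graph/adj_mx_type_a.pkl', 'rb') as f:\n    adj_mx_a_loaded = pickle.load(f)\nprint(adj_mx_a_loaded.shape)\nprint(np.allclose(adj_mx_a_loaded, adj_mx_a_loaded.T))  # undirected graph => symmetric",
"_____no_output_____"
]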
]
] | [
"code"
] | [
[
"code",
"code",
"code",
"code"
]
] |
e7fd32b8400f33ae867fa988c1610613e1608171 | 6,411 | ipynb | Jupyter Notebook | DecisionTreeClassifier.ipynb | manishgaurav84/ml-from-scratch | 2af963d6e13889b6dcc8486ecaa0374577cee0c8 | [
"MIT"
] | null | null | null | DecisionTreeClassifier.ipynb | manishgaurav84/ml-from-scratch | 2af963d6e13889b6dcc8486ecaa0374577cee0c8 | [
"MIT"
] | null | null | null | DecisionTreeClassifier.ipynb | manishgaurav84/ml-from-scratch | 2af963d6e13889b6dcc8486ecaa0374577cee0c8 | [
"MIT"
] | null | null | null | 33.390625 | 100 | 0.511621 | [
[
[
"import numpy as np\nfrom collections import Counter\n\n\ndef entropy(y):\n hist = np.bincount(y)\n ps = hist / len(y)\n return -np.sum([p * np.log2(p) for p in ps if p > 0])\n\n\nclass Node:\n\n def __init__(self, feature=None, threshold=None, left=None, right=None, *, value=None):\n self.feature = feature\n self.threshold = threshold\n self.left = left\n self.right = right\n self.value = value\n\n def is_leaf_node(self):\n return self.value is not None\n\n\nclass DecisionTree:\n\n def __init__(self, min_samples_split=2, max_depth=100, n_feats=None):\n self.min_samples_split = min_samples_split\n self.max_depth = max_depth\n self.n_feats = n_feats\n self.root = None\n\n def fit(self, X, y):\n self.n_feats = X.shape[1] if not self.n_feats else min(self.n_feats, X.shape[1])\n self.root = self._grow_tree(X, y)\n\n def predict(self, X):\n return np.array([self._traverse_tree(x, self.root) for x in X])\n\n def _grow_tree(self, X, y, depth=0):\n n_samples, n_features = X.shape\n n_labels = len(np.unique(y))\n\n # stopping criteria\n if (depth >= self.max_depth\n or n_labels == 1\n or n_samples < self.min_samples_split):\n leaf_value = self._most_common_label(y)\n return Node(value=leaf_value)\n\n feat_idxs = np.random.choice(n_features, self.n_feats, replace=False)\n\n # greedily select the best split according to information gain\n best_feat, best_thresh = self._best_criteria(X, y, feat_idxs)\n \n # grow the children that result from the split\n left_idxs, right_idxs = self._split(X[:, best_feat], best_thresh)\n left = self._grow_tree(X[left_idxs, :], y[left_idxs], depth+1)\n right = self._grow_tree(X[right_idxs, :], y[right_idxs], depth+1)\n return Node(best_feat, best_thresh, left, right)\n\n def _best_criteria(self, X, y, feat_idxs):\n best_gain = -1\n split_idx, split_thresh = None, None\n for feat_idx in feat_idxs:\n X_column = X[:, feat_idx]\n thresholds = np.unique(X_column)\n for threshold in thresholds:\n gain = self._information_gain(y, X_column, threshold)\n\n if gain > best_gain:\n best_gain = gain\n split_idx = feat_idx\n split_thresh = threshold\n\n return split_idx, split_thresh\n\n def _information_gain(self, y, X_column, split_thresh):\n # parent loss\n parent_entropy = entropy(y)\n\n # generate split\n left_idxs, right_idxs = self._split(X_column, split_thresh)\n\n if len(left_idxs) == 0 or len(right_idxs) == 0:\n return 0\n\n # compute the weighted avg. of the loss for the children\n n = len(y)\n n_l, n_r = len(left_idxs), len(right_idxs)\n e_l, e_r = entropy(y[left_idxs]), entropy(y[right_idxs])\n child_entropy = (n_l / n) * e_l + (n_r / n) * e_r\n\n # information gain is difference in loss before vs. after split\n ig = parent_entropy - child_entropy\n return ig\n\n def _split(self, X_column, split_thresh):\n left_idxs = np.argwhere(X_column <= split_thresh).flatten()\n right_idxs = np.argwhere(X_column > split_thresh).flatten()\n return left_idxs, right_idxs\n\n def _traverse_tree(self, x, node):\n if node.is_leaf_node():\n return node.value\n\n if x[node.feature] <= node.threshold:\n return self._traverse_tree(x, node.left)\n return self._traverse_tree(x, node.right)\n\n def _most_common_label(self, y):\n counter = Counter(y)\n most_common = counter.most_common(1)[0][0]\n return most_common",
"_____no_output_____"
],
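[
"# Quick sanity check (added): the entropy helper should give 1 bit for a balanced\n# binary label set and 0 for a pure one (numpy may print the latter as -0.0).\nprint(entropy(np.array([0, 1, 0, 1])))  # 1.0\nprint(entropy(np.array([1, 1, 1, 1])))  # 0.0 (pure node)",
"_____no_output_____"
],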
[
"import numpy as np\nfrom sklearn import datasets\nfrom sklearn.model_selection import train_test_split\n\n\ndef accuracy(y_true, y_pred):\n accuracy = np.sum(y_true == y_pred) / len(y_true)\n return accuracy\n\ndata = datasets.load_breast_cancer()\nX = data.data\ny = data.target\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1234)\n\nclf = DecisionTree(max_depth=10)\nclf.fit(X_train, y_train)\n \ny_pred = clf.predict(X_test)\nacc = accuracy(y_test, y_pred)\n\nprint (\"Accuracy:\", acc)",
"Accuracy: 0.9122807017543859\n"
]
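,
[
"# Cross-check (added sketch): sklearn's DecisionTreeClassifier on the same split.\n# Expect a similar accuracy, not an identical one -- tie-breaking among equally\n# good splits can differ between the two implementations.\nfrom sklearn.tree import DecisionTreeClassifier\n\nsk_clf = DecisionTreeClassifier(max_depth=10, random_state=1234)\nsk_clf.fit(X_train, y_train)\nprint(\"sklearn accuracy:\", accuracy(y_test, sk_clf.predict(X_test)))",
"_____no_output_____"
]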
]
] | [
"code"
] | [
[
"code",
"code"
]
] |
e7fd596077ce0ad0dba7a2b55f19b5a57d2d9bcb | 26,968 | ipynb | Jupyter Notebook | SQL Case Study - Country Club/Unit-8.3_SQL-Project.ipynb | shalin4788/Springboard-Do-not-refer- | e7627e6f4b09456e08c6f10baeb66b0a22422b7a | [
"MIT"
] | 2 | 2020-10-23T06:24:18.000Z | 2020-10-23T06:24:25.000Z | SQL Case Study - Country Club/Unit-8.3_SQL-Project.ipynb | shalin4788/Springboard-Do-not-refer- | e7627e6f4b09456e08c6f10baeb66b0a22422b7a | [
"MIT"
] | 5 | 2021-06-08T22:56:21.000Z | 2022-01-13T03:35:07.000Z | SQL Case Study - Country Club/Unit-8.3_SQL-Project.ipynb | shalin4788/Springboard-Do-not-refer- | e7627e6f4b09456e08c6f10baeb66b0a22422b7a | [
"MIT"
] | null | null | null | 29.798895 | 145 | 0.358351 | [
[
[
"# Import packages\nfrom sqlalchemy import create_engine\nimport pandas as pd",
"_____no_output_____"
],
[
"# Create engine: engine\nengine = create_engine('sqlite:///country_club.db')",
"_____no_output_____"
],
[
"# Execute query and store records in DataFrame: df\ndf = pd.read_sql_query(\"Select * from Members\", engine)",
"_____no_output_____"
],
[
"df.head(5)",
"_____no_output_____"
]
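,
[
"# Added: list the tables in the SQLite file, so the schema the queries below rely on\n# (Members, Bookings, Facilities) is visible up front.\npd.read_sql_query(\"SELECT name FROM sqlite_master WHERE type = 'table'\", engine)",
"_____no_output_____"
]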
],
[
[
"### Q10. Produce a list of facilities with a total revenue less than 1000.The output of facility name and total revenue, sorted by revenue.",
"_____no_output_____"
]
],
[
[
"query = \"\"\"\nSELECT \n sub2.name AS facilityname, \n sub2.totalrevenue AS totalrevenue \nFROM \n (\n SELECT \n sub1.facilityname AS name, \n SUM(sub1.revenue) AS totalrevenue \n FROM \n (\n SELECT \n b.bookid, \n f.name AS facilityname, \n CASE WHEN b.memid = 0 THEN (b.slots * f.guestcost) ELSE b.slots * f.membercost END AS Revenue \n FROM \n Bookings AS b \n LEFT JOIN Members AS m ON m.memid = b.memid \n LEFT JOIN Facilities AS f ON f.facid = b.facid\n ) AS sub1 \n GROUP BY \n sub1.facilityname\n ) AS sub2 \nGROUP BY \n facilityname \nHAVING \n totalrevenue < 1000 \nORDER BY \n totalrevenue DESC;\n\"\"\"\npd.read_sql_query(query, engine)",
"_____no_output_____"
]
],
[
[
"### Q11: Produce a report of members and who recommended them in alphabetic surname,firstname order",
"_____no_output_____"
]
],
[
[
"query = \"\"\"\nSELECT \n sub2.memberName AS membername, \n sub2.recommenderfirstname || ', ' || sub2.recommendersurname AS recommendername \nFROM \n (\n SELECT \n sub1.memberName AS memberName, \n sub1.recommenderId AS memberId, \n m.firstname AS recommenderfirstname, \n m.surname AS recommendersurname \n FROM \n (\n SELECT \n m2.memid AS memberId, \n m1.firstname || ', ' || m1.surname AS memberName, \n m2.recommendedby AS recommenderId \n FROM \n Members AS m1 \n INNER JOIN Members AS m2 ON m1.memid = m2.memid \n WHERE \n (\n m2.recommendedby IS NOT NULL \n OR m2.recommendedby <> ' ' \n OR m2.recommendedby <> ''\n ) \n AND m1.memid <> 0\n ) AS sub1 \n LEFT JOIN Members AS m ON sub1.recommenderId = m.memid \n WHERE \n m.memid <> 0\n ) AS sub2;\n\"\"\"\npd.read_sql_query(query, engine)",
"_____no_output_____"
]
],
[
[
"### Q12: Find the facilities with their usage by member, but not guests ",
"_____no_output_____"
]
],
[
[
"query = \"\"\"\nSELECT \n f.name AS facilityname, \n SUM(b.slots) AS slot_usage \nFROM \n Bookings AS b \n LEFT JOIN Facilities AS f ON f.facid = b.facid \n LEFT JOIN Members AS m ON m.memid = b.memid \nWHERE \n b.memid <> 0 \nGROUP BY \n facilityname \nORDER BY \n slot_usage DESC;\n\n\"\"\"\npd.read_sql_query(query, engine)",
"_____no_output_____"
]
],
[
[
"### Q13: Find the facilities usage by month, but not guests ",
"_____no_output_____"
]
],
[
[
"query = \"\"\"\nSELECT \n sub.MONTH AS MONTH, \n sub.facilityname AS facility, \n SUM(sub.slotNumber) AS slotusage \nFROM \n (\n SELECT \n strftime('%m', starttime) AS MONTH, \n f.name AS facilityname, \n b.slots AS slotNumber \n FROM \n Bookings AS b \n LEFT JOIN Facilities AS f ON f.facid = b.facid \n LEFT JOIN Members AS m ON m.memid = b.memid \n WHERE \n b.memid <> 0\n ) sub \nGROUP BY \n MONTH, \n facility \nORDER BY \n MONTH, \n slotusage DESC;\n\"\"\"\npd.read_sql_query(query, engine)",
"_____no_output_____"
]
]
] | [
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code"
] | [
[
"code",
"code",
"code",
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
]
] |
e7fd5b7276c446eb1f7d781dd134be5eab0f7287 | 265,861 | ipynb | Jupyter Notebook | scikit-learn/scikit-learn-svm.ipynb | AadityaGupta/Artificial-Intelligence-Deep-Learning-Machine-Learning-Tutorials | 352dd6d9a785e22fde0ce53a6b0c2e56f4964950 | [
"Apache-2.0"
] | 24,753 | 2015-06-01T10:56:36.000Z | 2022-03-31T19:19:58.000Z | scikit-learn/scikit-learn-svm.ipynb | AadityaGupta/Artificial-Intelligence-Deep-Learning-Machine-Learning-Tutorials | 352dd6d9a785e22fde0ce53a6b0c2e56f4964950 | [
"Apache-2.0"
] | 150 | 2017-08-28T14:59:36.000Z | 2022-03-11T23:21:35.000Z | scikit-learn/scikit-learn-svm.ipynb | AadityaGupta/Artificial-Intelligence-Deep-Learning-Machine-Learning-Tutorials | 352dd6d9a785e22fde0ce53a6b0c2e56f4964950 | [
"Apache-2.0"
] | 7,653 | 2015-06-06T23:19:20.000Z | 2022-03-31T06:57:39.000Z | 708.962667 | 52,452 | 0.9384 | [
[
[
"# scikit-learn-svm",
"_____no_output_____"
],
[
"Credits: Forked from [PyCon 2015 Scikit-learn Tutorial](https://github.com/jakevdp/sklearn_pycon2015) by Jake VanderPlas\n\n* Support Vector Machine Classifier\n* Support Vector Machine with Kernels Classifier",
"_____no_output_____"
]
],
[
[
"%matplotlib inline\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn; \nfrom sklearn.linear_model import LinearRegression\nfrom scipy import stats\nimport pylab as pl\n\nseaborn.set()",
"_____no_output_____"
]
],
[
[
"## Support Vector Machine Classifier",
"_____no_output_____"
],
[
"Support Vector Machines (SVMs) are a powerful supervised learning algorithm used for **classification** or for **regression**. SVMs draw a boundary between clusters of data. SVMs attempt to maximize the margin between sets of points. Many lines can be drawn to separate the points above:",
"_____no_output_____"
]
],
[
[
"from sklearn.datasets.samples_generator import make_blobs\nX, y = make_blobs(n_samples=50, centers=2,\n random_state=0, cluster_std=0.60)\n\nxfit = np.linspace(-1, 3.5)\nplt.scatter(X[:, 0], X[:, 1], c=y, s=50, cmap='spring')\n\n# Draw three lines that couple separate the data\nfor m, b, d in [(1, 0.65, 0.33), (0.5, 1.6, 0.55), (-0.2, 2.9, 0.2)]:\n yfit = m * xfit + b\n plt.plot(xfit, yfit, '-k')\n plt.fill_between(xfit, yfit - d, yfit + d, edgecolor='none', color='#AAAAAA', alpha=0.4)\n\nplt.xlim(-1, 3.5);",
"_____no_output_____"
]
],
[
[
"Fit the model:",
"_____no_output_____"
]
],
[
[
"from sklearn.svm import SVC\nclf = SVC(kernel='linear')\nclf.fit(X, y)",
"_____no_output_____"
]
],
[
[
"Plot the boundary:",
"_____no_output_____"
]
],
[
[
"def plot_svc_decision_function(clf, ax=None):\n \"\"\"Plot the decision function for a 2D SVC\"\"\"\n if ax is None:\n ax = plt.gca()\n x = np.linspace(plt.xlim()[0], plt.xlim()[1], 30)\n y = np.linspace(plt.ylim()[0], plt.ylim()[1], 30)\n Y, X = np.meshgrid(y, x)\n P = np.zeros_like(X)\n for i, xi in enumerate(x):\n for j, yj in enumerate(y):\n P[i, j] = clf.decision_function([xi, yj])\n # plot the margins\n ax.contour(X, Y, P, colors='k',\n levels=[-1, 0, 1], alpha=0.5,\n linestyles=['--', '-', '--'])",
"_____no_output_____"
]
],
[
[
"In the following plot the dashed lines touch a couple of the points known as *support vectors*, which are stored in the ``support_vectors_`` attribute of the classifier:",
"_____no_output_____"
]
],
[
[
"plt.scatter(X[:, 0], X[:, 1], c=y, s=50, cmap='spring')\nplot_svc_decision_function(clf)\nplt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1],\n s=200, facecolors='none');",
"_____no_output_____"
]
],
[
[
"Use IPython's ``interact`` functionality to explore how the distribution of points affects the support vectors and the discriminative fit:",
"_____no_output_____"
]
],
[
[
"from IPython.html.widgets import interact\n\ndef plot_svm(N=100):\n X, y = make_blobs(n_samples=200, centers=2,\n random_state=0, cluster_std=0.60)\n X = X[:N]\n y = y[:N]\n clf = SVC(kernel='linear')\n clf.fit(X, y)\n plt.scatter(X[:, 0], X[:, 1], c=y, s=50, cmap='spring')\n plt.xlim(-1, 4)\n plt.ylim(-1, 6)\n plot_svc_decision_function(clf, plt.gca())\n plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1],\n s=200, facecolors='none')\n \ninteract(plot_svm, N=[10, 200], kernel='linear');",
"_____no_output_____"
]
],
[
[
"## Support Vector Machine with Kernels Classifier\n\nKernels are useful when the decision boundary is not linear. A Kernel is some functional transformation of the input data. SVMs have clever tricks to ensure kernel calculations are efficient. In the example below, a linear boundary is not useful in separating the groups of points:",
"_____no_output_____"
]
],
[
[
"from sklearn.datasets.samples_generator import make_circles\nX, y = make_circles(100, factor=.1, noise=.1)\n\nclf = SVC(kernel='linear').fit(X, y)\n\nplt.scatter(X[:, 0], X[:, 1], c=y, s=50, cmap='spring')\nplot_svc_decision_function(clf);",
"_____no_output_____"
]
],
[
[
"A simple model that could be useful is a **radial basis function**:",
"_____no_output_____"
]
],
[
[
"r = np.exp(-(X[:, 0] ** 2 + X[:, 1] ** 2))\n\nfrom mpl_toolkits import mplot3d\n\ndef plot_3D(elev=30, azim=30):\n ax = plt.subplot(projection='3d')\n ax.scatter3D(X[:, 0], X[:, 1], r, c=y, s=50, cmap='spring')\n ax.view_init(elev=elev, azim=azim)\n ax.set_xlabel('x')\n ax.set_ylabel('y')\n ax.set_zlabel('r')\n\ninteract(plot_3D, elev=[-90, 90], azip=(-180, 180));",
"_____no_output_____"
]
],
[
[
"In three dimensions, there is a clear separation between the data. Run the SVM with the rbf kernel:",
"_____no_output_____"
]
],
[
[
"clf = SVC(kernel='rbf')\nclf.fit(X, y)\n\nplt.scatter(X[:, 0], X[:, 1], c=y, s=50, cmap='spring')\nplot_svc_decision_function(clf)\nplt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1],\n s=200, facecolors='none');",
"_____no_output_____"
]
],
[
[
"SVM additional notes:\n* When using an SVM you need to choose the right values for parameters such as c and gamma. Model validation can help to determine these optimal values by trial and error.\n* SVMs run in O(n^3) performance. LinearSVC is scalable, SVC does not seem to be scalable. For large data sets try transforming the data to a smaller space and use LinearSVC with rbf.",
"_____no_output_____"
]
]
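,
[
[
"# Added sketch of the scaling advice above: approximate the RBF kernel with an\n# explicit random feature map (RBFSampler) and train the scalable LinearSVC on the\n# transformed data. Hyperparameters here (gamma, n_components) are illustrative.\nfrom sklearn.kernel_approximation import RBFSampler\nfrom sklearn.svm import LinearSVC\n\nrbf_map = RBFSampler(gamma=1.0, n_components=100, random_state=0)\nX_features = rbf_map.fit_transform(X)  # X, y: the circles data from above\nlin_clf = LinearSVC().fit(X_features, y)\nprint(\"train accuracy:\", lin_clf.score(X_features, y))",
"_____no_output_____"
]
]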
] | [
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown",
"code",
"markdown"
] | [
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown",
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
],
[
"code"
],
[
"markdown"
]
] |