Dataset schema (one row per notebook file; column: dtype, observed range):

  hexsha: string, fixed length 40
  size: int64, 6 to 14.9M
  ext: string, 1 class
  lang: string, 1 class
  max_stars_repo_path: string, length 6 to 260
  max_stars_repo_name: string, length 6 to 119
  max_stars_repo_head_hexsha: string, length 40 to 41
  max_stars_repo_licenses: sequence
  max_stars_count: int64, 1 to 191k
  max_stars_repo_stars_event_min_datetime: string, fixed length 24
  max_stars_repo_stars_event_max_datetime: string, fixed length 24
  max_issues_repo_path: string, length 6 to 260
  max_issues_repo_name: string, length 6 to 119
  max_issues_repo_head_hexsha: string, length 40 to 41
  max_issues_repo_licenses: sequence
  max_issues_count: int64, 1 to 67k
  max_issues_repo_issues_event_min_datetime: string, fixed length 24
  max_issues_repo_issues_event_max_datetime: string, fixed length 24
  max_forks_repo_path: string, length 6 to 260
  max_forks_repo_name: string, length 6 to 119
  max_forks_repo_head_hexsha: string, length 40 to 41
  max_forks_repo_licenses: sequence
  max_forks_count: int64, 1 to 105k
  max_forks_repo_forks_event_min_datetime: string, fixed length 24
  max_forks_repo_forks_event_max_datetime: string, fixed length 24
  avg_line_length: float64, 2 to 1.04M
  max_line_length: int64, 2 to 11.2M
  alphanum_fraction: float64, 0 to 1
  cells: sequence
  cell_types: sequence
  cell_type_groups: sequence
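A minimal sketch of how one shard of this dump could be loaded for inspection. The parquet format and the file name are assumptions about how the rows are stored, not facts from the dump itself.

```python
# Hypothetical inspection of one locally downloaded shard of this dataset.
# "shard.parquet" is a placeholder path; the real storage format may differ.
import pandas as pd

df = pd.read_parquet("shard.parquet")

# Sanity-check the schema listed above: one row per notebook file.
print(df.dtypes)
print(df["alphanum_fraction"].between(0, 1).all())  # schema says float64 in [0, 1]
print(df["size"].max())                             # schema caps size at ~14.9M
```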
Record 1
  hexsha: d012cd5ddc13f715c20f63b335c206cc4da7b2af
  size: 476,685 | ext: ipynb | lang: Jupyter Notebook
  path: 03-tabular/treeinterpreters.ipynb (same for the stars, issues and forks variants)
  repo: munnm/XAI-for-practitioners @ ff4549eed12b61b742a71ab2438001e288a8df62
  licenses: [ "Apache-2.0" ]
  stars, issues and forks counts and event datetimes: null
  avg_line_length: 506.572795 | max_line_length: 249,663 | alphanum_fraction: 0.704029
  cells:
[ [ [ "# Interpreting Tree Models", "_____no_output_____" ], [ "You'll need to install the `treeinterpreter` library. ", "_____no_output_____" ] ], [ [ "# !pip install treeinterpreter", "_____no_output_____" ], [ "import sklearn\nimport tensorflow as tf\nimport numpy as np\nimport pandas as pd\n\nfrom sklearn.datasets import fetch_california_housing\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.tree import DecisionTreeRegressor, export_graphviz\nfrom sklearn.ensemble import RandomForestRegressor\n\nfrom treeinterpreter import treeinterpreter as ti\n\nfrom IPython.display import Image", "_____no_output_____" ], [ "print('The scikit-learn version is {}.'.format(sklearn.__version__))", "The scikit-learn version is 1.0.1.\n" ] ], [ [ "## For Regression task\n\nLoad the dataset.", "_____no_output_____" ] ], [ [ "cal_housing = fetch_california_housing()", "_____no_output_____" ], [ "print(cal_housing.DESCR)", ".. _california_housing_dataset:\n\nCalifornia Housing dataset\n--------------------------\n\n**Data Set Characteristics:**\n\n :Number of Instances: 20640\n\n :Number of Attributes: 8 numeric, predictive attributes and the target\n\n :Attribute Information:\n - MedInc median income in block group\n - HouseAge median house age in block group\n - AveRooms average number of rooms per household\n - AveBedrms average number of bedrooms per household\n - Population block group population\n - AveOccup average number of household members\n - Latitude block group latitude\n - Longitude block group longitude\n\n :Missing Attribute Values: None\n\nThis dataset was obtained from the StatLib repository.\nhttps://www.dcc.fc.up.pt/~ltorgo/Regression/cal_housing.html\n\nThe target variable is the median house value for California districts,\nexpressed in hundreds of thousands of dollars ($100,000).\n\nThis dataset was derived from the 1990 U.S. census, using one row per census\nblock group. A block group is the smallest geographical unit for which the U.S.\nCensus Bureau publishes sample data (a block group typically has a population\nof 600 to 3,000 people).\n\nAn household is a group of people residing within a home. Since the average\nnumber of rooms and bedrooms in this dataset are provided per household, these\ncolumns may take surpinsingly large values for block groups with few households\nand many empty houses, such as vacation resorts.\n\nIt can be downloaded/loaded using the\n:func:`sklearn.datasets.fetch_california_housing` function.\n\n.. topic:: References\n\n - Pace, R. 
Kelley and Ronald Barry, Sparse Spatial Autoregressions,\n Statistics and Probability Letters, 33 (1997) 291-297\n\n" ], [ "X = cal_housing.data\ny = cal_housing.target\ncal_features = cal_housing.feature_names\n\ndf = pd.concat((pd.DataFrame(X, columns=cal_features),\n pd.DataFrame({'MedianHouseVal': y})), axis=1)", "_____no_output_____" ], [ "df.head()", "_____no_output_____" ] ], [ [ "#### Visualizing a Decision Tree\n\nYou will need to install the `pydotplus` library.", "_____no_output_____" ] ], [ [ "#!pip install pydotplus", "_____no_output_____" ], [ "import pydotplus", "_____no_output_____" ], [ "# Create dataset\nX_train, X_test, y_train, y_test = train_test_split(df[cal_features], y, test_size=0.2)", "_____no_output_____" ], [ "dt_reg = DecisionTreeRegressor(max_depth=3)\ndt_reg.fit(X_train, y_train)\n\ndot_data = export_graphviz(dt_reg, out_file=\"ca_housing.dot\",\n feature_names=cal_features,\n filled=True, rounded=True,\n special_characters=True,\n leaves_parallel=False)\ngraph = pydotplus.graphviz.graph_from_dot_file(\"ca_housing.dot\")\nImage(graph.create_png())", "_____no_output_____" ] ], [ [ "Make a sample prediction.", "_____no_output_____" ] ], [ [ "X_test[cal_features].iloc[[0]].transpose()", "_____no_output_____" ], [ "dt_reg.predict(X_test[cal_features].iloc[[0]])", "_____no_output_____" ] ], [ [ "The root node is the mean of the labels from the training data.", "_____no_output_____" ] ], [ [ "y_train.mean()", "_____no_output_____" ] ], [ [ "#### Train a simple Random Forest", "_____no_output_____" ] ], [ [ "rf_reg = RandomForestRegressor()\nrf_reg.fit(X_train, y_train)", "_____no_output_____" ], [ "print(f'Instance 11 prediction: {rf_reg.predict(X_test.iloc[[11]])}')\nprint(f'Instance 17 prediction: {rf_reg.predict(X_test.iloc[[17]])}')", "Instance 11 prediction: [1.6272303]\nInstance 17 prediction: [4.8203671]\n" ], [ "idx = 11\nfrom treeinterpreter import treeinterpreter\nprediction, bias, contributions = treeinterpreter.predict(rf_reg,\n X_test.iloc[[idx]].values)\nprint(f'prediction: {prediction}')\nprint(f'bias: {bias}')\nprint(f'contributions: {contributions}')", "prediction: [[1.6272303]]\nbias: [2.06854048]\ncontributions: [[-0.95730866 -0.01659199 -0.10754417 0.12281316 0.60712801 -0.05952097\n 0.02122049 -0.05150605]]\n" ], [ "for idx in [11, 17]:\n print(f'Instance: {idx}')\n prediction, bias, contributions = treeinterpreter.predict(\n rf_reg, X_test.iloc[[idx]].values)\n print(f'Bias term (training set mean): {bias}')\n print(f'Feature contributions:')\n for contribution, feature in sorted(zip(contributions[0],\n cal_features),\n key=lambda x: -abs(x[0])):\n print(feature, round(contribution, 2))\n print('-'*20) ", "Instance: 11\nBias term (training set mean): [2.06854048]\nFeature contributions:\nMedInc -0.96\nPopulation 0.61\nAveBedrms 0.12\nAveRooms -0.11\nAveOccup -0.06\nLongitude -0.05\nLatitude 0.02\nHouseAge -0.02\n--------------------\nInstance: 17\nBias term (training set mean): [2.06854048]\nFeature contributions:\nMedInc 2.8\nHouseAge -0.18\nPopulation 0.07\nAveBedrms 0.04\nLongitude -0.02\nAveRooms 0.02\nLatitude 0.01\nAveOccup 0.0\n--------------------\n" ], [ "idx = 17\nprediction, bias, contributions = treeinterpreter.predict(\n rf_reg, X_test.iloc[[idx]].values)\nprint(f'prediction: {prediction[0]}')\nprint(f'bias + contributions: {bias + np.sum(contributions)}')", "prediction: [4.8203671]\nbias + contributions: [4.8203671]\n" ] ], [ [ "In fact, we can check that this holds for all elements of the test set:", "_____no_output_____" ] ], [ 
[ "predictions, biases, contributions = treeinterpreter.predict(\n rf_reg, X_test.values)\n\nassert(np.allclose(np.squeeze(predictions), biases + np.sum(contributions, axis=1)))\nassert(np.allclose(rf_reg.predict(X_test), biases + np.sum(contributions, axis=1)))", "_____no_output_____" ] ], [ [ "## Comparing Contributions across data slices", "_____no_output_____" ] ], [ [ "X1_test = X_test[:X_test.shape[0]//2:]\nX2_test = X_test[X_test.shape[0]//2:]", "_____no_output_____" ], [ "predictions1, biases1, contributions1 = ti.predict(rf_reg, X1_test.values)\npredictions2, biases2, contributions2 = ti.predict(rf_reg, X2_test.values)", "_____no_output_____" ], [ "total_contribs1 = np.mean(contributions1, axis=0) \ntotal_contribs2 = np.mean(contributions2, axis=0) \n\nprint(f'Total contributions from X1_test: {total_contribs1}')\nprint(f'Total contributions from X2_test: {total_contribs2}')\n\nprint(f'Sum of feature contributions differences: {np.sum(total_contribs1 - total_contribs2)}')\nprint(f'Difference between the average predictions: {np.mean(predictions1) - np.mean(predictions2)}')", "_____no_output_____" ] ], [ [ "## TreeExplainer with SHAP", "_____no_output_____" ] ], [ [ "from sklearn.model_selection import train_test_split\nimport xgboost as xgb\nimport shap\n\n# print the JS visualization code to the notebook\nshap.initjs()", "_____no_output_____" ], [ "import xgboost as xgb\nxgb_reg = xgb.XGBClassifier(max_depth=3,\n n_estimators=300,\n learning_rate=0.05)\nxgb_reg.fit(X_train, y_train)", "The use of label encoder in XGBClassifier is deprecated and will be removed in a future release. To remove this warning, do the following: 1) Pass option use_label_encoder=False when constructing XGBClassifier object; and 2) Encode your labels (y) as integers starting with 0, i.e. 0, 1, 2, ..., [num_class - 1].\n" ], [ "model_mse_error = np.sqrt(np.mean((xgb_reg.predict(X_test) - y_test)**2))\nprint(f'Mean squared error of MLP model: {model_mse_error}')", "Mean squared error of MLP model: 6.05303719295192\n" ], [ "explainer = shap.TreeExplainer(xgb_reg)\nshap_values = explainer.shap_values(X_train)\n\nshap.force_plot(explainer.expected_value[1],\n shap_values[1][0,:],\n X_train.iloc[0,:])", "_____no_output_____" ], [ "shap.force_plot(explainer.expected_value[1], shap_values[1][:1000,:], X_train.iloc[:1000,:])", "_____no_output_____" ] ], [ [ "Copyright 2022 Google Inc. Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ] ]
Record 2
  hexsha: d012ecb3fbefcbb1488eae5dbed420b941b8f860
  size: 19,204 | ext: ipynb | lang: Jupyter Notebook
  path: Stock_Algorithms/Bayesian_Ridge_Regression_Part2.ipynb (same for the stars, issues and forks variants)
  repo: clairvoyant/Deep-Learning-Machine-Learning-Stock @ 2c848619975641cbbdad09c1f12949d374220d81
  licenses: [ "MIT" ]
  stars, issues and forks counts and event datetimes: null
  avg_line_length: 39.112016 | max_line_length: 2,194 | alphanum_fraction: 0.467559
  cells:
[ [ [ "# Bayesian Ridge Regression Part 2", "_____no_output_____" ], [ "### Multiple Features", "_____no_output_____" ] ], [ [ "import numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\n# yahoo finance is used to fetch data \nimport yfinance as yf\nyf.pdr_override()", "_____no_output_____" ], [ "# input\nsymbol = 'AMD'\nstart = '2014-01-01'\nend = '2018-08-27'\n\n# Read data \ndataset = yf.download(symbol,start,end)\n\n# View Columns\ndataset.head()", "[*********************100%***********************] 1 of 1 completed\n" ], [ "dataset['Increase_Decrease'] = np.where(dataset['Volume'].shift(-1) > dataset['Volume'],1,0)\ndataset['Buy_Sell_on_Open'] = np.where(dataset['Open'].shift(-1) > dataset['Open'],1,0)\ndataset['Buy_Sell'] = np.where(dataset['Adj Close'].shift(-1) > dataset['Adj Close'],1,0)\ndataset['Returns'] = dataset['Adj Close'].pct_change()\ndataset = dataset.dropna()\ndataset.head()", "_____no_output_____" ], [ "dataset.shape", "_____no_output_____" ], [ "X = np.asanyarray(dataset[['Open','High','Low', 'Volume', 'Increase_Decrease', 'Buy_Sell_on_Open', 'Buy_Sell', 'Returns']])\ny = np.asanyarray(dataset[['Adj Close']])", "_____no_output_____" ], [ "from sklearn.linear_model import BayesianRidge, LinearRegression\n\n# Fit the Bayesian Ridge Regression and an OLS for comparison\nmodel = BayesianRidge(compute_score=True)\nmodel.fit(X, y)", "_____no_output_____" ], [ "model.coef_", "_____no_output_____" ], [ "model.scores_", "_____no_output_____" ], [ "from sklearn.model_selection import train_test_split", "_____no_output_____" ], [ "X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)", "_____no_output_____" ], [ "model = BayesianRidge(compute_score=True)\nmodel.fit(X_train, y_train)", "_____no_output_____" ], [ "model.coef_", "_____no_output_____" ], [ "model.scores_", "_____no_output_____" ], [ "y_pred = model.predict(X_test)", "_____no_output_____" ], [ "from sklearn.metrics import mean_squared_error\nprint('The rmse of prediction is:', mean_squared_error(y_test, y_pred) ** 0.5)", "The rmse of prediction is: 0.09287337824617113\n" ], [ "print('Bayesian Ridge Regression Score:', model.score(X_test, y_test))", "Bayesian Ridge Regression Score: 0.9996452147933678\n" ] ] ]
[ "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
Record 3
  hexsha: d012ffe62a1431d47d86d8d80ea697f6294e5e8c
  size: 7,587 | ext: ipynb | lang: Jupyter Notebook
  path: crawling_news_example.ipynb (same for the stars, issues and forks variants)
  repo: wonbeeny/Crawling-Word2Vec @ f9b229620b5d91a7c6347695a289586bbcc99ee6
  licenses: [ "Apache-2.0" ]
  stars, issues and forks counts and event datetimes: null
  avg_line_length: 52.6875 | max_line_length: 354 | alphanum_fraction: 0.637406
  cells:
[ [ [ "from korea_news_crawler.articlecrawler import ArticleCrawler\n\nCrawler = ArticleCrawler() \nCrawler.set_category(\"정치\", \"IT과학\", \"economy\") \nCrawler.set_date_range(2017, 1, 2017, 2) \nCrawler.start()", "{'start_year': 2017, 'start_month': 1, 'end_year': 2018, 'end_month': 4}\n정치 PID: 17385\nIT과학 PID: 17388\neconomy PID: 17393\nIT과학 Urls are generated\nThe crawler starts\n정치 Urls are generated\nThe crawler starts\neconomy Urls are generated\nThe crawler starts\n" ] ] ]
[ "code" ]
[ [ "code" ] ]
Record 4
  hexsha: d01316254af895ad6c4ca40003021a5c53c018c9
  size: 18,489 | ext: ipynb | lang: Jupyter Notebook
  path: Linear_Algebra_in_Research.ipynb (same for the stars, issues and forks variants)
  repo: adriangalarion/Lab-Activities-1.1 @ 5e5448f79895080c70ba4ceb357cbc1fba7b5e95
  licenses: [ "Apache-2.0" ]
  stars, issues and forks counts and event datetimes: null
  avg_line_length: 145.582677 | max_line_length: 15,338 | alphanum_fraction: 0.875764
  cells:
[ [ [ "##Application of Linear Algebra in Data Science ", "_____no_output_____" ], [ "Here is the Python code to calculate and plot the MSE", "_____no_output_____" ] ], [ [ "import matplotlib.pyplot as plt\n", "_____no_output_____" ], [ "x = list(range(1,6)) #data points\ny = [1,1,2,2,4] #original values \ny_bar = [0.6,1.29,1.99,2.69,3.4] #predicted values\nsummation = 0 \nn = len(y) \nfor i in range(0, n): \n # finding the difference between observed and predicted value\n difference = y[i] - y_bar[i]\n squared_difference = difference**2 # taking square of the differene\n # taking a sum of all the differences\n summation = summation + squared_difference\nMSE = summation/n # get the average of all\nprint(\"The Mean Square Error is: \", MSE)\n#Plot relationship\nplt.scatter(x, y, color='#06AED5')\nplt.plot(x, y_bar, color='#1D3557', linewidth=2)\nplt.xlabel('Data Points', fontsize=12)\nplt.ylabel('Output', fontsize=12)\nplt.title(\"MSE\")", "The Mean Square Error is: 0.21606\n" ], [ "", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code" ] ]
Record 5
  hexsha: d01319f10e4797ec6d6bd397bec22b9fd8823ee7
  size: 9,426 | ext: ipynb | lang: Jupyter Notebook
  path: hard_PCR (WSC)/gpt2/src/gpt2_classification.ipynb (same for the stars, issues and forks variants)
  repo: HKUST-KnowComp/PCR @ 3e41ec46af8e186e689973108628340faf5cc696
  licenses: [ "MIT" ]
  stars: 5 (2020-09-18T09:47:17.000Z to 2021-11-04T02:55:39.000Z)
  issues: 1 (event datetime 2021-03-16T01:45:54.000Z)
  forks: null
  avg_line_length: 37.404762 | max_line_length: 306 | alphanum_fraction: 0.556227
  cells:
[ [ [ "# Load WSC dataset\n\nimport xml.etree.ElementTree as etree\nimport json\nimport numpy as np\n\nimport logging\nimport numpy\nimport os\n\n\ndef softmax(x):\n return np.exp(x)/sum(np.exp(x))", "_____no_output_____" ], [ "tree = etree.parse('WSCollection.xml')\nroot = tree.getroot()\noriginal_problems = root.getchildren()\nproblems = list()\n\nfor original_problem in original_problems:\n problem = dict()\n for information in original_problem.getchildren():\n if information.tag == 'answers':\n answers = information.getchildren()\n answer_list = list()\n for answer in answers:\n answer_list.append(answer.text.strip())\n problem['answers'] = answer_list\n elif information.tag == 'text':\n texts = information.getchildren()\n text_dict = dict()\n for text1 in texts:\n text_dict[text1.tag] = text1.text.replace('\\n', ' ').strip()\n problem['text'] = text_dict\n elif information.tag == 'quote':\n pass\n else:\n problem[information.tag] = information.text.replace(' ', '')\n problems.append(problem)\n\nprint(problems[0])\n\nall_sentences = list()\nfor question in problems:\n sentence = question['text']['txt1'] + ' ' + question['text']['pron'] + ' ' + question['text']['txt2']\n all_sentences.append(sentence)\n # print(sentence)\n ", "{'text': {'txt1': 'The city councilmen refused the demonstrators a permit because', 'pron': 'they', 'txt2': 'feared violence.'}, 'answers': ['The city councilmen', 'The demonstrators'], 'correctAnswer': 'A', 'source': '(Winograd1972)'}\n" ], [ "import json\nimport numpy as np\nimport tensorflow as tf\nimport model, sample, encoder\n\n\n\nmodel_name = '774M'\nmodels_dir = '../models'\n\nenc = encoder.get_encoder(model_name, models_dir)\n\nbatch_size = 1\nseed=None\nnsamples=1\n\n\nhparams = model.default_hparams()\nwith open(os.path.join(models_dir, model_name, 'hparams.json')) as f:\n hparams.override_from_dict(json.load(f))\n \nlength = hparams.n_ctx // 2\n\nanswer_collector = []\n\n\ndef logits_score(logits,skeleton_tokens, context_tokens):\n score = 1\n start_index = len(skeleton_tokens) - 1 \n end_index = len(context_tokens) - 1\n \n for i in range(end_index - start_index): \n m = softmax(logits[start_index+i])\n score *= m[context_tokens[start_index+i+1]]\n \n return score\n \n\n\nwith tf.Session(graph=tf.Graph()) as sess:\n \n context = tf.placeholder(tf.int32, [batch_size, None])\n np.random.seed(seed)\n tf.set_random_seed(seed)\n \n context_tokens = []\n\n output = model.model(hparams=hparams, X=context, past=None, reuse=tf.AUTO_REUSE)\n\n saver = tf.train.Saver()\n ckpt = tf.train.latest_checkpoint(os.path.join(models_dir, model_name))\n saver.restore(sess, ckpt)\n\n for i in range(273): \n \n if problems[i]['text']['txt1'] != \".\":\n ans0 = problems[i]['answers'][0].replace(\"The\",\"the\")\n ans1 = problems[i]['answers'][1].replace(\"The\",\"the\")\n else:\n ans0 = problems[i]['answers'][0]\n ans1 = problems[i]['answers'][1]\n \n skeleton1 = problems[i]['text']['txt1'] + ' ' + problems[i]['answers'][0]\n skeleton2 = problems[i]['text']['txt1'] + ' ' + problems[i]['answers'][1]\n raw_text1 = problems[i]['text']['txt1'] + ' ' + problems[i]['answers'][0] + ' ' + problems[i]['text']['txt2']\n raw_text2 = problems[i]['text']['txt1'] + ' ' + problems[i]['answers'][1] + ' ' + problems[i]['text']['txt2']\n context_tokens1 = enc.encode(raw_text1)\n context_tokens2 = enc.encode(raw_text2)\n skeleton_tokens1 = enc.encode(skeleton1)\n skeleton_tokens2 = enc.encode(skeleton2)\n \n out1 = sess.run(output, feed_dict={context: [context_tokens1 for _ in 
range(batch_size)]})\n out2 = sess.run(output, feed_dict={context: [context_tokens2 for _ in range(batch_size)]})\n \n logits1 = out1['logits'][:, :, :hparams.n_vocab]\n logits2 = out2['logits'][:, :, :hparams.n_vocab]\n \n score1 = logits_score(logits1[0],skeleton_tokens1,context_tokens1)\n score2 = logits_score(logits2[0],skeleton_tokens2,context_tokens2) \n \n correctAnswer = problems[i][\"correctAnswer\"]\n\n if score1 >= score2:\n predictedAnswer = \"A\"\n else:\n predictedAnswer = \"B\"\n # A. Problem\n answer_collector.append(predictedAnswer in correctAnswer)", "WARNING: Logging before flag parsing goes to stderr.\nW0730 18:08:22.649106 140254510032704 deprecation_wrapper.py:119] From /home/xzhaoar/gpt-2-master/src/model.py:148: The name tf.variable_scope is deprecated. Please use tf.compat.v1.variable_scope instead.\n\nW0730 18:08:22.661986 140254510032704 deprecation_wrapper.py:119] From /home/xzhaoar/gpt-2-master/src/model.py:152: The name tf.get_variable is deprecated. Please use tf.compat.v1.get_variable instead.\n\nW0730 18:08:22.717363 140254510032704 deprecation_wrapper.py:119] From /home/xzhaoar/gpt-2-master/src/model.py:36: The name tf.rsqrt is deprecated. Please use tf.math.rsqrt instead.\n\nW0730 18:08:29.380876 140254510032704 deprecation.py:323] From /home/xzhaoar/anaconda3/envs/gpt2/lib/python3.7/site-packages/tensorflow/python/training/saver.py:1276: checkpoint_exists (from tensorflow.python.training.checkpoint_management) is deprecated and will be removed in a future version.\nInstructions for updating:\nUse standard file APIs to check for files with this prefix.\n" ], [ "print(len(answer_collector))\nprint(np.sum(answer_collector)/273)", "273\n0.6923076923076923\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code" ] ]
Record 6
  hexsha: d0131b3145a0b7fab53971bdeddabbcc7b925c9d
  size: 6,961 | ext: ipynb | lang: Jupyter Notebook
  path: Chapman/Ch2-Problem_2-15.ipynb (same for the stars, issues and forks variants)
  repo: dietmarw/EK5312 @ 33b31d953f782043db0264116881060c9f059731
  licenses: [ "Unlicense" ]
  stars: 12 (2017-07-16T22:28:25.000Z to 2021-11-08T05:45:58.000Z)
  issues: null
  forks: 7 (2018-01-17T15:01:33.000Z to 2021-07-02T19:57:22.000Z)
  avg_line_length: 19.444134 | max_line_length: 220 | alphanum_fraction: 0.450079
  cells:
[ [ [ "# Excercises Electric Machinery Fundamentals\n## Chapter 2", "_____no_output_____" ], [ "## Problem 2-15", "_____no_output_____" ] ], [ [ "%pylab notebook", "Populating the interactive namespace from numpy and matplotlib\n" ] ], [ [ "### Description", "_____no_output_____" ], [ "An autotransformer is used to connect a 12.6-kV distribution line to a 13.8-kV distribution line. It must be capable of handling 2000 kVA. There are three phases, connected Y-Y with their neutrals solidly grounded.", "_____no_output_____" ] ], [ [ "Vl = 12.6e3 # [V]\nVh = 13.8e3 # [V]\nSio = 2000e3 # [VA]", "_____no_output_____" ] ], [ [ "#### (a)\n \n * What must the $N_C / N _{SE}$ turns ratio be to accomplish this connection?\n \n#### (b)\n \n * How much apparent power must the windings of each autotransformer handle?\n \n#### (c)\n \n * What is the power advantage of this autotransformer system?\n\n#### (d)\n\n * If one of the autotransformers were reconnected as an ordinary transformer, what would its ratings be?", "_____no_output_____" ], [ "### SOLUTION", "_____no_output_____" ], [ "#### (a)\n\nThe transformer is connected Y-Y, so the primary and secondary phase voltages are the line\nvoltages divided by 3 . The turns ratio of each autotransformer is given by:\n\n$$\\frac{V_H}{V_L} = \\frac{N_C + N_{SE}}{N_C}$$", "_____no_output_____" ] ], [ [ "a = (Vh/sqrt(3)) / (Vl/sqrt(3))\nn_a = 1 / (a-1) # n_a = Nc/Nse\nprint('''\nNc/Nse = {:.1f}\n=============\n'''.format(n_a))", "\nNc/Nse = 10.5\n=============\n\n" ] ], [ [ "#### (b)\n\nThe power advantage of this autotransformer is:\n\n$$\\frac{S_{IO}}{S_W} = \\frac{N_C + N_{SE}}{N_{SE}}$$", "_____no_output_____" ] ], [ [ "n_b = (10.5 + 1) / 1 # n_b = Sio/Sw\nprint('Sio/Sw = {:.1f}'.format(n_b))", "Sio/Sw = 11.5\n" ] ], [ [ "Since 1/3 of the total power is associated with each phase, **the windings in each autotransformer must handle:**", "_____no_output_____" ] ], [ [ "Sw = Sio / (3*n_b)\nprint('''\nSw = {:.1f} kVA\n==============\n'''.format(Sw/1000))", "\nSw = 58.0 kVA\n==============\n\n" ] ], [ [ "#### (c)\n\nAs determined in (b), the power advantage of this autotransformer system is:", "_____no_output_____" ] ], [ [ "print('''\nSio/Sw = {:.1f}\n=============\n'''.format(n_b))", "\nSio/Sw = 11.5\n=============\n\n" ] ], [ [ "#### (d)\n\nThe voltages across each phase of the autotransformer are:", "_____no_output_____" ] ], [ [ "Vh_p = Vh / sqrt(3)\nVl_p = Vl / sqrt(3)\nprint('''\nVh_p = {:.0f} V\nVl_p = {:.0f} V\n'''.format(Vh_p, Vl_p))", "\nVh_p = 7967 V\nVl_p = 7275 V\n\n" ] ], [ [ "The voltage across the common winding ( $N_C$ ) is:", "_____no_output_____" ] ], [ [ "Vnc = Vl_p\nprint('Vnc = {:.0f} V'.format(Vnc))", "Vnc = 7275 V\n" ] ], [ [ "and the voltage across the series winding ( $N_{SE}$ ) is:", "_____no_output_____" ] ], [ [ "Vnse = Vh_p - Vl_p\nprint('Vnse = {:.0f} V'.format(Vnse))", "Vnse = 693 V\n" ] ], [ [ "Therefore, a single phase of the autotransformer connected as an ordinary transformer would be rated at:", "_____no_output_____" ] ], [ [ "print('''\nVnc/Vnse = {:.0f}/{:.0f} Sw = {:.1f} kVA\n=================== =============\n'''.format(Vnc, Vnse, Sw/1000))", "\nVnc/Vnse = 7275/693 Sw = 58.0 kVA\n=================== =============\n\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
Record 7
  hexsha: d01323630ec50a50a92e30896e496b70b8de2361
  size: 78,223 | ext: ipynb | lang: Jupyter Notebook
  path: Project 3 - Analyze AB Test Results/Analyze_ab_test_results_notebook.ipynb (same for the stars, issues and forks variants)
  stars repo: geochri/Udacity-DAND @ 197740cf734ac90178a1029c81d9719b22a7aa92, stars: 6 (2019-06-10T03:13:26.000Z to 2020-05-16T12:31:43.000Z)
  issues repo: EldorIbragimov/Udacity-DAND @ 197740cf734ac90178a1029c81d9719b22a7aa92, issues: null
  forks repo: EldorIbragimov/Udacity-DAND @ 197740cf734ac90178a1029c81d9719b22a7aa92, forks: 3 (2019-06-09T12:59:40.000Z to 2019-07-02T05:30:23.000Z)
  licenses: [ "MIT" ]
  avg_line_length: 34.084096 | max_line_length: 5,936 | alphanum_fraction: 0.490533
  cells:
[ [ [ "## Analyze A/B Test Results\n\nYou may either submit your notebook through the workspace here, or you may work from your local machine and submit through the next page. Either way assure that your code passes the project [RUBRIC](https://review.udacity.com/#!/projects/37e27304-ad47-4eb0-a1ab-8c12f60e43d0/rubric). **Please save regularly.**\n\nThis project will assure you have mastered the subjects covered in the statistics lessons. The hope is to have this project be as comprehensive of these topics as possible. Good luck!\n\n## Table of Contents\n- [Introduction](#intro)\n- [Part I - Probability](#probability)\n- [Part II - A/B Test](#ab_test)\n- [Part III - Regression](#regression)\n\n\n<a id='intro'></a>\n### Introduction\n\nA/B tests are very commonly performed by data analysts and data scientists. It is important that you get some practice working with the difficulties of these \n\nFor this project, you will be working to understand the results of an A/B test run by an e-commerce website. Your goal is to work through this notebook to help the company understand if they should implement the new page, keep the old page, or perhaps run the experiment longer to make their decision.\n\n**As you work through this notebook, follow along in the classroom and answer the corresponding quiz questions associated with each question.** The labels for each classroom concept are provided for each question. This will assure you are on the right track as you work through the project, and you can feel more confident in your final submission meeting the criteria. As a final check, assure you meet all the criteria on the [RUBRIC](https://review.udacity.com/#!/projects/37e27304-ad47-4eb0-a1ab-8c12f60e43d0/rubric).\n\n<a id='probability'></a>\n#### Part I - Probability\n\nTo get started, let's import our libraries.", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport numpy as np\nimport random\nimport matplotlib.pyplot as plt\n%matplotlib inline\n#We are setting the seed to assure you get the same answers on quizzes as we set up\nrandom.seed(42)", "_____no_output_____" ] ], [ [ "`1.` Now, read in the `ab_data.csv` data. Store it in `df`. **Use your dataframe to answer the questions in Quiz 1 of the classroom.**\n\na. Read in the dataset and take a look at the top few rows here:", "_____no_output_____" ] ], [ [ "df = pd.read_csv('ab_data.csv')\ndf.head()", "_____no_output_____" ] ], [ [ "b. Use the cell below to find the number of rows in the dataset.", "_____no_output_____" ] ], [ [ "df.shape[0]", "_____no_output_____" ] ], [ [ "c. The number of unique users in the dataset.", "_____no_output_____" ] ], [ [ "df.nunique()[0]", "_____no_output_____" ] ], [ [ "d. The proportion of users converted.", "_____no_output_____" ] ], [ [ "df['converted'].sum() / df.shape[0]", "_____no_output_____" ] ], [ [ "e. The number of times the `new_page` and `treatment` don't match.", "_____no_output_____" ] ], [ [ "df[((df['group'] == 'treatment') & (df['landing_page'] != 'new_page')) | ((df['group'] != 'treatment') & (df['landing_page'] == 'new_page'))].shape[0]", "_____no_output_____" ] ], [ [ "f. 
Do any of the rows have missing values?", "_____no_output_____" ] ], [ [ "df.info()", "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 294478 entries, 0 to 294477\nData columns (total 5 columns):\nuser_id 294478 non-null int64\ntimestamp 294478 non-null object\ngroup 294478 non-null object\nlanding_page 294478 non-null object\nconverted 294478 non-null int64\ndtypes: int64(2), object(3)\nmemory usage: 11.2+ MB\n" ] ], [ [ "`2.` For the rows where **treatment** does not match with **new_page** or **control** does not match with **old_page**, we cannot be sure if this row truly received the new or old page. Use **Quiz 2** in the classroom to figure out how we should handle these rows. \n\na. Now use the answer to the quiz to create a new dataset that meets the specifications from the quiz. Store your new dataframe in **df2**.", "_____no_output_____" ] ], [ [ "df2 = df[(((df['group'] == 'treatment') & (df['landing_page'] == 'new_page')) | ((df['group'] == 'control') & (df['landing_page'] == 'old_page')))]\ndf2.head()", "_____no_output_____" ], [ "# Double Check all of the correct rows were removed - this should be 0\ndf2[((df2['group'] == 'treatment') == (df2['landing_page'] == 'new_page')) == False].shape[0]", "_____no_output_____" ] ], [ [ "`3.` Use **df2** and the cells below to answer questions for **Quiz3** in the classroom.", "_____no_output_____" ], [ "a. How many unique **user_id**s are in **df2**?", "_____no_output_____" ] ], [ [ "df2.nunique()[0]", "_____no_output_____" ] ], [ [ "b. There is one **user_id** repeated in **df2**. What is it?", "_____no_output_____" ] ], [ [ "uid = df2[df2['user_id'].duplicated() == True].index[0]\nuid", "_____no_output_____" ] ], [ [ "c. What is the row information for the repeat **user_id**? ", "_____no_output_____" ] ], [ [ "df2.loc[uid]", "_____no_output_____" ] ], [ [ "d. Remove **one** of the rows with a duplicate **user_id**, but keep your dataframe as **df2**.", "_____no_output_____" ] ], [ [ "df2.drop(2893, inplace=True)\ndf2.shape[0]", "/opt/conda/lib/python3.6/site-packages/pandas/core/frame.py:3697: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame\n\nSee the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy\n errors=errors)\n" ] ], [ [ "`4.` Use **df2** in the cells below to answer the quiz questions related to **Quiz 4** in the classroom.\n\na. What is the probability of an individual converting regardless of the page they receive?", "_____no_output_____" ] ], [ [ "df2[df2['converted'] == 1].shape[0] / df2.shape[0]", "_____no_output_____" ] ], [ [ "b. Given that an individual was in the `control` group, what is the probability they converted?", "_____no_output_____" ] ], [ [ "df2[(df2['converted'] == 1) & ((df2['group'] == 'control'))].shape[0] / df2[(df2['group'] == 'control')].shape[0] ", "_____no_output_____" ] ], [ [ "c. Given that an individual was in the `treatment` group, what is the probability they converted?", "_____no_output_____" ] ], [ [ "df2[(df2['converted'] == 1) & ((df2['group'] == 'treatment'))].shape[0] / df2[(df2['group'] == 'treatment')].shape[0] ", "_____no_output_____" ] ], [ [ "d. What is the probability that an individual received the new page?", "_____no_output_____" ] ], [ [ "df2[df2['landing_page'] == 'new_page'].shape[0] / df2.shape[0]", "_____no_output_____" ] ], [ [ "e. 
Consider your results from parts (a) through (d) above, and explain below whether you think there is sufficient evidence to conclude that the new treatment page leads to more conversions.", "_____no_output_____" ], [ "**The probability of converting for an individual who received the control page is more than that who received the treatment page. So its more likely to convert for the control page viewers. So there is not much evidence to prove that the new treatment page leads to more conversions**", "_____no_output_____" ], [ "<a id='ab_test'></a>\n### Part II - A/B Test\n\nNotice that because of the time stamp associated with each event, you could technically run a hypothesis test continuously as each observation was observed. \n\nHowever, then the hard question is do you stop as soon as one page is considered significantly better than another or does it need to happen consistently for a certain amount of time? How long do you run to render a decision that neither page is better than another? \n\nThese questions are the difficult parts associated with A/B tests in general. \n\n\n`1.` For now, consider you need to make the decision just based on all the data provided. If you want to assume that the old page is better unless the new page proves to be definitely better at a Type I error rate of 5%, what should your null and alternative hypotheses be? You can state your hypothesis in terms of words or in terms of **$p_{old}$** and **$p_{new}$**, which are the converted rates for the old and new pages.", "_____no_output_____" ], [ "**Put your answer here.**", "_____no_output_____" ], [ "`2.` Assume under the null hypothesis, $p_{new}$ and $p_{old}$ both have \"true\" success rates equal to the **converted** success rate regardless of page - that is $p_{new}$ and $p_{old}$ are equal. Furthermore, assume they are equal to the **converted** rate in **ab_data.csv** regardless of the page. <br><br>\n\nUse a sample size for each page equal to the ones in **ab_data.csv**. <br><br>\n\nPerform the sampling distribution for the difference in **converted** between the two pages over 10,000 iterations of calculating an estimate from the null. <br><br>\n\nUse the cells below to provide the necessary parts of this simulation. If this doesn't make complete sense right now, don't worry - you are going to work through the problems below to complete this problem. You can use **Quiz 5** in the classroom to make sure you are on the right track.<br><br>", "_____no_output_____" ] ], [ [ "df2.head()", "_____no_output_____" ] ], [ [ "a. What is the **conversion rate** for $p_{new}$ under the null? ", "_____no_output_____" ] ], [ [ "p_new = df2[(df2['converted'] == 1)].shape[0] / df2.shape[0]\np_new", "_____no_output_____" ] ], [ [ "b. What is the **conversion rate** for $p_{old}$ under the null? <br><br>", "_____no_output_____" ] ], [ [ "p_old = df2[(df2['converted'] == 1)].shape[0] / df2.shape[0]\np_old", "_____no_output_____" ] ], [ [ "c. What is $n_{new}$, the number of individuals in the treatment group?", "_____no_output_____" ] ], [ [ "n_new = df2[(df2['landing_page'] == 'new_page') & (df2['group'] == 'treatment')].shape[0]\nn_new", "_____no_output_____" ] ], [ [ "d. What is $n_{old}$, the number of individuals in the control group?", "_____no_output_____" ] ], [ [ "n_old = df2[(df2['landing_page'] == 'old_page') & (df2['group'] == 'control')].shape[0] \nn_old", "_____no_output_____" ] ], [ [ "e. Simulate $n_{new}$ transactions with a conversion rate of $p_{new}$ under the null. 
Store these $n_{new}$ 1's and 0's in **new_page_converted**.", "_____no_output_____" ] ], [ [ "new_page_converted = np.random.choice([1,0],n_new, p=(p_new,1-p_new))\nnew_page_converted.mean()", "_____no_output_____" ] ], [ [ "f. Simulate $n_{old}$ transactions with a conversion rate of $p_{old}$ under the null. Store these $n_{old}$ 1's and 0's in **old_page_converted**.", "_____no_output_____" ] ], [ [ "old_page_converted = np.random.choice([1,0],n_old, p=(p_old,1-p_old))\nold_page_converted.mean()", "_____no_output_____" ] ], [ [ "g. Find $p_{new}$ - $p_{old}$ for your simulated values from part (e) and (f).", "_____no_output_____" ] ], [ [ "# p_new - p_old\nnew_page_converted.mean() - old_page_converted.mean()", "_____no_output_____" ] ], [ [ "h. Create 10,000 $p_{new}$ - $p_{old}$ values using the same simulation process you used in parts (a) through (g) above. Store all 10,000 values in a NumPy array called **p_diffs**.", "_____no_output_____" ] ], [ [ "p_diffs = []\n\nfor _ in range(10000):\n\n new_page_converted = np.random.choice([0, 1], size = n_new, p = [1-p_new, p_new], replace = True).sum()\n\n old_page_converted = np.random.choice([0, 1], size = n_old, p = [1-p_old, p_old], replace = True).sum()\n\n diff = new_page_converted/n_new - old_page_converted/n_old\n\n p_diffs.append(diff)\n\np_diffs = np.array(p_diffs)\np_diffs", "_____no_output_____" ] ], [ [ "i. Plot a histogram of the **p_diffs**. Does this plot look like what you expected? Use the matching problem in the classroom to assure you fully understand what was computed here.", "_____no_output_____" ] ], [ [ "plt.hist(p_diffs);\nplt.plot();", "_____no_output_____" ] ], [ [ "j. What proportion of the **p_diffs** are greater than the actual difference observed in **ab_data.csv**?", "_____no_output_____" ] ], [ [ "# (p_diffs > (p_new - p_old))\nprop = (p_diffs > df['converted'].sample(10000)).mean()\nprop", "_____no_output_____" ] ], [ [ "k. Please explain using the vocabulary you've learned in this course what you just computed in part **j.** What is this value called in scientific studies? What does this value mean in terms of whether or not there is a difference between the new and old pages?", "_____no_output_____" ], [ "**Difference is not significant**", "_____no_output_____" ], [ "l. We could also use a built-in to achieve similar results. Though using the built-in might be easier to code, the above portions are a walkthrough of the ideas that are critical to correctly thinking about statistical significance. Fill in the below to calculate the number of conversions for each page, as well as the number of individuals who received each page. Let `n_old` and `n_new` refer the the number of rows associated with the old page and new pages, respectively.", "_____no_output_____" ] ], [ [ "import statsmodels.api as sm\n\nconvert_old = df2[(df2['landing_page'] == 'old_page') & (df2['group'] == 'control')]\nconvert_new = df2[(df2['landing_page'] == 'new_page') & (df2['group'] == 'treatment')]\nn_old = convert_old.shape[0]\nn_new = convert_new.shape[0]\nn_old, n_new\n# df2.head()", "_____no_output_____" ] ], [ [ "m. Now use `stats.proportions_ztest` to compute your test statistic and p-value. 
[Here](http://knowledgetack.com/python/statsmodels/proportions_ztest/) is a helpful link on using the built in.", "_____no_output_____" ] ], [ [ "from statsmodels.stats.proportion import proportions_ztest\n\n(df2['converted'] == 1).sum()\ndf2.shape[0]\nprop\n\nstat, pval = proportions_ztest((df2['converted'] == 1).sum(), df2.shape[0], prop)\nstat, pval", "_____no_output_____" ] ], [ [ "n. What do the z-score and p-value you computed in the previous question mean for the conversion rates of the old and new pages? Do they agree with the findings in parts **j.** and **k.**?", "_____no_output_____" ], [ "**p val = 0**\n\n**No**", "_____no_output_____" ], [ "<a id='regression'></a>\n### Part III - A regression approach\n\n`1.` In this final part, you will see that the result you achieved in the A/B test in Part II above can also be achieved by performing regression.<br><br> \n\na. Since each row is either a conversion or no conversion, what type of regression should you be performing in this case?", "_____no_output_____" ], [ "**Logical Regression**", "_____no_output_____" ] ], [ [ "df2.head()", "_____no_output_____" ] ], [ [ "b. The goal is to use **statsmodels** to fit the regression model you specified in part **a.** to see if there is a significant difference in conversion based on which page a customer receives. However, you first need to create in df2 a column for the intercept, and create a dummy variable column for which page each user received. Add an **intercept** column, as well as an **ab_page** column, which is 1 when an individual receives the **treatment** and 0 if **control**.", "_____no_output_____" ] ], [ [ "import statsmodels.api as sm\n\ndf2[['control','ab_page']] = pd.get_dummies(df2['group'])\ndf2.drop(['control','group'],axis=1, inplace=True)\ndf2.head()", "/opt/conda/lib/python3.6/site-packages/pandas/core/frame.py:3140: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame.\nTry using .loc[row_indexer,col_indexer] = value instead\n\nSee the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy\n self[k1] = value[k2]\n/opt/conda/lib/python3.6/site-packages/pandas/core/frame.py:3697: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame\n\nSee the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy\n errors=errors)\n" ] ], [ [ "c. Use **statsmodels** to instantiate your regression model on the two columns you created in part b., then fit the model using the two columns you created in part **b.** to predict whether or not an individual converts. ", "_____no_output_____" ] ], [ [ "df2['intercept'] = 1\n\nlogit_mod = sm.Logit(df2['converted'], df2[['intercept','ab_page']])\nresults = logit_mod.fit()", "/opt/conda/lib/python3.6/site-packages/ipykernel_launcher.py:1: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame.\nTry using .loc[row_indexer,col_indexer] = value instead\n\nSee the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy\n \"\"\"Entry point for launching an IPython kernel.\n" ], [ "np.exp(-0.0150)", "_____no_output_____" ], [ "1/np.exp(-0.0150)", "_____no_output_____" ] ], [ [ "d. Provide the summary of your model below, and use it as necessary to answer the following questions.", "_____no_output_____" ] ], [ [ "results.summary()", "_____no_output_____" ] ], [ [ "e. 
What is the p-value associated with **ab_page**? Why does it differ from the value you found in **Part II**?<br><br> **Hint**: What are the null and alternative hypotheses associated with your regression model, and how do they compare to the null and alternative hypotheses in **Part II**?", "_____no_output_____" ], [ "**P value = 0.190**", "_____no_output_____" ], [ "f. Now, you are considering other things that might influence whether or not an individual converts. Discuss why it is a good idea to consider other factors to add into your regression model. Are there any disadvantages to adding additional terms into your regression model?", "_____no_output_____" ], [ "**Yes, its good to check for some more fields**\n\n**Disadvantage - It may not be as easy to interpret as in the previous case**", "_____no_output_____" ], [ "g. Now along with testing if the conversion rate changes for different pages, also add an effect based on which country a user lives in. You will need to read in the **countries.csv** dataset and merge together your datasets on the appropriate rows. [Here](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.join.html) are the docs for joining tables. \n\nDoes it appear that country had an impact on conversion? Don't forget to create dummy variables for these country columns - **Hint: You will need two columns for the three dummy variables.** Provide the statistical output as well as a written response to answer this question.", "_____no_output_____" ] ], [ [ "df_countries = pd.read_csv('countries.csv')\ndf_countries.head()\n\ndf_merged = pd.merge(df2,df_countries, left_on='user_id', right_on='user_id')\ndf_merged.head()", "_____no_output_____" ], [ "df_merged[['US','UK','CA']] = pd.get_dummies(df_merged['country'])\ndf_merged.drop(['country','CA'],axis=1, inplace=True)\ndf_merged.head()", "_____no_output_____" ], [ "df_merged['intercept'] = 1\n\nlogit_mod = sm.Logit(df_merged['converted'], df_merged[['intercept','US','UK']])\nresults = logit_mod.fit()\nresults.summary()", "Optimization terminated successfully.\n Current function value: 0.366116\n Iterations 6\n" ] ], [ [ "**US ia having negative coeff which means that conversion rate decreases if person is from US**\n\n**UK ia having positive coeff which means that conversion rate increases if person is from UK**", "_____no_output_____" ], [ "h. Though you have now looked at the individual factors of country and page on conversion, we would now like to look at an interaction between page and country to see if there significant effects on conversion. Create the necessary additional columns, and fit the new model. \n\nProvide the summary results, and your conclusions based on the results.", "_____no_output_____" ] ], [ [ "final_df = df_merged[['user_id','timestamp','landing_page','converted','ab_page','US','UK']]\nfinal_df.head()", "_____no_output_____" ], [ "final_df['intercept'] = 1\n\nlogit_mod = sm.Logit(final_df['ab_page'], final_df[['intercept','US','UK']])\nresults = logit_mod.fit()\nresults.summary()", "Optimization terminated successfully.\n Current function value: 0.760413\n Iterations 3\n" ] ], [ [ "**'ab_page' column is 1 when an individual receives the treatment and 0 if control.**\n\n**US ia having positive coeff which means that chance of getting treatment page increases **\n\n**UK ia having negative coeff which means that change of getting control page increases**", "_____no_output_____" ], [ "<a id='conclusions'></a>\n## Finishing Up\n\n> Congratulations! 
You have reached the end of the A/B Test Results project! You should be very proud of all you have accomplished!\n\n> **Tip**: Once you are satisfied with your work here, check over your report to make sure that it is satisfies all the areas of the rubric (found on the project submission page at the end of the lesson). You should also probably remove all of the \"Tips\" like this one so that the presentation is as polished as possible.\n\n\n## Directions to Submit\n\n> Before you submit your project, you need to create a .html or .pdf version of this notebook in the workspace here. To do that, run the code cell below. If it worked correctly, you should get a return code of 0, and you should see the generated .html file in the workspace directory (click on the orange Jupyter icon in the upper left).\n\n> Alternatively, you can download this report as .html via the **File** > **Download as** submenu, and then manually upload it into the workspace directory by clicking on the orange Jupyter icon in the upper left, then using the Upload button.\n\n> Once you've done this, you can submit your project by clicking on the \"Submit Project\" button in the lower right here. This will create and submit a zip file with this .ipynb doc and the .html or .pdf version you created. Congratulations!", "_____no_output_____" ] ], [ [ "from subprocess import call\ncall(['python', '-m', 'nbconvert', 'Analyze_ab_test_results_notebook.ipynb'])", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ] ]
Record 8
  hexsha: d0132a60c1ac957f7688e86b7272530cb34da408
  size: 14,754 | ext: ipynb | lang: Jupyter Notebook
  path: codes/eurocodes/ec8/raw_ch3_seismic_action.ipynb (same for the stars, issues and forks variants)
  repo: panagop/streng_jupyters @ f00ed1759e80c2221382e31208cbc6b87e987d90
  licenses: [ "MIT" ]
  stars, issues and forks counts and event datetimes: null
  avg_line_length: 22.909938 | max_line_length: 165 | alphanum_fraction: 0.440423
  cells:
[ [ [ "# Eurocode 8 - Chapter 3 - seismic_action\n\nraw functions", "_____no_output_____" ] ], [ [ "from streng.codes.eurocodes.ec8.raw.ch3.seismic_action import spectra", "_____no_output_____" ] ], [ [ "## spectra", "_____no_output_____" ], [ "### αg", "_____no_output_____" ] ], [ [ "print(spectra.αg.__doc__)", "\n\n Args:\n αgR (float): reference peak ground acceleration on type A ground\n γI: (float): importance factor\n\n Returns:\n float: design ground acceleration on type A ground\n\n \n" ], [ "αg = spectra.αg(αgR=0.24,\n γI=1.20)\nprint(f'αg = {αg}g')", "αg = 0.288g\n" ] ], [ [ "### S", "_____no_output_____" ] ], [ [ "print(spectra.S.__doc__)", "\n\n Args:\n ground_type (str): Ground type (A, B, C, D or E)\n spectrum_type (int): Spectrum type 1 or 2\n\n Returns:\n float: soil factor\n\n \n" ], [ "S = spectra.S(ground_type='B',\n spectrum_type=1)\nprint(f'S = {S}')", "S = 1.2\n" ] ], [ [ "### TB", "_____no_output_____" ] ], [ [ "print(spectra.TB.__doc__)", "\n\n Args:\n ground_type (str): Ground type (A, B, C, D or E)\n spectrum_type (int): Spectrum type 1 or 2\n\n Returns:\n float: The lower limit of the period of the constant spectral acceleration branch\n\n \n" ], [ "TB = spectra.TB(ground_type='B',\n spectrum_type=1)\nprint(f'TB = {TB}')", "TB = 0.15\n" ] ], [ [ "### TC", "_____no_output_____" ] ], [ [ "print(spectra.TC.__doc__)", "\n\n Args:\n ground_type (str): Ground type (A, B, C, D or E)\n spectrum_type (int): Spectrum type 1 or 2\n\n Returns:\n float: The upper limit of the period of the constant spectral acceleration branch\n\n \n" ], [ "TC = spectra.TC(ground_type='B',\n spectrum_type=1)\nprint(f'TC = {TC}')", "TC = 0.5\n" ] ], [ [ "### TD", "_____no_output_____" ] ], [ [ "print(spectra.TD.__doc__)", "\n\n Args:\n ground_type (str): Ground type (A, B, C, D or E)\n spectrum_type (int): Spectrum type 1 or 2\n national_annex (str): Country national annex. Options are: 'default', 'greek'\n\n Returns:\n float: The value defining the beginning of the constant displacement response range of the spectrum\n\n \n" ], [ "TD = spectra.TD(ground_type='B',\n spectrum_type=1)\nprint(f'TD = {TD}')", "TD = 2.0\n" ] ], [ [ "### Se", "_____no_output_____" ] ], [ [ "print(spectra.Se.__doc__)", "\n\n Args:\n T (float): The vibration period of a linear single-degree-of-freedom system\n αg (float): The design ground acceleration on type A ground (ag = γI*agR)\n S (float): The soil factor\n TB (float): The lower limit of the period of the constant spectral acceleration branch\n TC (float): The upper limit of the period of the constant spectral acceleration branch\n TD (float): The value defining the beginning of the constant displacement response range of the spectrum\n η (float): The damping correction factor with a reference value of η = 1 for 5% viscous damping\n\n Returns:\n float: The elastic acceleration response spectrum. Given using the expressions:\n\n .. 
math::\n :nowrap:\n\n \\begin{eqnarray}\n 0 \\le T \\le T_B \\rightarrow & S_e(T) & = α_g\\cdot S \\cdot (1+\\dfrac{T}{T_B}\\cdot(η\\cdot 2.5 -1)) \\\\\n T_B \\le T \\le T_C \\rightarrow & S_e(T) & = α_g\\cdot S \\cdot η\\cdot 2.5 \\\\\n T_C \\le T \\le T_D \\rightarrow & S_e(T) & = α_g\\cdot S \\cdot η\\cdot 2.5\\cdot \\dfrac{T_C}{T} \\\\\n T_D \\le T \\le 4s \\rightarrow & S_e(T) & = α_g\\cdot S \\cdot η\\cdot 2.5\\cdot \\dfrac{T_C\\cdot T_D}{T^2}\n \\end{eqnarray}\n\n \n" ], [ "Se = spectra.Se(T=0.50,\n αg = 0.24,\n S=1.20,\n TB=0.15,\n TC=0.50,\n TD=2.0,\n η=1.0)\nprint(f'Se = {Se}g')", "Se = 0.72g\n" ] ], [ [ "### SDe", "_____no_output_____" ] ], [ [ "print(spectra.SDe.__doc__)", "\n\n Args:\n T (float): The vibration period of a linear single-degree-of-freedom system\n Se (float):The elastic acceleration response spectrum\n\n Returns:\n float: The elastic displacement response spectrum. Given using the expression:\n\n .. math::\n S_{De}=S_e(T)\\cdot(\\dfrac{T}{2π})^2\n\n \n" ], [ "Sde = spectra.SDe(T=0.5,\n Se=0.72*9.81)\nprint(f'Sde = {Sde:.3f}m')", "Sde = 0.045m\n" ] ], [ [ "### dg", "_____no_output_____" ] ], [ [ "print(spectra.dg.__doc__)", "\n\n Args:\n αg (float): The design ground acceleration on type A ground (ag = γI*agR)\n S (float): The soil factor\n TC (float): The upper limit of the period of the constant spectral acceleration branch\n TD (float): The value defining the beginning of the constant displacement response range of the spectrum\n\n Returns:\n float: Design ground displacement. Given using the expression:\n\n .. math::\n d_{g}=0.025\\cdot α_g \\cdot S \\cdot T_C \\cdot T_D\n\n \n" ], [ "dg = spectra.dg(αg=0.24,\n S=1.20,\n TC=0.50,\n TD=2.0)\nprint(f'dg = {dg:.4f}g')", "dg = 0.0072g\n" ] ], [ [ "### Sd", "_____no_output_____" ] ], [ [ "print(spectra.Sd.__doc__)", "\n\n Args:\n T (float): The vibration period of a linear single-degree-of-freedom system\n αg (float): The design ground acceleration on type A ground (ag = γI*agR)\n S (float): The soil factor\n TB (float): The lower limit of the period of the constant spectral acceleration branch\n TC (float): The upper limit of the period of the constant spectral acceleration branch\n TD (float): The value defining the beginning of the constant displacement response range of the spectrum\n q (float): The behaviour factor\n β (float): The lower bound factor for the horizontal design spectrum. Recommended value for β is 0.2\n\n Returns:\n float: Design spectrum for elastic analyses. Given using the expressions:\n\n .. math::\n :nowrap:\n\n \\begin{eqnarray}\n 0 \\le T \\le T_B \\rightarrow & S_d(T) & = α_g\\cdot S \\cdot (\\dfrac{2}{3}+\\dfrac{T}{T_B}\\cdot(\\dfrac{2.5}{q} - \\dfrac{2}{3})) \\\\\n T_B \\le T \\le T_C \\rightarrow & S_d(T) & = α_g\\cdot S \\cdot \\dfrac{2.5}{q} \\\\\n T_C \\le T \\le T_D \\rightarrow & S_d(T) & = α_g\\cdot S \\cdot \\dfrac{2.5}{q} \\cdot \\dfrac{T_C}{T} \\ge β \\cdot α_g \\\\\n T_D \\le T \\le 4s \\rightarrow & S_d(T) & = α_g\\cdot S \\cdot \\dfrac{2.5}{q} \\cdot \\dfrac{T_C\\cdot T_D}{T^2} \\ge β \\cdot α_g\n \\end{eqnarray}\n\n \n" ], [ "Sd = spectra.Sd(T=0.50,\n αg = 0.24,\n S=1.20,\n TB=0.15,\n TC=0.50,\n TD=2.0,\n q=3.9,\n β=0.20)\nprint(f'Sd = {Sd:.3f}g')", "Sd = 0.185g\n" ] ], [ [ "### η", "_____no_output_____" ] ], [ [ "print(spectra.η.__doc__)", "\n\n Args:\n ξ (float): the viscous damping ratio of the structure[%]\n\n Returns:\n float: The value of the damping correction factor. Given using the expressions:\n\n .. 
math::\n η = \\sqrt{\\dfrac{10}{5+ξ}} \\ge 0.55\n \n" ], [ "η_5 = spectra.η(5)\nprint(f'η(5%) = {η_5:.2f}')\n\nη_7 = spectra.η(7)\nprint(f'η(7%) = {η_7:.2f}')", "η(5%) = 1.00\nη(7%) = 0.91\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ] ]
d01332ac122762b35bd68d3d3fcbe11f36424de2
15,100
ipynb
Jupyter Notebook
sandpit/standalone_vkdv_convergence.ipynb
mrayson/iwaves
ddb6acc017a22896484fcd4c1058210e6223fde0
[ "BSD-2-Clause" ]
null
null
null
sandpit/standalone_vkdv_convergence.ipynb
mrayson/iwaves
ddb6acc017a22896484fcd4c1058210e6223fde0
[ "BSD-2-Clause" ]
3
2020-08-31T02:50:39.000Z
2020-08-31T03:26:33.000Z
tests/standalone_vkdv_convergence.ipynb
iosonobert/iwaves
143563bc9075d1e42e486a064f1fefa67ed84702
[ "BSD-2-Clause" ]
5
2020-08-31T02:04:41.000Z
2022-02-27T06:38:00.000Z
29.841897
152
0.496358
[ [ [ "# Standalone Convergence Checker for the numerical vKdV solver\n\nCopied from Standalone Convergence Checker for the numerical KdV solver - just add bathy\n\nDoes not save or require any input data", "_____no_output_____" ] ], [ [ "import xarray as xr\nfrom iwaves.kdv.kdvimex import KdVImEx#from_netcdf\nfrom iwaves.kdv.vkdv import vKdV \nfrom iwaves.kdv.solve import solve_kdv\n#from iwaves.utils.plot import vKdV_plot\nimport iwaves.utils.initial_conditions as ics\n\nimport numpy as np\nfrom scipy.interpolate import PchipInterpolator as pchip\nimport matplotlib.pyplot as plt\n\n%matplotlib inline\n\nfrom matplotlib import rcParams\n\n# Set font sizes\nrcParams['font.family'] = 'sans-serif'\nrcParams['font.sans-serif'] = ['Bitstream Vera Sans']\nrcParams['font.serif'] = ['Bitstream Vera Sans']\nrcParams[\"font.size\"] = \"14\"\nrcParams['axes.labelsize']='large'", "_____no_output_____" ], [ "# CONSTANTS FOR WHOLE NOTEBOOK\nd = 252.5\nL_d = 3.0e5\nNz = 100\n\n# Functions\n\ndef run_kdv(args):\n \"\"\"\n Main function for generating different soliton scenarios\n \"\"\"\n rho_params, bathy_params, a0, L_d, mode, nu_H, dx, runtime, dt, Lw = args\n ####################################################\n # Inputs\n mode = 0\n \n Nz = 100\n\n ntout = 1800.0\n\n z = np.linspace(0, -d, Nz)\n\n dz = np.abs(z[1]-z[0])\n \n x = np.arange(-2*dx,L_d+dx,dx)\n \n h = ics.depth_tanh2(bathy_params, x)\n\n kdvargs = dict(\\\n verbose=False,\\\n a0=a0,\\\n Lw=Lw,\\\n mode=mode,\n dt=dt,\\\n nu_H=nu_H,\\\n ekdv=False,\\\n wavefunc=ics.eta_fullsine,\\\n #L_d = L_d,\n x=x,\\\n Nsubset=10,\n )\n\n ###\n# THIS WAS COPIED FROM THE KdV VERSION. IT INITIALISES EACH vKdV 3 TIMES - QUITE SLOW. \n ###\n \n ii=0\n #rhoz = single_tanh_rho(\n # z, pp['rho0'][ii], pp['drho1'][ii], pp['z1'][ii], pp['h1'][ii])\n rhoz = ics.rho_double_tanh_rayson(rho_params,z)\n ######\n\n ## Call the vKdV run function\n mykdv, Bda, density = solve_kdv(rhoz, z, runtime,\\\n solver='vkdv', h=h, ntout=ntout, outfile=None, **kdvargs)\n\n print('Done with dx={} and dt={}'.format(dx, dt))\n \n return mykdv, Bda", "_____no_output_____" ], [ "dx = 10\nx = np.arange(-2*dx,L_d+dx,dx)\nbathy_params = [L_d*0.6, 50000, d+50, d-50] \n\nh = ics.depth_tanh2(bathy_params, x)\n \nplt.figure(figsize=(9,5))\nplt.plot(x, h, 'k')\nplt.ylabel('h (m)')\nplt.xlabel('x (m)')\nplt.title('vKdV bathy')", "_____no_output_____" ], [ "#betas = [1023.7, 1.12, 105, 52, 155, 43] # ~April 5\n#betas = [1023.5, 1.22, 67, 55, 157, 52] # ~March 1\n\nbetas_w = [1023.8229810318612,\n 0.9865506702797462,\n 143.5428700089361,\n 46.1265812512485,\n 136.66278860120943,\n 41.57014327398592] # 15 July 2016\n\nbetas_s =[1023.6834358117951,\n 1.2249066117658955,\n 156.78804559089772,\n 53.66835548728355,\n 73.14183287436342,\n 40.21031777315428] # 1st April 2017\n\na0 = 20.\nmode =0\nnu_H = 0\nruntime = 1.25*86400.\n\n# Going to make Lw an input for the vKdV as it will really speed things up. 
\ndx = 100\ndt = 10\n\nz = np.linspace(0, -d, Nz)\nrhoz_w = ics.rho_double_tanh_rayson(betas_w, z)\nrhoz_s = ics.rho_double_tanh_rayson(betas_s, z)\nLw_w = ics.get_Lw(rhoz_w, z, z0=max(h), mode=0)\nLw_s = ics.get_Lw(rhoz_s, z, z0=max(h), mode=0)\n\nprint(Lw_w)\nprint(Lw_s)", "_____no_output_____" ], [ "dxs =[1600,800,400,200,100,75,50,37.5,25]\ndxs =[800,400,200,100,75,50,35]\ndt = 8.\n\nall_kdv_dx_w = []\nall_kdv_dx_s = []\n\nfor dx in dxs:\n \n print(' ')\n print('Running dx={}'.format(dx))\n print(' ')\n \n mykdv, B = run_kdv( (betas_w, bathy_params, a0, L_d, mode, nu_H, dx, runtime, dt, Lw_w))\n all_kdv_dx_w.append(mykdv)\n \n mykdv, B = run_kdv( (betas_s, bathy_params, a0, L_d, mode, nu_H, dx, runtime, dt, Lw_s))\n all_kdv_dx_s.append(mykdv)\n \n print(' ')\n print('Completed dx={}'.format(dx))\n print(' ')", "_____no_output_____" ], [ "plt.figure(figsize=(9,5))\nfor mykdv in all_kdv_dx_s:\n plt.plot(mykdv.x, mykdv.B, label=mykdv.dx_s)\n# plt.xlim((162200, 163600))\nplt.legend()\nplt.show()\n\nplt.figure(figsize=(9,5))\nfor mykdv in all_kdv_dx_s:\n plt.plot(mykdv.x, mykdv.B, label=mykdv.dx_s)\n# plt.xlim((162200, 163600))\nplt.ylim((-65, 40))\nplt.xlim((165000, 185000))\nplt.legend()\n", "_____no_output_____" ], [ "plt.figure(figsize=(9,5))\nfor mykdv in all_kdv_dx_w:\n plt.plot(mykdv.x, mykdv.B, label=mykdv.dx_s)\nplt.legend()\nplt.show()\n\nplt.figure(figsize=(9,5))\nfor mykdv in all_kdv_dx_w:\n plt.plot(mykdv.x, mykdv.B, label=mykdv.dx_s)\n \nplt.legend()\nplt.ylim((-40, 10))\nplt.xlim((135000, 170000))", "_____no_output_____" ], [ "# Compute the errors\nX = np.arange(0,L_d, 10.)\nnx = X.shape[0]\nndx = len(dxs)\n\nsolns = np.zeros((ndx, nx))\nfor ii, mykdv in enumerate(all_kdv_dx_w):\n Fx = pchip(mykdv.x, mykdv.B)\n solns[ii,:] = Fx(X)\n\n# Compute the error between each solution\n#err = np.diff(solns, axis=0)\nerr = solns - solns[-1,:]\n\nerr_rms_w = np.linalg.norm(err, ord=2, axis=1) # L2-norm\n#err_rms_w = np.sqrt(np.mean(err**2,axis=1))\n\nsolns = np.zeros((ndx, nx))\nfor ii, mykdv in enumerate(all_kdv_dx_s):\n Fx = pchip(mykdv.x, mykdv.B)\n solns[ii,:] = Fx(X)\n\n# Compute the error between each solution\n#err = np.diff(solns, axis=0)\nerr = solns - solns[-1,:]\n\nerr_rms_s = np.linalg.norm(err, ord=2, axis=1) # L2-norm\n#err_rms_s = np.sqrt(np.mean(err**2,axis=1))\n\n", "_____no_output_____" ], [ "plt.figure(figsize=(9,8))\nplt.loglog(dxs[:-1],err_rms_s[:-1],'ko')\nplt.loglog(dxs[:-1],err_rms_w[:-1],'s', color='0.5')\nplt.xlim(2e1,2e3)\nplt.ylim(1e1,2e3)\nplt.grid(b=True)\nx0 = np.array([50,100.])\nplt.plot(x0, 100/x0[0]**2*x0**2, 'k--')\nplt.plot(x0, 100/x0[0]**1*x0**1, 'k:')\nplt.ylabel('L2-norm Error [m]')\nplt.xlabel('$\\Delta x$ [m]')\n\nalpha_s = -2*all_kdv_dx_s[0].c1*all_kdv_dx_s[0].r10 \nbeta_s = -1*all_kdv_dx_s[0].r01\nalpha_w = -2*all_kdv_dx_w[0].c1*all_kdv_dx_w[0].r10 \nbeta_w = -1*all_kdv_dx_w[0].r01\nplt.legend((r'$\\alpha$ = (%3.4f,%3.4f), $\\beta$ = (%3.4f,%3.4f)'%(min(alpha_s), max(alpha_s), min(beta_s), max(beta_s)),\n r'$\\alpha$ = (%3.4f,%3.4f), $\\beta$ = (%3.4f,%3.4f)'%(min(alpha_w), max(alpha_w), min(beta_w), max(beta_w))), loc='lower right')", "_____no_output_____" ], [ "# Delta t comparison\ndts = [20,10.,5,2.5,1.25,0.6,0.3]\ndx = 50.\n\nall_kdv_dt_w = []\nall_kdv_dt_s = []\n\nfor dt in dts:\n \n print(' ')\n print('Running dt={}'.format(dt))\n print(' ')\n \n mykdv, B = run_kdv( (betas_w, bathy_params, a0, L_d, mode, nu_H, dx, runtime, dt, Lw_w))\n all_kdv_dt_w.append(mykdv)\n \n mykdv, B = run_kdv( (betas_s, bathy_params, a0, L_d, mode, nu_H, dx, 
runtime, dt, Lw_s))\n all_kdv_dt_s.append(mykdv)\n \n \n print(' ')\n print('Completed dt={}'.format(dt))\n print(' ')\n ", "_____no_output_____" ], [ "plt.figure(figsize=(9,5))\nfor mykdv in all_kdv_dt_s:\n plt.plot(mykdv.x, mykdv.B, label=mykdv.dt_s)\nplt.legend()\nplt.show()\n\nplt.figure(figsize=(9,5))\nfor mykdv in all_kdv_dt_s:\n plt.plot(mykdv.x, mykdv.B, label=mykdv.dt_s)\n \nplt.legend()\nplt.ylim((-50, 30))\nplt.xlim((195000, 210000))", "_____no_output_____" ], [ "plt.figure(figsize=(9,5))\nfor mykdv in all_kdv_dt_w:\n plt.plot(mykdv.x, mykdv.B, label=mykdv.dt_s)\nplt.legend()\nplt.show()\n\nplt.figure(figsize=(9,5))\nfor mykdv in all_kdv_dt_w:\n plt.plot(mykdv.x, mykdv.B, label=mykdv.dt_s)\n \nplt.legend()\nplt.ylim((-30, 1))\nplt.xlim((175000, 205000))", "_____no_output_____" ], [ "# Compute the errors\nX = np.arange(0,L_d, 10.)\nnx = X.shape[0]\nndx = len(dts)\n\nsolns = np.zeros((ndx, nx))\nfor ii, mykdv in enumerate(all_kdv_dt_w):\n print(ii)\n Fx = pchip(mykdv.x, mykdv.B)\n solns[ii,:] = Fx(X)\n\n# Compute the error between each solution\n#err = np.diff(solns, axis=0)\nerr = solns - solns[-1,:]\n\nerr_rms_w_t = np.linalg.norm(err, ord=2, axis=1) # L2-norm\n#err_rms_w = np.sqrt(np.mean(err**2,axis=1))\n\nsolns = np.zeros((ndx, nx))\nfor ii, mykdv in enumerate(all_kdv_dt_s):\n print(ii)\n Fx = pchip(mykdv.x, mykdv.B)\n solns[ii,:] = Fx(X)\n\n# Compute the error between each solution\n#err = np.diff(solns, axis=0)\nerr = solns - solns[-1,:]\n\nerr_rms_s_t = np.linalg.norm(err, ord=2, axis=1) # L2-norm\n#err_rms_s = np.sqrt(np.mean(err**2,axis=1))", "_____no_output_____" ], [ "plt.figure(figsize=(12,8))\nax=plt.subplot(121)\nplt.loglog(dxs[:-1],err_rms_s[:-1],'ko', markersize=6)\nplt.loglog(dxs[:-1],err_rms_w[:-1],'s', color='0.5', markersize=4)\nplt.xlim(2e1,2e3)\nplt.ylim(1e0,2e3)\nplt.grid(b=True)\nx0 = np.array([50,100.])\nplt.plot(x0, 100/x0[0]**2*x0**2, 'k--')\nplt.plot(x0, 100/x0[0]**1*x0**1, 'k:')\nplt.ylabel('L2-norm Error [m]')\nplt.xlabel('$\\Delta x$ [m]')\n\nalpha_s = -2*all_kdv_dx_s[0].c1*all_kdv_dx_s[0].r10 \nbeta_s = -1*all_kdv_dx_s[0].r01\nalpha_w = -2*all_kdv_dx_w[0].c1*all_kdv_dx_w[0].r10 \nbeta_w = -1*all_kdv_dx_w[0].r01\n\nplt.legend((r'$\\alpha$ = (%3.3f, %3.3f), $\\beta$ = (%3.0f, %3.0f)'%(min(alpha_s), max(alpha_s), min(beta_s), max(beta_s)),\n r'$\\alpha$ = (%3.3f, %3.3f), $\\beta$ = (%3.0f, %3.0f)'%(min(alpha_w), max(alpha_w), min(beta_w), max(beta_w))), loc='lower right')\n\nplt.text(0.05,0.95,'(a)',transform=ax.transAxes)\n\n\nax=plt.subplot(122)\nplt.loglog(dts[:-1],err_rms_s_t[:-1],'kd', markersize=6)\nplt.loglog(dts[:-1],err_rms_w_t[:-1],'s', color='0.5', markersize=4)\nplt.xlim(0,0.5e2)\nplt.ylim(1e-2,1e3)\nplt.grid(b=True)\nx0 = np.array([5,20])\nplt.plot(x0, 10/x0[0]**2*x0**2, 'k--')\nplt.plot(x0, 10/x0[0]**1*x0**1, 'k:')\n#plt.ylabel('L2-norm Error [m]')\nplt.xlabel('$\\Delta t$ [s]')\n\nplt.text(0.05,0.95,'(b)',transform=ax.transAxes)\nalpha_s = -2*all_kdv_dt_s[0].c1*all_kdv_dt_s[0].r10 \nbeta_s = -1*all_kdv_dt_s[0].r01\nalpha_w = -2*all_kdv_dt_w[0].c1*all_kdv_dt_w[0].r10 \nbeta_w = -1*all_kdv_dt_w[0].r01\n\nplt.legend((r'$\\alpha$ = (%3.3f, %3.3f), $\\beta$ = (%3.0f, %3.0f)'%(min(alpha_s), max(alpha_s), min(beta_s), max(beta_s)),\n r'$\\alpha$ = (%3.3f, %3.3f), $\\beta$ = (%3.0f, %3.0f)'%(min(alpha_w), max(alpha_w), min(beta_w), max(beta_w))), loc='lower right')\n\n# plt.savefig('../FIGURES/vkdv_convergence_dxdt.png',dpi=150)\n# plt.savefig('../FIGURES/vkdv_convergence_dxdt.pdf',dpi=150)", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
d0133a1a374b98b94dbc1fb4fe8d3d2f7406c74b
281,400
ipynb
Jupyter Notebook
Classification_Notes/SKlearn_RandomForest_Classification.ipynb
CVanchieri/CS_Notes
af2e84f9cf5e9faecff91067a5d617f3d7dea758
[ "MIT" ]
1
2021-12-16T08:46:41.000Z
2021-12-16T08:46:41.000Z
Classification_Notes/SKlearn_RandomForest_Classification.ipynb
CVanchieri/CS_Notes
af2e84f9cf5e9faecff91067a5d617f3d7dea758
[ "MIT" ]
null
null
null
Classification_Notes/SKlearn_RandomForest_Classification.ipynb
CVanchieri/CS_Notes
af2e84f9cf5e9faecff91067a5d617f3d7dea758
[ "MIT" ]
1
2021-12-16T08:46:48.000Z
2021-12-16T08:46:48.000Z
161.076131
41,006
0.850323
[ [ [ "## Random Forest Classification\n", "_____no_output_____" ], [ "### Random Forest\n#### The fundamental idea behind a random forest is to combine many decision trees into a single model. Individually, predictions made by decision trees (or humans) may not be accurate, but combined together, the predictions will be closer to the mark on average. \n\n#### Pros \n- can handle large datasets \n- can handle missing values\n- less influenced by outliers in the data\n- no assumptions about underlying distributions in the data\n- can implicitly handle collinearity in features, highly similar features \n- work well with categorical and numerical features, mixing different range values \n\n#### Cons\n- robust algorithm makes it more complex tougher to analyze small details \n- not best to determine feature and target relationships/effects due to working with highly similar features", "_____no_output_____" ], [ "### Model Set Up\n\n#### Steps\n - load the data\n - determine regression or classification target\n - inspect, clean, organize data\n - check for, handle outliers \n - encode data if necessary \n - set features and target \n - train, test split the data \n - scale the data if necessary \n - build the model, fit on the data, run the model\n - run metrics, analyze, view results, adjust parameters, repeat until satisfied... \n", "_____no_output_____" ], [ "### Regression Models", "_____no_output_____" ], [ "#### Random Forest Classification \n1 dependent variable (binary) , 1+ independent variables (interval or ratio or categorical)\n![photo](https://upload.wikimedia.org/wikipedia/commons/7/76/Random_forest_diagram_complete.png)\n - classification predictor\n - generate reasonable predictions across a wide range of data while requiring little configuration\n\n", "_____no_output_____" ], [ "#### Classification Models\n", "_____no_output_____" ], [ "##### Import + Inspect", "_____no_output_____" ] ], [ [ "### imports ###\nimport pandas as pd\nimport numpy as np\nimport sklearn\ndf = pd.read_csv('https://raw.githubusercontent.com/CVanchieri/CS_Notes/main/Classification_Notes/bill_authentication.csv') # read in the file \nprint('data frame shape:', df.shape) # show the data frame shape \ndf.head() # show the data frame ", "data frame shape: (1372, 5)\n" ], [ "### inspecting the data ###\nprint('--- INSPECTING THE DATA --- ')\nprint('--- columns --- ')\nprint(df.columns) \nprint('--- types --- ')\nprint(df.dtypes) \nprint('--- NA counts --- ')\nprint(df.isna().sum()) \n# print('--- object descriptions --- ')\n# print(df.describe(include=object))\nprint('--- numericals descriptions --- ')\ndf.describe()", "--- INSPECTING THE DATA --- \n--- columns --- \nIndex(['Variance', 'Skewness', 'Curtosis', 'Entropy', 'Class'], dtype='object')\n--- types --- \nVariance float64\nSkewness float64\nCurtosis float64\nEntropy float64\nClass int64\ndtype: object\n--- NA counts --- \nVariance 0\nSkewness 0\nCurtosis 0\nEntropy 0\nClass 0\ndtype: int64\n--- numericals descriptions --- \n" ], [ "### view basic feature correlations ###\nprint('--- feature correlations ---')\ndf.corr()", "--- feature correlations ---\n" ], [ "### view basic feature correlations in a heatmap ###\nimport seaborn as sns \nimport matplotlib.pyplot as plt \nf, ax = plt.subplots(1, 1, figsize = (10, 7))\nprint('--- feature correlations heatmap ---')\nsns.heatmap(df.corr() , cmap = 'Wistia' , annot = True)", "--- feature correlations heatmap ---\n" ], [ "### view scatter plots for each feature vs. 
target ###\nimport matplotlib.pyplot as plt \ntarget_ = 'Class' # set the target\nfeatures_ = df.iloc[:, 0:4] # set the features\nprint('--- bar plots ---')\nfor feature in features_:\n figure = plt.figure\n f, ax = plt.subplots(1, 1, figsize = (10, 7))\n ax = plt.gca()\n ax.bar(df[target_], df[feature])\n ax.set_xlabel(target_)\n ax.set_ylabel(feature)\n ax.set_title(f'''{target_} vs {feature}''')\n plt.show()", "--- bar plots ---\n" ] ], [ [ "##### Encode + Clean + Organize\n\n\n", "_____no_output_____" ] ], [ [ "### encoding not necessary with this example, all are numericals ###", "_____no_output_____" ], [ "### check for outliers in the data ### \nimport matplotlib.pyplot as plt\n# view each feature in a boxplot \nfor column in df:\n plt.figure() # plot figure \n f, ax = plt.subplots(1, 1, figsize = (10, 7))\n df.boxplot([column]) # set data", "_____no_output_____" ], [ "### function to find outliers in the data ###\ndef outlier_zscore(data):\n global outliers,zscore\n outliers = [] \n zscore = [] \n threshold = 3.5 # set threshold \n mean = np.mean(data)\n std = np.std(data)\n for i in data:\n z_score = (i - mean)/std # calculate the z_score\n zscore.append(z_score) # append the score to the zscore\n if np.abs(z_score) > threshold: \n outliers.append(i) # append z_score the outliers\n print(outliers)\n return len(outliers), outliers", "_____no_output_____" ], [ "### run each feature 'wanted' through the function ### \nprint('--- possible outliers --- ')\nVariance_outliers_number, Variance_outliers = outlier_zscore(df.Variance)\nSkewness_outliers_number, Skewness_outliers = outlier_zscore(df.Skewness)\nCurtosis_outliers_number, Curtosis_outliers = outlier_zscore(df.Curtosis)\nEntropy_outliers_number, Entropy_outliers = outlier_zscore(df.Entropy)\nClass_outliers_number, Class_outliers = outlier_zscore(df.Class)", "--- possible outliers --- \n[]\n[]\n[]\n[]\n[]\n" ], [ "### removal of outliers per feature ###\nfor num, i in enumerate(df['Curtosis']): # removing the outliers of 'bmi' \n if i in Curtosis_outliers:\n df['Curtosis'][num] = 13.5 # 3.5 under the lowest outlier\nfor num, i in enumerate(df['Entropy']): # removing the outliers of 'charges'\n if i in Entropy_outliers:\n df['Entropy'][num] = -5.5 # 3.5 under the lowest outlier", "/usr/local/lib/python3.6/dist-packages/ipykernel_launcher.py:4: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame\n\nSee the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n after removing the cwd from sys.path.\n/usr/local/lib/python3.6/dist-packages/ipykernel_launcher.py:7: SettingWithCopyWarning: \nA value is trying to be set on a copy of a slice from a DataFrame\n\nSee the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n import sys\n" ] ], [ [ "#### Random Forest Classification\n - GridSearch CV\n - RandomSearch CV", "_____no_output_____" ] ], [ [ "### copy the data frame ### \ndf1 = df.copy()", "_____no_output_____" ], [ "### split the data into features & target sets ###\nX = df1.iloc[:, 0:4].values # set the features\ny = df1.iloc[:, 4].values # set the target \nprint('--- data shapes --- ')\nprint('X shape:', X.shape) \nprint('y shape:', y.shape) ", "--- data shapes --- \nX shape: (1372, 4)\ny shape: (1372,)\n" ], [ "### set the train test split parameters ###\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, 
y_test = train_test_split(X, y, test_size=0.2, random_state=0) # split 80/20 ", "_____no_output_____" ], [ "### feature scaling ### \nfrom sklearn.preprocessing import StandardScaler\nsc = StandardScaler() # initiate the scalar \nX_train = sc.fit_transform(X_train) # fit transform the data with scalar\nX_test = sc.transform(X_test) # fit transform the data with scalar", "_____no_output_____" ], [ "### random forest classifier ###\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn import metrics\nmodel = RandomForestClassifier()\nmodel.fit(X_train, y_train)\ny_pred = model.predict(X_test) \n#### create data frame of predictions and results ### \ny_pred_df = pd.DataFrame(y_pred, columns=[\"Predicted_Values\" ])\ny_test_df = pd.DataFrame(np.array(y_test), columns=[\"Real_Values\"])\ndf_final = pd.concat([y_test_df , y_pred_df] , axis=1)\nprint('--- real values vs predicted values ---')\nprint(df_final.head())\n### get the model metrics ###\nprint('--- model metrics ---')\nprint('mean absolute error:', metrics.mean_absolute_error(y_test, y_pred)) # mae \nprint('mean squared error:', metrics.mean_squared_error(y_test, y_pred)) # mse \nprint('root mean squared error:', np.sqrt(metrics.mean_squared_error(y_test, y_pred))) # rmse\nscore = metrics.r2_score(y_test , y_pred) # get the r2 score \nprint(\"r2 score = {}\".format(score)) # show the r2 score\nprint('model score=', model.score(X_train, y_train)) # show the model score \nprint(\"model accuracy= {}%\".format(score * 100)) # show the model accuracy\nprint('--- confusion matrix ---')\nprint(metrics.confusion_matrix(y_test,y_pred)) # confusion matrix\nprint('--- classification report ---') \nprint(metrics.classification_report(y_test,y_pred)) # classificatin report\nprint('model accuracy score=', metrics.accuracy_score(y_test, y_pred)) # model accuracy", "--- real values vs predicted values ---\n Real_Values Predicted_Values\n0 1 1\n1 0 0\n2 1 1\n3 0 0\n4 0 0\n--- model metrics ---\nmean absolute error: 0.014545454545454545\nmean squared error: 0.014545454545454545\nroot mean squared error: 0.12060453783110545\nr2 score = 0.9406239879088848\nmodel score= 1.0\nmodel accuracy= 94.06239879088848%\n--- confusion matrix ---\n[[155 2]\n [ 2 116]]\n--- classification report ---\n precision recall f1-score support\n\n 0 0.99 0.99 0.99 157\n 1 0.98 0.98 0.98 118\n\n accuracy 0.99 275\n macro avg 0.99 0.99 0.99 275\nweighted avg 0.99 0.99 0.99 275\n\nmodel accuracy score= 0.9854545454545455\n" ], [ "### visualize the model prediction accuracy ###\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n### configure the plot ###\nprint('--- distplot accuracy --- ')\nf, ax = plt.subplots(1, 1, figsize = (10, 7))\nax1 = sns.distplot(y_test, hist=False, color=\"b\", label=\"Actual Values\")\nsns.distplot(y_pred, hist=False, color=\"r\", label=\"Predicted Values\" , axlabel='Charges', ax=ax1)\nplt.legend()", "--- distplot accuracy --- \n" ] ], [ [ "###### GridSearch CV", "_____no_output_____" ] ], [ [ "### copy the data frame ###\ndf2 = df.copy()", "_____no_output_____" ], [ "### split the data into features & target sets ###\n# for single regression select 1 feature\nX = df2.iloc[:, 0:4].values # set the features\ny = df2.iloc[:, 4].values # set the target \nprint('--- data shapes --- ')\nprint('X shape:', X.shape) \nprint('y shape:', y.shape) ", "--- data shapes --- \nX shape: (1372, 4)\ny shape: (1372,)\n" ], [ "### set the train test split parameters ###\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, 
y_test = train_test_split(X, y, test_size=0.2, random_state=0) # split 80/20 ", "_____no_output_____" ], [ "### feature scaling ### \nfrom sklearn.preprocessing import StandardScaler\nsc = StandardScaler() # initiate the scalar \nX_train = sc.fit_transform(X_train) # fit transform the data with scalar\nX_test = sc.transform(X_test) # fit transform the data with scalar", "_____no_output_____" ], [ "### random forest classifier + gridsearch CV model ###\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.model_selection import GridSearchCV\nmodel1 = RandomForestClassifier()\nparam_grid = { # create the param grid \n 'n_estimators': [20, 100, 200],\n 'max_features': ['auto', 'sqrt', 'log2'],\n 'max_leaf_nodes' : [2, 6, 10],\n 'max_depth' : [5, 15, 25],\n 'min_samples_split' : [2, 10, 15],\n # 'bootstrap': [True, False],\n # 'ccp_alpha': [0.0, 0.25, 0.50],\n # 'criterion': 'mse',\n # 'max_samples': [2, 10, 15],\n # 'min_impurity_decrease': [0.0, 0.25, 0.50],\n # 'min_impurity_split': [2, 10, 15],\n # 'min_samples_leaf': [1, 5, 10],\n # 'min_weight_fraction_leaf': [0.0, 0.25, 0.50],\n # 'n_jobs': [1, 2, 5],\n # 'oob_score': [True, False],\n # 'random_state': [0, 2, 4],\n # 'verbose': [1],\n # 'warm_start': [True, False] \n }\nCV_rfc = GridSearchCV(estimator=model1, param_grid=param_grid, cv=3) \nprint('--- model runtime --- ')\n%time CV_rfc.fit(X_train, y_train)\nprint('--- best params --- ')\nCV_rfc.best_params_", "--- model runtime --- \nCPU times: user 2min 10s, sys: 501 ms, total: 2min 11s\nWall time: 2min 11s\n--- best params --- \n" ], [ "", "_____no_output_____" ], [ "### random forest classifier + grid best params ###\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn import metrics\nmodel1 = RandomForestClassifier(\n max_depth= 25,\n max_features= 'log2',\n max_leaf_nodes= 10,\n min_samples_split= 2,\n n_estimators= 20\n )\nprint('--- model runtime --- ')\n%time model1.fit(X_train, y_train)\ny_pred = model1.predict(X_test) \n#### create data frame of predictions and results ### \ny_pred_df = pd.DataFrame(y_pred, columns=[\"Predicted_Values\" ])\ny_test_df = pd.DataFrame(np.array(y_test), columns=[\"Real_Values\"])\ndf_final = pd.concat([y_test_df , y_pred_df] , axis=1)\nprint('--- real values vs predicted values ---')\nprint(df_final.head())\n### get the model1 metrics ###\nprint('--- model metrics ---')\nprint('mean absolute error:', metrics.mean_absolute_error(y_test, y_pred)) # mae \nprint('mean squared error:', metrics.mean_squared_error(y_test, y_pred)) # mse \nprint('root mean squared error:', np.sqrt(metrics.mean_squared_error(y_test, y_pred))) # rmse\nscore = metrics.r2_score(y_test , y_pred) # get the r2 score \nprint(\"r2 score = {}\".format(score)) # show the r2 score\nprint('model score=', model1.score(X_train, y_train)) # show the model score \nprint(\"model accuracy= {}%\".format(score * 100)) # show the model accuracy\nprint('--- confusion matrix ---')\nprint(metrics.confusion_matrix(y_test,y_pred)) # confusion matrix\nprint('--- classification report ---') \nprint(metrics.classification_report(y_test,y_pred)) # classificatin report\nprint('model1 accuracy score=', metrics.accuracy_score(y_test, y_pred)) # model accuracy", "--- real values vs predicted values ---\n Real_Values Predicted_Values\n0 1 1\n1 0 0\n2 1 1\n3 0 0\n4 0 0\n--- model metrics ---\nmean absolute error: 0.03272727272727273\nmean squared error: 0.03272727272727273\nroot mean squared error: 0.18090680674665818\nr2 score = 0.8664039727949908\nmodel score= 
0.9790337283500455\nmodel accuracy= 86.64039727949908%\n--- confusion matrix ---\n[[150 7]\n [ 2 116]]\n--- classification report ---\n precision recall f1-score support\n\n 0 0.99 0.96 0.97 157\n 1 0.94 0.98 0.96 118\n\n accuracy 0.97 275\n macro avg 0.96 0.97 0.97 275\nweighted avg 0.97 0.97 0.97 275\n\nmodel1 accuracy score= 0.9672727272727273\n" ], [ "### visualize the model prediction accuracy ###\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n### configure the plot ###\nprint('--- distplot accuracy --- ')\nf, ax = plt.subplots(1, 1, figsize = (10, 7))\nax1 = sns.distplot(y_test, hist=False, color=\"b\", label=\"Actual Values\")\nsns.distplot(y_pred, hist=False, color=\"r\", label=\"Predicted Values\" , axlabel='Charges', ax=ax1)\nplt.legend()", "--- distplot accuracy --- \n" ] ], [ [ "###### RandomSearch CV", "_____no_output_____" ] ], [ [ "### copy the data frame ###\ndf3 = df.copy()", "_____no_output_____" ], [ "### split the data into features & target sets ###\n# for single regression select the 1 feature\nX = df3.iloc[:, 0:4].values # set the features\ny = df3.iloc[:, 4].values # set the target \nprint('--- data shapes --- ')\nprint('X shape:', X.shape) # show the shape \nprint('y shape:', y.shape) # show the shape ", "--- data shapes --- \nX shape: (1372, 4)\ny shape: (1372,)\n" ], [ "### set the train test split parameters ###\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0) # split 80/20", "_____no_output_____" ], [ "### feature scaling ### \nfrom sklearn.preprocessing import StandardScaler\nsc = StandardScaler() # initiate the scalar \nX_train = sc.fit_transform(X_train) # fit transform the data with scalar\nX_test = sc.transform(X_test) # fit transform the data with scalar", "_____no_output_____" ], [ "### random forest classifier + randomizedsearch CV model ###\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.model_selection import RandomizedSearchCV\nmodel2 = RandomForestClassifier()\nparam_grid = { # create the param grid \n 'n_estimators': [20, 100, 200],\n 'max_features': ['auto', 'sqrt', 'log2'],\n 'max_leaf_nodes' : [2, 6, 10],\n 'max_depth' : [5, 15, 25],\n 'min_samples_split' : [2, 10, 15],\n # 'bootstrap': [True, False],\n # 'ccp_alpha': [0.0, 0.25, 0.50],\n # 'criterion': 'mse',\n # 'max_samples': [2, 10, 15],\n # 'min_impurity_decrease': [0.0, 0.25, 0.50],\n # 'min_impurity_split': [2, 10, 15],\n # 'min_samples_leaf': [1, 5, 10],\n # 'min_weight_fraction_leaf': [0.0, 0.25, 0.50],\n # 'n_jobs': [1, 2, 5],\n # 'oob_score': [True, False],\n # 'random_state': [0, 2, 4],\n # 'verbose': [1],\n # 'warm_start': [True, False] \n }\nCV_rfc = RandomizedSearchCV(model2, param_grid, cv=3) \n%time CV_rfc.fit(X_train, y_train)\nCV_rfc.best_params_", "CPU times: user 3.64 s, sys: 19 ms, total: 3.66 s\nWall time: 3.67 s\n" ], [ "### random forest classifier + random best params ###\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn import metrics\nmodel2 = RandomForestClassifier(\n max_depth= 15,\n max_features= 'auto',\n max_leaf_nodes= 10,\n min_samples_split= 15,\n n_estimators= 20\n )\nprint('--- model runtime --- ')\n%time model2.fit(X_train, y_train)\ny_pred = model2.predict(X_test) \n#### create data frame of predictions and results ### \ny_pred_df = pd.DataFrame(y_pred, columns=[\"Predicted_Values\" ])\ny_test_df = pd.DataFrame(np.array(y_test), columns=[\"Real_Values\"])\ndf_final = pd.concat([y_test_df , y_pred_df] , 
axis=1)\nprint('--- real values vs predicted values ---')\nprint(df_final.head())\n### get the model2 metrics ###\nprint('--- model metrics ---')\nprint('mean absolute error:', metrics.mean_absolute_error(y_test, y_pred)) # mae \nprint('mean squared error:', metrics.mean_squared_error(y_test, y_pred)) # mse \nprint('root mean squared error:', np.sqrt(metrics.mean_squared_error(y_test, y_pred))) # rmse\nscore = metrics.r2_score(y_test , y_pred) # get the r2 score \nprint(\"r2 score = {}\".format(score)) # show the r2 score\nprint('model score=', model2.score(X_train, y_train)) # show the model score \nprint(\"model accuracy= {}%\".format(score * 100)) # show the model accuracy\nprint('--- confusion matrix ---')\nprint(metrics.confusion_matrix(y_test,y_pred)) # confusion matrix\nprint('--- classification report ---') \nprint(metrics.classification_report(y_test,y_pred)) # classificatin report\nprint('model2 accuracy score=', metrics.accuracy_score(y_test, y_pred)) # model accuracy", "--- model runtime --- \nCPU times: user 45.8 ms, sys: 998 µs, total: 46.8 ms\nWall time: 48.1 ms\n--- real values vs predicted values ---\n Real_Values Predicted_Values\n0 1 1\n1 0 0\n2 1 1\n3 0 0\n4 0 0\n--- model metrics ---\nmean absolute error: 0.03272727272727273\nmean squared error: 0.03272727272727273\nroot mean squared error: 0.18090680674665818\nr2 score = 0.8664039727949908\nmodel score= 0.9781221513217867\nmodel accuracy= 86.64039727949908%\n--- confusion matrix ---\n[[150 7]\n [ 2 116]]\n--- classification report ---\n precision recall f1-score support\n\n 0 0.99 0.96 0.97 157\n 1 0.94 0.98 0.96 118\n\n accuracy 0.97 275\n macro avg 0.96 0.97 0.97 275\nweighted avg 0.97 0.97 0.97 275\n\nmodel2 accuracy score= 0.9672727272727273\n" ], [ "### visualize the model prediction accuracy ###\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n### configure the plot ###\nprint('--- distplot accuracy --- ')\nf, ax = plt.subplots(1, 1, figsize = (10, 7))\nax1 = sns.distplot(y_test, hist=False, color=\"b\", label=\"Actual Values\")\nsns.distplot(y_pred, hist=False, color=\"r\", label=\"Predicted Values\" , axlabel='Charges', ax=ax1)\nplt.legend()", "--- distplot accuracy --- \n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ] ]
d0134490eb7483484a3e20e24261257a27875da0
655,855
ipynb
Jupyter Notebook
RetinaNet_Video_Object_Detection.ipynb
thingumajig/colab-experiments
2198765a80854463efea2ff3e0ee52e183a290fc
[ "Apache-2.0" ]
4
2019-11-23T03:58:47.000Z
2020-06-09T15:33:44.000Z
RetinaNet_Video_Object_Detection.ipynb
thingumajig/colab-experiments
2198765a80854463efea2ff3e0ee52e183a290fc
[ "Apache-2.0" ]
null
null
null
RetinaNet_Video_Object_Detection.ipynb
thingumajig/colab-experiments
2198765a80854463efea2ff3e0ee52e183a290fc
[ "Apache-2.0" ]
2
2020-02-07T11:28:22.000Z
2020-03-19T01:06:43.000Z
872.147606
618,778
0.936637
[ [ [ "<a href=\"https://colab.research.google.com/github/thingumajig/colab-experiments/blob/master/RetinaNet_Video_Object_Detection.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ], [ "# .init", "_____no_output_____" ], [ "## setup keras-retinanet", "_____no_output_____" ] ], [ [ "!git clone https://github.com/fizyr/keras-retinanet.git\n\n%cd keras-retinanet/\n!pip install .\n\n!python setup.py build_ext --inplace", "Cloning into 'keras-retinanet'...\nremote: Enumerating objects: 4712, done.\u001b[K\nremote: Total 4712 (delta 0), reused 0 (delta 0), pack-reused 4712\u001b[K\nReceiving objects: 100% (4712/4712), 14.43 MiB | 36.84 MiB/s, done.\nResolving deltas: 100% (3128/3128), done.\n/content/keras-retinanet\nProcessing /content/keras-retinanet\nRequirement already satisfied: keras in /usr/local/lib/python3.6/dist-packages (from keras-retinanet==0.5.0) (2.2.4)\nCollecting keras-resnet (from keras-retinanet==0.5.0)\n Downloading https://files.pythonhosted.org/packages/76/d4/a35cbd07381139dda4db42c81b88c59254faac026109022727b45b31bcad/keras-resnet-0.2.0.tar.gz\nRequirement already satisfied: six in /usr/local/lib/python3.6/dist-packages (from keras-retinanet==0.5.0) (1.12.0)\nRequirement already satisfied: scipy in /usr/local/lib/python3.6/dist-packages (from keras-retinanet==0.5.0) (1.3.0)\nRequirement already satisfied: cython in /usr/local/lib/python3.6/dist-packages (from keras-retinanet==0.5.0) (0.29.9)\nRequirement already satisfied: Pillow in /usr/local/lib/python3.6/dist-packages (from keras-retinanet==0.5.0) (4.3.0)\nRequirement already satisfied: opencv-python in /usr/local/lib/python3.6/dist-packages (from keras-retinanet==0.5.0) (3.4.5.20)\nRequirement already satisfied: progressbar2 in /usr/local/lib/python3.6/dist-packages (from keras-retinanet==0.5.0) (3.38.0)\nRequirement already satisfied: keras-preprocessing>=1.0.5 in /usr/local/lib/python3.6/dist-packages (from keras->keras-retinanet==0.5.0) (1.0.9)\nRequirement already satisfied: keras-applications>=1.0.6 in /usr/local/lib/python3.6/dist-packages (from keras->keras-retinanet==0.5.0) (1.0.7)\nRequirement already satisfied: numpy>=1.9.1 in /usr/local/lib/python3.6/dist-packages (from keras->keras-retinanet==0.5.0) (1.16.4)\nRequirement already satisfied: h5py in /usr/local/lib/python3.6/dist-packages (from keras->keras-retinanet==0.5.0) (2.8.0)\nRequirement already satisfied: pyyaml in /usr/local/lib/python3.6/dist-packages (from keras->keras-retinanet==0.5.0) (3.13)\nRequirement already satisfied: olefile in /usr/local/lib/python3.6/dist-packages (from Pillow->keras-retinanet==0.5.0) (0.46)\nRequirement already satisfied: python-utils>=2.3.0 in /usr/local/lib/python3.6/dist-packages (from progressbar2->keras-retinanet==0.5.0) (2.3.0)\nBuilding wheels for collected packages: keras-retinanet, keras-resnet\n Building wheel for keras-retinanet (setup.py) ... \u001b[?25l\u001b[?25hdone\n Stored in directory: /root/.cache/pip/wheels/b2/9f/57/cb0305f6f5a41fc3c11ad67b8cedfbe9127775b563337827ba\n Building wheel for keras-resnet (setup.py) ... 
\u001b[?25l\u001b[?25hdone\n Stored in directory: /root/.cache/pip/wheels/5f/09/a5/497a30fd9ad9964e98a1254d1e164bcd1b8a5eda36197ecb3c\nSuccessfully built keras-retinanet keras-resnet\nInstalling collected packages: keras-resnet, keras-retinanet\nSuccessfully installed keras-resnet-0.2.0 keras-retinanet-0.5.0\nrunning build_ext\ncythoning keras_retinanet/utils/compute_overlap.pyx to keras_retinanet/utils/compute_overlap.c\n/usr/local/lib/python3.6/dist-packages/Cython/Compiler/Main.py:367: FutureWarning: Cython directive 'language_level' not set, using 2 for now (Py2). This will change in a later release! File: /content/keras-retinanet/keras_retinanet/utils/compute_overlap.pyx\n tree = Parsing.p_module(s, pxd, full_module_name)\nbuilding 'keras_retinanet.utils.compute_overlap' extension\ncreating build\ncreating build/temp.linux-x86_64-3.6\ncreating build/temp.linux-x86_64-3.6/keras_retinanet\ncreating build/temp.linux-x86_64-3.6/keras_retinanet/utils\nx86_64-linux-gnu-gcc -pthread -DNDEBUG -g -fwrapv -O2 -Wall -g -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 -fPIC -I/usr/include/python3.6m -I/usr/local/lib/python3.6/dist-packages/numpy/core/include -c keras_retinanet/utils/compute_overlap.c -o build/temp.linux-x86_64-3.6/keras_retinanet/utils/compute_overlap.o\nIn file included from \u001b[01m\u001b[K/usr/local/lib/python3.6/dist-packages/numpy/core/include/numpy/ndarraytypes.h:1822:0\u001b[m\u001b[K,\n from \u001b[01m\u001b[K/usr/local/lib/python3.6/dist-packages/numpy/core/include/numpy/ndarrayobject.h:12\u001b[m\u001b[K,\n from \u001b[01m\u001b[K/usr/local/lib/python3.6/dist-packages/numpy/core/include/numpy/arrayobject.h:4\u001b[m\u001b[K,\n from \u001b[01m\u001b[Kkeras_retinanet/utils/compute_overlap.c:598\u001b[m\u001b[K:\n\u001b[01m\u001b[K/usr/local/lib/python3.6/dist-packages/numpy/core/include/numpy/npy_1_7_deprecated_api.h:17:2:\u001b[m\u001b[K \u001b[01;35m\u001b[Kwarning: \u001b[m\u001b[K#warning \"Using deprecated NumPy API, disable it with \" \"#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION\" [\u001b[01;35m\u001b[K-Wcpp\u001b[m\u001b[K]\n #\u001b[01;35m\u001b[Kwarning\u001b[m\u001b[K \"Using deprecated NumPy API, disable it with \" \\\n \u001b[01;35m\u001b[K^~~~~~~\u001b[m\u001b[K\ncreating build/lib.linux-x86_64-3.6\ncreating build/lib.linux-x86_64-3.6/keras_retinanet\ncreating build/lib.linux-x86_64-3.6/keras_retinanet/utils\nx86_64-linux-gnu-gcc -pthread -shared -Wl,-O1 -Wl,-Bsymbolic-functions -Wl,-Bsymbolic-functions -Wl,-z,relro -Wl,-Bsymbolic-functions -Wl,-z,relro -g -fstack-protector-strong -Wformat -Werror=format-security -Wdate-time -D_FORTIFY_SOURCE=2 build/temp.linux-x86_64-3.6/keras_retinanet/utils/compute_overlap.o -o build/lib.linux-x86_64-3.6/keras_retinanet/utils/compute_overlap.cpython-36m-x86_64-linux-gnu.so\ncopying build/lib.linux-x86_64-3.6/keras_retinanet/utils/compute_overlap.cpython-36m-x86_64-linux-gnu.so -> keras_retinanet/utils\n" ] ], [ [ "## download model", "_____no_output_____" ] ], [ [ "#!curl -LJO --output snapshots/pretrained.h5 https://github.com/fizyr/keras-retinanet/releases/download/0.5.0/resnet50_coco_best_v2.1.0.h5 \n\nimport urllib\nPRETRAINED_MODEL = './snapshots/_pretrained_model.h5'\nURL_MODEL = 'https://github.com/fizyr/keras-retinanet/releases/download/0.5.0/resnet50_coco_best_v2.1.0.h5'\nurllib.request.urlretrieve(URL_MODEL, PRETRAINED_MODEL)", "_____no_output_____" ] ], [ [ "# inference", "_____no_output_____" ], [ "## modules", "_____no_output_____" ] ], [ [ "!pwd\n#import os, 
sys\n#sys.path.insert(0, 'keras-retinanet')\n\n \n\n# show images inline\n%matplotlib inline\n\n# automatically reload modules when they have changed\n%load_ext autoreload\n%autoreload 2\n\nimport os\n#os.environ['CUDA_VISIBLE_DEVICES'] = '0'\n\n# import keras\nimport keras\n\nfrom keras_retinanet import models\nfrom keras_retinanet.utils.image import read_image_bgr, preprocess_image, resize_image\nfrom keras_retinanet.utils.visualization import draw_box, draw_caption\nfrom keras_retinanet.utils.colors import label_color\n\n# import miscellaneous modules\nimport matplotlib.pyplot as plt\nimport cv2\nimport numpy as np\nimport time\n\n# set tf backend to allow memory to grow, instead of claiming everything\nimport tensorflow as tf\n\ndef get_session():\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n return tf.Session(config=config)\n\n# use this environment flag to change which GPU to use\n#os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"1\"\n\n# set the modified tf session as backend in keras\nkeras.backend.tensorflow_backend.set_session(get_session())", "/content/keras-retinanet\n" ] ], [ [ "## load model", "_____no_output_____" ] ], [ [ "# %cd keras-retinanet/\nmodel_path = os.path.join('snapshots', sorted(os.listdir('snapshots'), reverse=True)[0])\nprint(model_path)\nprint(os.path.isfile(model_path))\n\n# load retinanet model\nmodel = models.load_model(model_path, backbone_name='resnet50')\n# model = models.convert_model(model)\n\n\n# load label to names mapping for visualization purposes\nlabels_to_names = {0: 'person', 1: 'bicycle', 2: 'car', 3: 'motorcycle', 4: 'airplane', 5: 'bus', \n 6: 'train', 7: 'truck', 8: 'boat', 9: 'traffic light', 10: 'fire hydrant', \n 11: 'stop sign', 12: 'parking meter', 13: 'bench', 14: 'bird', 15: 'cat', \n 16: 'dog', 17: 'horse', 18: 'sheep', 19: 'cow', 20: 'elephant', 21: 'bear', \n 22: 'zebra', 23: 'giraffe', 24: 'backpack', 25: 'umbrella', 26: 'handbag', \n 27: 'tie', 28: 'suitcase', 29: 'frisbee', 30: 'skis', 31: 'snowboard', \n 32: 'sports ball', 33: 'kite', 34: 'baseball bat', 35: 'baseball glove', \n 36: 'skateboard', 37: 'surfboard', 38: 'tennis racket', 39: 'bottle', \n 40: 'wine glass', 41: 'cup', 42: 'fork', 43: 'knife', 44: 'spoon', \n 45: 'bowl', 46: 'banana', 47: 'apple', 48: 'sandwich', 49: 'orange', \n 50: 'broccoli', 51: 'carrot', 52: 'hot dog', 53: 'pizza', 54: 'donut', \n 55: 'cake', 56: 'chair', 57: 'couch', 58: 'potted plant', 59: 'bed', \n 60: 'dining table', 61: 'toilet', 62: 'tv', 63: 'laptop', 64: 'mouse', \n 65: 'remote', 66: 'keyboard', 67: 'cell phone', 68: 'microwave', \n 69: 'oven', 70: 'toaster', 71: 'sink', 72: 'refrigerator', 73: 'book', \n 74: 'clock', 75: 'vase', 76: 'scissors', 77: 'teddy bear', 78: 'hair drier', \n 79: 'toothbrush'}", "snapshots/_pretrained_model.h5\nTrue\nWARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/op_def_library.py:263: colocate_with (from tensorflow.python.framework.ops) is deprecated and will be removed in a future version.\nInstructions for updating:\nColocations handled automatically by placer.\n" ] ], [ [ "## detect objects", "_____no_output_____" ] ], [ [ "\n\ndef img_inference(img_path, threshold_score = 0.8):\n image = read_image_bgr(img_path)\n\n # copy to draw on\n draw = image.copy()\n draw = cv2.cvtColor(draw, cv2.COLOR_BGR2RGB)\n\n # preprocess image for network\n image = preprocess_image(image)\n image, scale = resize_image(image)\n\n # process image\n start = time.time()\n boxes, scores, labels = 
model.predict_on_batch(np.expand_dims(image, axis=0))\n print(\"processing time: \", time.time() - start)\n\n # correct for image scale\n boxes /= scale\n\n # visualize detections\n for box, score, label in zip(boxes[0], scores[0], labels[0]):\n # scores are sorted so we can break\n if score < threshold_score:\n break\n\n color = label_color(label)\n\n b = box.astype(int)\n draw_box(draw, b, color=color)\n\n caption = \"{} {:.3f}\".format(labels_to_names[label], score)\n draw_caption(draw, b, caption)\n\n plt.figure(figsize=(10, 10))\n plt.axis('off')\n plt.imshow(draw)\n plt.show()", "_____no_output_____" ], [ "img_inference('examples/000000008021.jpg')", "processing time: 7.703946113586426\n" ], [ "from tensorflow.python.client import device_lib\n\ndef get_available_gpus():\n local_device_protos = device_lib.list_local_devices()\n return [x.physical_device_desc for x in local_device_protos if x.device_type == 'GPU']\n\nGPU = get_available_gpus()[-1][0:-1]\nprint(GPU)", "device: 0, name: Tesla T4, pci bus id: 0000:00:04.0, compute capability: 7.\n" ], [ "import glob\n \ndef create_video(img_path, name ='processed', img_ext = '*.jpg', image_size=(1280, 720)): \n _name = name + '.mp4'\n #_cap = VideoCapture(0)\n _fourcc = cv2.VideoWriter_fourcc(*'MP4V')\n _out = cv2.VideoWriter(_name, _fourcc, 15.0, image_size)\n \n # out = cv2.VideoWriter('project.avi',cv2.VideoWriter_fourcc(*'DIVX'), 15, size)\n for filename in sorted(glob.glob(os.path.join(img_path, img_ext))):\n print(filename)\n img = cv2.imread(filename)\n _out.write(img)\n del img\n _out.release()", "_____no_output_____" ], [ "import unicodedata\nimport string\n\nvalid_filename_chars = f\"-_.() {string.ascii_letters}{string.digits}\"\nchar_limit = 255\n\n\ndef clean_filename(filename, whitelist=valid_filename_chars, replace=' '):\n # replace spaces\n for r in replace:\n filename = filename.replace(r, '_')\n\n # keep only valid ascii chars\n cleaned_filename = unicodedata.normalize('NFKD', filename).encode('ASCII', 'ignore').decode()\n\n # keep only whitelisted chars\n cleaned_filename = ''.join(c for c in cleaned_filename if c in whitelist)\n if len(cleaned_filename) > char_limit:\n print(f\"Warning, filename truncated because it was over {char_limit}. 
Filenames may no longer be unique\")\n return cleaned_filename[:char_limit] ", "_____no_output_____" ], [ "import colorsys\nimport random\nfrom tqdm import tqdm\n\nN = len(labels_to_names)\nHSV_tuples = [(x*1.0/N, 0.5, 0.5) for x in range(N)]\nRGB_tuples = list(map(lambda x: tuple(255*np.array(colorsys.hsv_to_rgb(*x))), HSV_tuples))\nrandom.shuffle(RGB_tuples)\n\ndef object_detect_video(video_path, out_temp_dir='tmp', video_name = 'processed', threshold = 0.6):\n cap = cv2.VideoCapture(video_path)\n \n if not os.path.exists(out_temp_dir):\n os.makedirs(out_temp_dir)\n\n tq = tqdm(total=1, unit=\"frame(s)\")\n \n counter = 0\n sum_time = 0\n video_out = None\n while(True):\n ret, draw = cap.read()\n if not ret:\n break\n bgr = cv2.cvtColor(draw, cv2.COLOR_RGB2BGR)\n\n # preprocess image for network\n image = preprocess_image(bgr)\n image, scale = resize_image(image)\n \n if counter == 0:\n height, width, channels = draw.shape\n #print(f'Shape: {width}X{height}')\n _name = video_name + '.mp4'\n _fourcc = cv2.VideoWriter_fourcc(*'MP4V')\n video_out = cv2.VideoWriter(_name, _fourcc, 20.0, (width, height))\n \n\n # process image\n start = time.time() \n boxes, scores, labels = model.predict_on_batch(np.expand_dims(image, axis=0))\n t = time.time() - start\n #print(f\"frame:{counter} processing time: {t}\")\n tq.total += 1\n # fancy way to give info without forcing a refresh\n tq.set_postfix(dir=f'frame {counter} time {sum_time}', refresh=False)\n tq.update(0) # may trigger a refresh\n\n # correct for image scale\n boxes /= scale\n \n # visualize detections\n #draw_detections(image, boxes, scores, labels, color=None, label_to_name=None, score_threshold=0.5)\n \n for box, score, label in zip(boxes[0], scores[0], labels[0]):\n if score < threshold:\n continue\n \n\n color = label_color(label)\n\n b = box.astype(int)\n draw_box(draw, b, color=color)\n\n caption = f\"{labels_to_names[label]} {score:.3f}\"\n draw_caption(draw, b, caption)\n \n if sum_time>0:\n cv2.putText(draw, \"Processing time %.2fs (%.1ffps) AVG %.2fs (%.1ffps)\"%(t,1.0/t,sum_time/counter,counter/sum_time), (10, 70), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 7)\n cv2.putText(draw, \"Processing time %.2fs (%.1ffps) AVG %.2fs (%.1ffps)\"%(t,1.0/t,sum_time/counter,counter/sum_time), (10, 70), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 3)\n \n # cv2.imwrite(os.path.join(out_temp_dir, f'img{counter:08d}.jpg'),draw)\n video_out.write(draw)\n \n counter=counter+1\n sum_time+=t\n\n cap.release()\n video_out.release()\n cv2.destroyAllWindows()\n tq.set_postfix(dir=video_path)\n tq.close()", "_____no_output_____" ], [ "from google.colab import files\n\nuploaded = files.upload()\n\nfor fn in uploaded.keys():\n print(f'User uploaded file \"{fn}\" with length {len(uploaded[fn])} bytes')\n\n fn0 = clean_filename(fn)\n #with open(fn0, \"wb\") as df:\n # df.write(uploaded[fn])\n # df.close()\n \n object_detect_video(fn, f'{fn0}_tmp', video_name=f'{os.path.basename(fn0)}_processed', threshold = 0.5)\n #create_video(f'{fn0}_tmp')\n files.download(f'{os.path.basename(fn0)}_processed.mp4')\n ", "_____no_output_____" ], [ "# object_detect_video('Canada vs. Finland - Gold Medal Game - Game Highlights - IIHFWorlds 2019.mp4', 'video_tmp', video_name = 'processed2')", "_____no_output_____" ], [ "#sorted(glob.glob('/content/keras-retinanet/video_tmp/*.jpg'))\n#create_video('/content/keras-retinanet/video_tmp')\n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
d013486eff30978dcc5e9a567d974bc2d87e2552
48,779
ipynb
Jupyter Notebook
HMM TaggerPart of Speech Tagging - HMM.ipynb
Akshat2127/Part-Of-Speech-Tagging
0753b9d1abd929ed016b4632be1b708e8616e353
[ "MIT" ]
null
null
null
HMM TaggerPart of Speech Tagging - HMM.ipynb
Akshat2127/Part-Of-Speech-Tagging
0753b9d1abd929ed016b4632be1b708e8616e353
[ "MIT" ]
null
null
null
HMM TaggerPart of Speech Tagging - HMM.ipynb
Akshat2127/Part-Of-Speech-Tagging
0753b9d1abd929ed016b4632be1b708e8616e353
[ "MIT" ]
null
null
null
42.196367
600
0.581726
[ [ [ "# Project: Part of Speech Tagging with Hidden Markov Models \n---\n### Introduction\n\nPart of speech tagging is the process of determining the syntactic category of a word from the words in its surrounding context. It is often used to help disambiguate natural language phrases because it can be done quickly with high accuracy. Tagging can be used for many NLP tasks like determining correct pronunciation during speech synthesis (for example, _dis_-count as a noun vs dis-_count_ as a verb), for information retrieval, and for word sense disambiguation.\n\nIn this notebook, we'll use the [Pomegranate](http://pomegranate.readthedocs.io/) library to build a hidden Markov model for part of speech tagging using a \"universal\" tagset. Hidden Markov models have been able to achieve [>96% tag accuracy with larger tagsets on realistic text corpora](http://www.coli.uni-saarland.de/~thorsten/publications/Brants-ANLP00.pdf). Hidden Markov models have also been used for speech recognition and speech generation, machine translation, gene recognition for bioinformatics, and human gesture recognition for computer vision, and more. \n\n![](_post-hmm.png)\n", "_____no_output_____" ], [ "### The Road Ahead\nWe will complete this project in 3 steps mentioned below. The section on Step 4 includes references & resources you can use to further explore HMM taggers.\n\n- [Step 1](#Step-1:-Read-and-preprocess-the-dataset): Review the provided interface to load and access the text corpus\n- [Step 2](#Step-2:-Build-a-Most-Frequent-Class-tagger): Build a Most Frequent Class tagger to use as a baseline\n- [Step 3](#Step-3:-Build-an-HMM-tagger): Build an HMM Part of Speech tagger and compare to the MFC baseline\n- [Step 4](#Step-4:-[Optional]-Improving-model-performance): (Optional) Improve the HMM tagger", "_____no_output_____" ], [ "<div class=\"alert alert-block alert-warning\">\n**Note:** Make sure you have selected a **Python 3** kernel in Workspaces or the hmm-tagger conda environment if you are running the Jupyter server on your own machine.\n</div>", "_____no_output_____" ] ], [ [ "# Jupyter \"magic methods\" -- only need to be run once per kernel restart\n%load_ext autoreload\n%aimport helpers, tests\n%autoreload 1", "The autoreload extension is already loaded. To reload it, use:\n %reload_ext autoreload\n" ], [ "# import python modules -- this cell needs to be run again if you make changes to any of the files\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom IPython.core.display import HTML\nfrom itertools import chain\nfrom collections import Counter, defaultdict\nfrom helpers import show_model, Dataset\nfrom pomegranate import State, HiddenMarkovModel, DiscreteDistribution", "_____no_output_____" ] ], [ [ "## Step 1: Read and preprocess the dataset\n---\nWe'll start by reading in a text corpus and splitting it into a training and testing dataset. The data set is a copy of the [Brown corpus](https://en.wikipedia.org/wiki/Brown_Corpus) (originally from the [NLTK](https://www.nltk.org/) library) that has already been pre-processed to only include the [universal tagset](https://arxiv.org/pdf/1104.2086.pdf). We should get slightly higher accuracy using this simplified tagset than the same model would achieve on a larger tagset like the full [Penn treebank tagset](https://www.ling.upenn.edu/courses/Fall_2003/ling001/penn_treebank_pos.html).\n\nThe `Dataset` class provided in helpers.py will read and parse the corpus. 
You can generate your own datasets compatible with the reader by writing them to the following format. The dataset is stored in plaintext as a collection of words and corresponding tags. Each sentence starts with a unique identifier on the first line, followed by one tab-separated word/tag pair on each following line. Sentences are separated by a single blank line.\n\nExample from the Brown corpus. \n```\nb100-38532\nPerhaps\tADV\nit\tPRON\nwas\tVERB\nright\tADJ\n;\t.\n;\t.\n\nb100-35577\n...\n```", "_____no_output_____" ] ], [ [ "data = Dataset(\"tags-universal.txt\", \"brown-universal.txt\", train_test_split=0.8)\n\nprint(\"There are {} sentences in the corpus.\".format(len(data)))\nprint(\"There are {} sentences in the training set.\".format(len(data.training_set)))\nprint(\"There are {} sentences in the testing set.\".format(len(data.testing_set)))\n\nassert len(data) == len(data.training_set) + len(data.testing_set), \\\n \"The number of sentences in the training set + testing set should sum to the number of sentences in the corpus\"", "There are 57340 sentences in the corpus.\nThere are 45872 sentences in the training set.\nThere are 11468 sentences in the testing set.\n" ] ], [ [ "### The Dataset Interface\n\nWe can access (mostly) immutable references to the dataset through a simple interface provided through the `Dataset` class, which represents an iterable collection of sentences along with easy access to partitions of the data for training & testing. Review the reference below, to make sure you understand the interface before moving on to the next step.\n\n```\nDataset-only Attributes:\n training_set - reference to a Subset object containing the samples for training\n testing_set - reference to a Subset object containing the samples for testing\n\nDataset & Subset Attributes:\n sentences - a dictionary with an entry {sentence_key: Sentence()} for each sentence in the corpus\n keys - an immutable ordered (not sorted) collection of the sentence_keys for the corpus\n vocab - an immutable collection of the unique words in the corpus\n tagset - an immutable collection of the unique tags in the corpus\n X - returns an array of words grouped by sentences ((w11, w12, w13, ...), (w21, w22, w23, ...), ...)\n Y - returns an array of tags grouped by sentences ((t11, t12, t13, ...), (t21, t22, t23, ...), ...)\n N - returns the number of distinct samples (individual words or tags) in the dataset\n\nMethods:\n stream() - returns an flat iterable over all (word, tag) pairs across all sentences in the corpus\n __iter__() - returns an iterable over the data as (sentence_key, Sentence()) pairs\n __len__() - returns the nubmer of sentences in the dataset\n```\n\nFor example, consider a Subset, `subset`, of the sentences `{\"s0\": Sentence((\"See\", \"Spot\", \"run\"), (\"VERB\", \"NOUN\", \"VERB\")), \"s1\": Sentence((\"Spot\", \"ran\"), (\"NOUN\", \"VERB\"))}`. The subset will have these attributes:\n\n```\nsubset.keys == {\"s1\", \"s0\"} # unordered\nsubset.vocab == {\"See\", \"run\", \"ran\", \"Spot\"} # unordered\nsubset.tagset == {\"VERB\", \"NOUN\"} # unordered\nsubset.X == ((\"Spot\", \"ran\"), (\"See\", \"Spot\", \"run\")) # order matches .keys\nsubset.Y == ((\"NOUN\", \"VERB\"), (\"VERB\", \"NOUN\", \"VERB\")) # order matches .keys\nsubset.N == 7 # there are a total of seven observations over all sentences\nlen(subset) == 2 # because there are two sentences\n```\n\n<div class=\"alert alert-block alert-info\">\n**Note:** The `Dataset` class is _convenient_, but it is **not** efficient. 
It is not suitable for huge datasets because it stores multiple redundant copies of the same data.\n</div>", "_____no_output_____" ], [ "#### Sentences\n\n`Dataset.sentences` is a dictionary of all sentences in the training corpus, each keyed to a unique sentence identifier. Each `Sentence` is itself an object with two attributes: a tuple of the words in the sentence named `words` and a tuple of the tag corresponding to each word named `tags`.", "_____no_output_____" ] ], [ [ "key = 'b100-38532'\nprint(\"Sentence: {}\".format(key))\nprint(\"words:\\n\\t{!s}\".format(data.sentences[key].words))\nprint(\"tags:\\n\\t{!s}\".format(data.sentences[key].tags))", "Sentence: b100-38532\nwords:\n\t('Perhaps', 'it', 'was', 'right', ';', ';')\ntags:\n\t('ADV', 'PRON', 'VERB', 'ADJ', '.', '.')\n" ] ], [ [ "<div class=\"alert alert-block alert-info\">\n**Note:** The underlying iterable sequence is **unordered** over the sentences in the corpus; it is not guaranteed to return the sentences in a consistent order between calls. Use `Dataset.stream()`, `Dataset.keys`, `Dataset.X`, or `Dataset.Y` attributes if you need ordered access to the data.\n</div>\n\n#### Counting Unique Elements\n\nWe can access the list of unique words (the dataset vocabulary) via `Dataset.vocab` and the unique list of tags via `Dataset.tagset`.", "_____no_output_____" ] ], [ [ "print(\"There are a total of {} samples of {} unique words in the corpus.\"\n .format(data.N, len(data.vocab)))\nprint(\"There are {} samples of {} unique words in the training set.\"\n .format(data.training_set.N, len(data.training_set.vocab)))\nprint(\"There are {} samples of {} unique words in the testing set.\"\n .format(data.testing_set.N, len(data.testing_set.vocab)))\nprint(\"There are {} words in the test set that are missing in the training set.\"\n .format(len(data.testing_set.vocab - data.training_set.vocab)))\n\nassert data.N == data.training_set.N + data.testing_set.N, \\\n \"The number of training + test samples should sum to the total number of samples\"", "There are a total of 1161192 samples of 56057 unique words in the corpus.\nThere are 928458 samples of 50536 unique words in the training set.\nThere are 232734 samples of 25112 unique words in the testing set.\nThere are 5521 words in the test set that are missing in the training set.\n" ] ], [ [ "#### Accessing word and tag Sequences\nThe `Dataset.X` and `Dataset.Y` attributes provide access to ordered collections of matching word and tag sequences for each sentence in the dataset.", "_____no_output_____" ] ], [ [ "# accessing words with Dataset.X and tags with Dataset.Y \nfor i in range(2): \n print(\"Sentence {}:\".format(i + 1), data.X[i])\n print()\n print(\"Labels {}:\".format(i + 1), data.Y[i])\n print()", "Sentence 1: ('Mr.', 'Podger', 'had', 'thanked', 'him', 'gravely', ',', 'and', 'now', 'he', 'made', 'use', 'of', 'the', 'advice', '.')\n\nLabels 1: ('NOUN', 'NOUN', 'VERB', 'VERB', 'PRON', 'ADV', '.', 'CONJ', 'ADV', 'PRON', 'VERB', 'NOUN', 'ADP', 'DET', 'NOUN', '.')\n\nSentence 2: ('But', 'there', 'seemed', 'to', 'be', 'some', 'difference', 'of', 'opinion', 'as', 'to', 'how', 'far', 'the', 'board', 'should', 'go', ',', 'and', 'whose', 'advice', 'it', 'should', 'follow', '.')\n\nLabels 2: ('CONJ', 'PRT', 'VERB', 'PRT', 'VERB', 'DET', 'NOUN', 'ADP', 'NOUN', 'ADP', 'ADP', 'ADV', 'ADV', 'DET', 'NOUN', 'VERB', 'VERB', '.', 'CONJ', 'DET', 'NOUN', 'PRON', 'VERB', 'VERB', '.')\n\n" ] ], [ [ "#### Accessing (word, tag) Samples\nThe `Dataset.stream()` method returns an iterator that chains 
together every pair of (word, tag) entries across all sentences in the entire corpus.", "_____no_output_____" ] ], [ [ "# use Dataset.stream() (word, tag) samples for the entire corpus\nprint(\"\\nStream (word, tag) pairs:\\n\")\nfor i, pair in enumerate(data.stream()):\n    print(\"\\t\", pair)\n    if i > 5: break", "\nStream (word, tag) pairs:\n\n\t ('Mr.', 'NOUN')\n\t ('Podger', 'NOUN')\n\t ('had', 'VERB')\n\t ('thanked', 'VERB')\n\t ('him', 'PRON')\n\t ('gravely', 'ADV')\n\t (',', '.')\n" ] ], [ [ "\nFor both our baseline tagger and the HMM model we'll build, we need to estimate the frequency of tags & words from the frequency counts of observations in the training corpus. The next several cells implement the functions that compute these counts. ", "_____no_output_____" ], [ "## Step 2: Build a Most Frequent Class tagger\n---\n\nPerhaps the simplest tagger (and a good baseline for tagger performance) is to simply choose the tag most frequently assigned to each word. This \"most frequent class\" tagger inspects each observed word in the sequence and assigns it the label that was most often assigned to that word in the corpus.", "_____no_output_____" ], [ "### IMPLEMENTATION: Pair Counts\n\nThe function below computes the joint frequency counts for two input sequences.", "_____no_output_____" ] ], [ [ "def pair_counts(sequences_A, sequences_B):\n    \"\"\"Return a dictionary keyed to each unique value in the first sequence list\n    that counts the number of occurrences of the corresponding value from the\n    second sequences list.\n    \n    For example, if sequences_A is tags and sequences_B is the corresponding\n    words, then if 1244 sequences contain the word \"time\" tagged as a NOUN, then\n    you should return a dictionary such that pair_counts[NOUN][time] == 1244\n    \"\"\"\n    # initialize an empty inner dictionary for each tag\n    pair_dict = {}\n    for tag in sequences_A:\n        pair_dict[tag] = {}\n    # tally each (word, tag) observation under its tag\n    for word, tag in sequences_B:\n        if word in pair_dict[tag]:\n            pair_dict[tag][word] = pair_dict[tag][word] + 1\n        else:\n            pair_dict[tag][word] = 1\n    return pair_dict\n\n# Calculate C(t_i, w_i)\nemission_counts = pair_counts(data.tagset, data.stream())\n\nassert len(emission_counts) == 12, \\\n       \"Uh oh. There should be 12 tags in your dictionary.\"\nassert max(emission_counts[\"NOUN\"], key=emission_counts[\"NOUN\"].get) == 'time', \\\n       \"Hmmm...'time' is expected to be the most common NOUN.\"\nHTML('<div class=\"alert alert-block alert-success\">Your emission counts look good!</div>')", "_____no_output_____" ] ], [ [ "### IMPLEMENTATION: Most Frequent Class Tagger\n\nUse the `pair_counts()` function and the training dataset to find the most frequent class label for each word in the training data, and populate the `mfc_table` below; a minimal sketch of the idea follows. 
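\n\nThis sketch is illustrative only and not part of the project interface -- the names `word_tag_counts` and `table` are hypothetical:\n\n```python\nfrom collections import Counter, defaultdict\n\n# Count how often each tag is assigned to each word in the training set,\n# then keep the single most frequent tag per word.\nword_tag_counts = defaultdict(Counter)\nfor word, tag in data.training_set.stream():\n    word_tag_counts[word][tag] += 1\ntable = {word: counts.most_common(1)[0][0] for word, counts in word_tag_counts.items()}\n```\n\n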
The table keys should be words, and the values should be the appropriate tag string.\n\nThe `MFCTagger` class is provided to mock the interface of Pomegranate HMM models so that they can be used interchangeably.", "_____no_output_____" ] ], [ [ "# Create a lookup table mfc_table where mfc_table[word] contains the tag label most frequently assigned to that word\nfrom collections import namedtuple\n\nFakeState = namedtuple(\"FakeState\", \"name\")\n\nclass MFCTagger:\n    # NOTE: You should not need to modify this class or any of its methods\n    missing = FakeState(name=\"<MISSING>\")\n    \n    def __init__(self, table):\n        self.table = defaultdict(lambda: MFCTagger.missing)\n        self.table.update({word: FakeState(name=tag) for word, tag in table.items()})\n    \n    def viterbi(self, seq):\n        \"\"\"This method simplifies predictions by matching the Pomegranate viterbi() interface\"\"\"\n        return 0., list(enumerate([\"<start>\"] + [self.table[w] for w in seq] + [\"<end>\"]))\n\n\n# calculate the frequency of each tag being assigned to each word (hint: similar, but not\n# the same as the emission probabilities) and use it to fill the mfc_table\n\nword_counts = pair_counts(data.tagset, data.training_set.stream())\n\ndef getMaxFreq(word, counts):\n    \"\"\"Return the tag most frequently assigned to the given word.\"\"\"\n    maxFreq = -1\n    maxFreqTag = None\n    for tag, word_freqs in counts.items():\n        if word in word_freqs and word_freqs[word] > maxFreq:\n            maxFreq = word_freqs[word]\n            maxFreqTag = tag\n    return maxFreqTag\n\ndef GetVocabFrequencies(vocab, counts):\n    \"\"\"Map each word in the vocabulary to its most frequent tag.\"\"\"\n    word_freq = {}\n    for word in vocab:\n        word_freq[word] = getMaxFreq(word, counts)\n    return word_freq\n\nmfc_table = GetVocabFrequencies(data.training_set.vocab, word_counts)\n\n\n# DO NOT MODIFY BELOW THIS LINE\nmfc_model = MFCTagger(mfc_table)  # Create a Most Frequent Class tagger instance\n\nassert len(mfc_table) == len(data.training_set.vocab), \"\"\nassert all(k in data.training_set.vocab for k in mfc_table.keys()), \"\"\nassert sum(int(k not in mfc_table) for k in data.testing_set.vocab) == 5521, \"\"\nHTML('<div class=\"alert alert-block alert-success\">Your MFC tagger has all the correct words!</div>')", "_____no_output_____" ] ], [ [ "### Making Predictions with a Model\nThe helper functions provided below interface with Pomegranate network models & the mocked MFCTagger to take advantage of the [missing value](http://pomegranate.readthedocs.io/en/latest/nan.html) functionality in Pomegranate through a simple sequence decoding function. Run these functions, then run the next cell to see some of the predictions made by the MFC tagger.", "_____no_output_____" ] ], [ [ "def replace_unknown(sequence):\n    \"\"\"Return a copy of the input sequence where each unknown word is replaced\n    by the literal string value 'nan'. 
Pomegranate will ignore these values\n during computation.\n \"\"\"\n return [w if w in data.training_set.vocab else 'nan' for w in sequence]\n\ndef simplify_decoding(X, model):\n \"\"\"X should be a 1-D sequence of observations for the model to predict\"\"\"\n _, state_path = model.viterbi(replace_unknown(X))\n return [state[1].name for state in state_path[1:-1]] # do not show the start/end state predictions", "_____no_output_____" ] ], [ [ "### Example Decoding Sequences with MFC Tagger", "_____no_output_____" ] ], [ [ "for key in data.testing_set.keys[:3]:\n print(\"Sentence Key: {}\\n\".format(key))\n print(\"Predicted labels:\\n-----------------\")\n print(simplify_decoding(data.sentences[key].words, mfc_model))\n print()\n print(\"Actual labels:\\n--------------\")\n print(data.sentences[key].tags)\n print(\"\\n\")", "Sentence Key: b100-28144\n\nPredicted labels:\n-----------------\n['CONJ', 'NOUN', 'NUM', '.', 'NOUN', 'NUM', '.', 'NOUN', 'NUM', '.', 'CONJ', 'NOUN', 'NUM', '.', '.', 'NOUN', '.', '.']\n\nActual labels:\n--------------\n('CONJ', 'NOUN', 'NUM', '.', 'NOUN', 'NUM', '.', 'NOUN', 'NUM', '.', 'CONJ', 'NOUN', 'NUM', '.', '.', 'NOUN', '.', '.')\n\n\nSentence Key: b100-23146\n\nPredicted labels:\n-----------------\n['PRON', 'VERB', 'DET', 'NOUN', 'ADP', 'ADJ', 'ADJ', 'NOUN', 'VERB', 'VERB', '.', 'ADP', 'VERB', 'DET', 'NOUN', 'ADP', 'NOUN', 'ADP', 'DET', 'NOUN', '.']\n\nActual labels:\n--------------\n('PRON', 'VERB', 'DET', 'NOUN', 'ADP', 'ADJ', 'ADJ', 'NOUN', 'VERB', 'VERB', '.', 'ADP', 'VERB', 'DET', 'NOUN', 'ADP', 'NOUN', 'ADP', 'DET', 'NOUN', '.')\n\n\nSentence Key: b100-35462\n\nPredicted labels:\n-----------------\n['DET', 'ADJ', 'NOUN', 'VERB', 'VERB', 'VERB', 'ADP', 'DET', 'ADJ', 'ADJ', 'NOUN', 'ADP', 'DET', 'ADJ', 'NOUN', '.', 'ADP', 'ADJ', 'NOUN', '.', 'CONJ', 'ADP', 'DET', '<MISSING>', 'ADP', 'ADJ', 'ADJ', '.', 'ADJ', '.', 'CONJ', 'ADJ', 'NOUN', 'ADP', 'ADV', 'NOUN', '.']\n\nActual labels:\n--------------\n('DET', 'ADJ', 'NOUN', 'VERB', 'VERB', 'VERB', 'ADP', 'DET', 'ADJ', 'ADJ', 'NOUN', 'ADP', 'DET', 'ADJ', 'NOUN', '.', 'ADP', 'ADJ', 'NOUN', '.', 'CONJ', 'ADP', 'DET', 'NOUN', 'ADP', 'ADJ', 'ADJ', '.', 'ADJ', '.', 'CONJ', 'ADJ', 'NOUN', 'ADP', 'ADJ', 'NOUN', '.')\n\n\n" ] ], [ [ "### Evaluating Model Accuracy\n\nThe function below will evaluate the accuracy of the MFC tagger on the collection of all sentences from a text corpus. ", "_____no_output_____" ] ], [ [ "def accuracy(X, Y, model):\n \"\"\"Calculate the prediction accuracy by using the model to decode each sequence\n in the input X and comparing the prediction with the true labels in Y.\n \n The X should be an array whose first dimension is the number of sentences to test,\n and each element of the array should be an iterable of the words in the sequence.\n The arrays X and Y should have the exact same shape.\n \n X = [(\"See\", \"Spot\", \"run\"), (\"Run\", \"Spot\", \"run\", \"fast\"), ...]\n Y = [(), (), ...]\n \"\"\"\n correct = total_predictions = 0\n for observations, actual_tags in zip(X, Y):\n \n # The model.viterbi call in simplify_decoding will return None if the HMM\n # raises an error (for example, if a test sentence contains a word that\n # is out of vocabulary for the training set). 
Any exception counts the\n    # full sentence as an error (which makes this a conservative estimate).\n    try:\n        most_likely_tags = simplify_decoding(observations, model)\n        correct += sum(p == t for p, t in zip(most_likely_tags, actual_tags))\n    except Exception:\n        pass\n    total_predictions += len(observations)\n    return correct / total_predictions", "_____no_output_____" ] ], [ [ "#### Evaluate the accuracy of the MFC tagger\nRun the next cell to evaluate the accuracy of the tagger on the training and test corpus.", "_____no_output_____" ] ], [ [ "mfc_training_acc = accuracy(data.training_set.X, data.training_set.Y, mfc_model)\nprint(\"training accuracy mfc_model: {:.2f}%\".format(100 * mfc_training_acc))\n\nmfc_testing_acc = accuracy(data.testing_set.X, data.testing_set.Y, mfc_model)\nprint(\"testing accuracy mfc_model: {:.2f}%\".format(100 * mfc_testing_acc))\n\nassert mfc_training_acc >= 0.955, \"Uh oh. Your MFC accuracy on the training set doesn't look right.\"\nassert mfc_testing_acc >= 0.925, \"Uh oh. Your MFC accuracy on the testing set doesn't look right.\"\nHTML('<div class=\"alert alert-block alert-success\">Your MFC tagger accuracy looks correct!</div>')", "training accuracy mfc_model: 95.72%\ntesting accuracy mfc_model: 93.00%\n" ] ], [ [ "## Step 3: Build an HMM tagger\n---\nThe HMM tagger has one hidden state for each possible tag and is parameterized by two distributions: the emission probabilities, giving the conditional probability of observing a given **word** from each hidden state, and the transition probabilities, giving the conditional probability of moving between **tags** during the sequence.\n\nWe will also estimate the starting probability distribution (the probability of each **tag** being the first tag in a sequence), and the terminal probability distribution (the probability of each **tag** being the last tag in a sequence).\n\nThe maximum likelihood estimate of these distributions can be calculated from the frequency counts as described in the following sections, where you'll implement functions to count the frequencies and finally build the model. The HMM model will make predictions according to the formula:\n\n$$\\hat{t}_1^n = \\underset{t_1^n}{\\mathrm{argmax}} \\prod_{i=1}^n P(w_i|t_i) P(t_i|t_{i-1})$$\n\nRefer to Speech & Language Processing [Chapter 10](https://web.stanford.edu/~jurafsky/slp3/10.pdf) for more information.", "_____no_output_____" ], [ "### IMPLEMENTATION: Unigram Counts\n\nComplete the function below to estimate the occurrence frequency of each symbol over all of the input sequences. The unigram probabilities in our HMM model are estimated from the formula below, where N is the total number of samples in the input. (You only need to compute the counts for now.)\n\n$$P(tag_1) = \\frac{C(tag_1)}{N}$$", "_____no_output_____" ] ], [ [ "def unigram_counts(sequences):\n    \"\"\"Return a dictionary keyed to each unique value in the input sequence list that\n    counts the number of occurrences of the value in the sequences list. 
The sequences\n    collection should be a 2-dimensional array.\n    \n    For example, if the tag NOUN appears 275558 times over all the input sequences,\n    then you should return a dictionary such that your_unigram_counts[NOUN] == 275558.\n    \"\"\"\n    unigram_counts = {}\n    # tally every symbol across all sequences\n    for sequence in sequences:\n        for symbol in sequence:\n            unigram_counts[symbol] = unigram_counts.get(symbol, 0) + 1\n    return unigram_counts\n\n\n# call unigram_counts with a list of tag sequences from the training set\ntag_unigrams = unigram_counts(data.training_set.Y)\n\nprint(tag_unigrams)\n\n\nassert set(tag_unigrams.keys()) == data.training_set.tagset, \\\n       \"Uh oh. It looks like your tag counts don't include all the tags!\"\nassert min(tag_unigrams, key=tag_unigrams.get) == 'X', \\\n       \"Hmmm...'X' is expected to be the least common class\"\nassert max(tag_unigrams, key=tag_unigrams.get) == 'NOUN', \\\n       \"Hmmm...'NOUN' is expected to be the most common class\"\nHTML('<div class=\"alert alert-block alert-success\">Your tag unigrams look good!</div>')", "{'ADV': 44877, 'NOUN': 220632, '.': 117757, 'VERB': 146161, 'ADP': 115808, 'ADJ': 66754, 'CONJ': 30537, 'DET': 109671, 'PRT': 23906, 'NUM': 11878, 'PRON': 39383, 'X': 1094}\n" ] ], [ [ "### IMPLEMENTATION: Bigram Counts\n\nComplete the function below to estimate the co-occurrence frequency of each pair of symbols in each of the input sequences. These counts are used in the HMM model to estimate the bigram probability of two tags from the frequency counts according to the formula: $$P(tag_2|tag_1) = \\frac{C(tag_1, tag_2)}{C(tag_1)}$$\n", "_____no_output_____" ] ], [ [ "def bigram_counts(sequences):\n    \"\"\"Return a dictionary keyed to each unique PAIR of values in the input sequences\n    list that counts the number of occurrences of the pair in the sequences list. The input\n    should be a 2-dimensional array.\n    \n    For example, if the pair of tags (NOUN, VERB) appears 61582 times, then you should\n    return a dictionary such that your_bigram_counts[(NOUN, VERB)] == 61582\n    \"\"\"\n    bigram_counts = {}\n    # tally every adjacent pair of symbols within each sequence\n    for sequence in sequences:\n        for j in range(len(sequence) - 1):\n            pair = (sequence[j], sequence[j + 1])\n            bigram_counts[pair] = bigram_counts.get(pair, 0) + 1\n    return bigram_counts\n\n# call bigram_counts with a list of tag sequences from the training set\ntag_bigrams = bigram_counts(data.training_set.Y)\n\nassert len(tag_bigrams) == 144, \\\n       \"Uh oh. 
There should be 144 pairs of bigrams (12 tags x 12 tags)\"\nassert min(tag_bigrams, key=tag_bigrams.get) in [('X', 'NUM'), ('PRON', 'X')], \\\n       \"Hmmm...The least common bigram should be one of ('X', 'NUM') or ('PRON', 'X').\"\nassert max(tag_bigrams, key=tag_bigrams.get) in [('DET', 'NOUN')], \\\n       \"Hmmm...('DET', 'NOUN') is expected to be the most common bigram.\"\nHTML('<div class=\"alert alert-block alert-success\">Your tag bigrams look good!</div>')", "_____no_output_____" ] ], [ [ "### IMPLEMENTATION: Sequence Starting Counts\nComplete the code below to count how many sequences start with each tag; these counts are used to estimate the starting probabilities.", "_____no_output_____" ] ], [ [ "def starting_counts(sequences):\n    \"\"\"Return a dictionary keyed to each unique value in the input sequences list\n    that counts the number of occurrences where that value is at the beginning of\n    a sequence.\n    \n    For example, if 8093 sequences start with NOUN, then you should return a\n    dictionary such that your_starting_counts[NOUN] == 8093\n    \"\"\"\n    starting_counts = {}\n    # tally the first tag of every sequence\n    for sequence in sequences:\n        first_tag = sequence[0]\n        starting_counts[first_tag] = starting_counts.get(first_tag, 0) + 1\n    return starting_counts\n\n\n# Calculate the count of each tag starting a sequence\ntag_starts = starting_counts(data.training_set.Y)\n\nassert len(tag_starts) == 12, \"Uh oh. There should be 12 tags in your dictionary.\"\nassert min(tag_starts, key=tag_starts.get) == 'X', \"Hmmm...'X' is expected to be the least common starting tag.\"\nassert max(tag_starts, key=tag_starts.get) == 'DET', \"Hmmm...'DET' is expected to be the most common starting tag.\"\nHTML('<div class=\"alert alert-block alert-success\">Your starting tag counts look good!</div>')", "_____no_output_____" ] ], [ [ "### IMPLEMENTATION: Sequence Ending Counts\nComplete the function below to count how many sequences end with each tag; these counts are used to estimate the terminal probabilities.", "_____no_output_____" ] ], [ [ "def ending_counts(sequences):\n    \"\"\"Return a dictionary keyed to each unique value in the input sequences list\n    that counts the number of occurrences where that value is at the end of\n    a sequence.\n    \n    For example, if 18 sequences end with DET, then you should return a\n    dictionary such that your_ending_counts[DET] == 18\n    \"\"\"\n    ending_counts = {}\n    # tally the last tag of every sequence\n    for sequence in sequences:\n        last_tag = sequence[-1]\n        ending_counts[last_tag] = ending_counts.get(last_tag, 0) + 1\n    return ending_counts\n\n# Calculate the count of each tag ending a sequence\ntag_ends = ending_counts(data.training_set.Y)\n\nassert len(tag_ends) == 12, \"Uh oh. There should be 12 tags in your dictionary.\"\nassert min(tag_ends, key=tag_ends.get) in ['X', 'CONJ'], \"Hmmm...'X' or 'CONJ' should be the least common ending tag.\"\nassert max(tag_ends, key=tag_ends.get) == '.', \"Hmmm...'.' 
is expected to be the most common ending tag.\"\nHTML('<div class=\"alert alert-block alert-success\">Your ending tag counts look good!</div>')", "_____no_output_____" ] ], [ [ "### IMPLEMENTATION: Basic HMM Tagger\nUse the tag unigrams and bigrams calculated above to construct a hidden Markov tagger.\n\n- Add one state per tag\n    - The emission distribution at each state should be estimated with the formula: $P(w|t) = \\frac{C(t, w)}{C(t)}$\n- Add an edge from the starting state `basic_model.start` to each tag\n    - The transition probability should be estimated with the formula: $P(t|start) = \\frac{C(start, t)}{C(start)}$\n- Add an edge from each tag to the end state `basic_model.end`\n    - The transition probability should be estimated with the formula: $P(end|t) = \\frac{C(t, end)}{C(t)}$\n- Add an edge between _every_ pair of tags\n    - The transition probability should be estimated with the formula: $P(t_2|t_1) = \\frac{C(t_1, t_2)}{C(t_1)}$", "_____no_output_____" ] ], [ [ "basic_model = HiddenMarkovModel(name=\"base-hmm-tagger\")\n\nstates = {}\n\n# one state per tag, with a discrete emission distribution P(word | tag)\nfor tag in emission_counts:\n    tag_count = tag_unigrams[tag]\n    emission_probs = {word: word_count/tag_count for word, word_count in emission_counts[tag].items()}\n    state = State(DiscreteDistribution(emission_probs), name=tag)\n    states[tag] = state\n    basic_model.add_states(state)\n\ntraining_set_count = len(data.training_set.Y)\n\n# one start edge and one end edge per tag\nfor tag, state in states.items():\n    start_prob = tag_starts[tag]/training_set_count\n    basic_model.add_transition(basic_model.start, state, start_prob)\n    \n    end_prob = tag_ends[tag]/training_set_count\n    basic_model.add_transition(state, basic_model.end, end_prob)\n\n# one transition edge for every pair of tags\nfor tag_pair, pair_count in tag_bigrams.items():\n    trans_prob = pair_count/tag_unigrams[tag_pair[0]]\n    basic_model.add_transition(states[tag_pair[0]], states[tag_pair[1]], trans_prob)\n\nbasic_model.bake()\n\nassert all(tag in set(s.name for s in basic_model.states) for tag in data.training_set.tagset), \\\n       \"Every state in your network should use the name of the associated tag, which must be one of the training set tags.\"\nassert basic_model.edge_count() == 168, \\\n       (\"Your network should have an edge from the start node to each state, one edge between every \" +\n        \"pair of tags (states), and an edge from each state to the end node.\")\nHTML('<div class=\"alert alert-block alert-success\">Your HMM network topology looks good!</div>')", "_____no_output_____" ], [ "hmm_training_acc = accuracy(data.training_set.X, data.training_set.Y, basic_model)\nprint(\"training accuracy basic hmm model: {:.2f}%\".format(100 * hmm_training_acc))\n\nhmm_testing_acc = accuracy(data.testing_set.X, data.testing_set.Y, basic_model)\nprint(\"testing accuracy basic hmm model: {:.2f}%\".format(100 * hmm_testing_acc))\n\nassert hmm_training_acc > 0.97, \"Uh oh. Your HMM accuracy on the training set doesn't look right.\"\nassert hmm_testing_acc > 0.955, \"Uh oh. Your HMM accuracy on the testing set doesn't look right.\"\nHTML('<div class=\"alert alert-block alert-success\">Your HMM tagger accuracy looks correct! 
Congratulations, you\'ve finished the project.</div>')", "training accuracy basic hmm model: 97.54%\ntesting accuracy basic hmm model: 96.18%\n" ] ], [ [ "### Example Decoding Sequences with the HMM Tagger", "_____no_output_____" ] ], [ [ "for key in data.testing_set.keys[:3]:\n    print(\"Sentence Key: {}\\n\".format(key))\n    print(\"Predicted labels:\\n-----------------\")\n    print(simplify_decoding(data.sentences[key].words, basic_model))\n    print()\n    print(\"Actual labels:\\n--------------\")\n    print(data.sentences[key].tags)\n    print(\"\\n\")", "Sentence Key: b100-28144\n\nPredicted labels:\n-----------------\n['CONJ', 'NOUN', 'NUM', '.', 'NOUN', 'NUM', '.', 'NOUN', 'NUM', '.', 'CONJ', 'NOUN', 'NUM', '.', '.', 'NOUN', '.', '.']\n\nActual labels:\n--------------\n('CONJ', 'NOUN', 'NUM', '.', 'NOUN', 'NUM', '.', 'NOUN', 'NUM', '.', 'CONJ', 'NOUN', 'NUM', '.', '.', 'NOUN', '.', '.')\n\n\nSentence Key: b100-23146\n\nPredicted labels:\n-----------------\n['PRON', 'VERB', 'DET', 'NOUN', 'ADP', 'ADJ', 'ADJ', 'NOUN', 'VERB', 'VERB', '.', 'ADP', 'VERB', 'DET', 'NOUN', 'ADP', 'NOUN', 'ADP', 'DET', 'NOUN', '.']\n\nActual labels:\n--------------\n('PRON', 'VERB', 'DET', 'NOUN', 'ADP', 'ADJ', 'ADJ', 'NOUN', 'VERB', 'VERB', '.', 'ADP', 'VERB', 'DET', 'NOUN', 'ADP', 'NOUN', 'ADP', 'DET', 'NOUN', '.')\n\n\nSentence Key: b100-35462\n\nPredicted labels:\n-----------------\n['DET', 'ADJ', 'NOUN', 'VERB', 'VERB', 'VERB', 'ADP', 'DET', 'ADJ', 'ADJ', 'NOUN', 'ADP', 'DET', 'ADJ', 'NOUN', '.', 'ADP', 'ADJ', 'NOUN', '.', 'CONJ', 'ADP', 'DET', 'NOUN', 'ADP', 'ADJ', 'ADJ', '.', 'ADJ', '.', 'CONJ', 'ADJ', 'NOUN', 'ADP', 'ADJ', 'NOUN', '.']\n\nActual labels:\n--------------\n('DET', 'ADJ', 'NOUN', 'VERB', 'VERB', 'VERB', 'ADP', 'DET', 'ADJ', 'ADJ', 'NOUN', 'ADP', 'DET', 'ADJ', 'NOUN', '.', 'ADP', 'ADJ', 'NOUN', '.', 'CONJ', 'ADP', 'DET', 'NOUN', 'ADP', 'ADJ', 'ADJ', '.', 'ADJ', '.', 'CONJ', 'ADJ', 'NOUN', 'ADP', 'ADJ', 'NOUN', '.')\n\n\n" ] ], [ [ "## Step 4: [Optional] Improving model performance\n---\nThere are additional enhancements that can be incorporated into your tagger to improve performance on larger tagsets, where the data sparsity problem is more significant. The data sparsity problem arises because the same amount of data split over more tags means there will be fewer samples for each tag, and more tags with zero observed occurrences in the data. The techniques in this section are optional.\n\n- [Laplace Smoothing](https://en.wikipedia.org/wiki/Additive_smoothing) (pseudocounts)\n    Laplace smoothing is a technique where you add a small, non-zero value to all observed counts to offset for unobserved values.\n\n- Backoff Smoothing\n    Another smoothing technique is to interpolate between n-grams for missing data. This method is more effective than Laplace smoothing at combatting the data sparsity problem. Refer to chapters 4, 9, and 10 of the [Speech & Language Processing](https://web.stanford.edu/~jurafsky/slp3/) book for more information.\n\n- Extending to Trigrams\n    HMM taggers have achieved better than 96% accuracy on this dataset with the full Penn treebank tagset using an architecture described in [this](http://www.coli.uni-saarland.de/~thorsten/publications/Brants-ANLP00.pdf) paper. 
Altering your HMM to achieve the same performance would require implementing deleted interpolation (described in the paper), incorporating trigram probabilities in your frequency tables, and re-implementing the Viterbi algorithm to consider three consecutive states instead of two.\n\n### Obtain the Brown Corpus with a Larger Tagset\nRun the code below to download a copy of the Brown corpus with the full NLTK tagset. You will need to research the available tagset information in the NLTK docs and determine the best way to extract the subset of NLTK tags you want to explore. If you write the data to a file in the format specified in Step 1, then you can reload it using all of the code above for comparison.\n\nRefer to [Chapter 5](http://www.nltk.org/book/ch05.html) of the NLTK book for more information on the available tagsets.", "_____no_output_____" ] ], [ [ "import nltk\nfrom nltk import pos_tag, word_tokenize\nfrom nltk.corpus import brown\n\nnltk.download('brown')\ntraining_corpus = nltk.corpus.brown\ntraining_corpus.tagged_sents()[0]", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
d0136958f13fc7d10180f29306d04e2ff6b79233
3,327
ipynb
Jupyter Notebook
notebooks/beginner/notebooks/for_loops.ipynb
mateodif/learn-python3
f9c4488522db6a877968759a7088e2549ca35725
[ "MIT" ]
null
null
null
notebooks/beginner/notebooks/for_loops.ipynb
mateodif/learn-python3
f9c4488522db6a877968759a7088e2549ca35725
[ "MIT" ]
null
null
null
notebooks/beginner/notebooks/for_loops.ipynb
mateodif/learn-python3
f9c4488522db6a877968759a7088e2549ca35725
[ "MIT" ]
1
2019-11-05T01:50:50.000Z
2019-11-05T01:50:50.000Z
19.12069
94
0.490833
[ [ [ "# [Bucles `for`](https://docs.python.org/3/tutorial/controlflow.html#for-statements)", "_____no_output_____" ], [ "## Iterando listas", "_____no_output_____" ] ], [ [ "mi_lista = [1, 2, 3, 4, 'Python', 'es', 'piola']\nfor item in mi_lista:\n print(item)", "_____no_output_____" ] ], [ [ "### `break`\nParar la ejecución del bucle.", "_____no_output_____" ] ], [ [ "for item in mi_lista:\n if item == 'Python':\n break\n print(item)", "_____no_output_____" ] ], [ [ "### `continue`\nContinúa al próximo item sin ejecutar las lineas después de `continue` dentro del bucle.", "_____no_output_____" ] ], [ [ "for item in mi_lista:\n if item == 1:\n continue\n print(item)", "_____no_output_____" ] ], [ [ "### `enumerate()`\nEn caso de que también necesiten saber el indice:", "_____no_output_____" ] ], [ [ "for indice, valor in enumerate(mi_lista):\n print('indice: {}, valoror: {}'.format(indice, valor))", "_____no_output_____" ] ], [ [ "## Iterando diccionarios", "_____no_output_____" ] ], [ [ "mi_dicc = {'hacker': True, 'edad': 72, 'nombre': 'John Doe'}\nfor valor in mi_dicc:\n print(valor)", "_____no_output_____" ], [ "for llave, valor in mi_dicc.items():\n print('{}={}'.format(llave, valor))", "_____no_output_____" ] ], [ [ "## `range()`", "_____no_output_____" ] ], [ [ "for numero in range(5):\n print(numero)", "_____no_output_____" ], [ "for numero in range(2, 5):\n print(numero)", "_____no_output_____" ], [ "for numero in range(0, 10, 2): # el ultimo son los pasos\n print(numero)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
d0136ff8f7f8dd9395c3d101688798cd87d60021
33,648
ipynb
Jupyter Notebook
notebooks/test/002_pos_tagging-Copy1.ipynb
VictorQuintana91/Thesis
b3ebf5ceeae22836a6a4b9612389ca95e3946b3e
[ "MIT" ]
null
null
null
notebooks/test/002_pos_tagging-Copy1.ipynb
VictorQuintana91/Thesis
b3ebf5ceeae22836a6a4b9612389ca95e3946b3e
[ "MIT" ]
null
null
null
notebooks/test/002_pos_tagging-Copy1.ipynb
VictorQuintana91/Thesis
b3ebf5ceeae22836a6a4b9612389ca95e3946b3e
[ "MIT" ]
null
null
null
46.668516
1,433
0.564996
[ [ [ "# Pos-Tagging & Feature Extraction\nFollowing normalisation, we can now proceed to the process of pos-tagging and feature extraction. Let's start with pos-tagging.", "_____no_output_____" ], [ "## POS-tagging\nPart-of-speech tagging is one of the most important text analysis tasks used to classify words into their part-of-speech and label them according the tagset which is a collection of tags used for the pos tagging. Part-of-speech tagging also known as word classes or lexical categories.\n\nThe `nltk` library provides its own pre-trained `POS-tagger`. Let's see how it is used.", "_____no_output_____" ] ], [ [ "import pandas as pd\ndf0 = pd.read_csv(\"../../data/interim/001_normalised_keyed_reviews.csv\", sep=\"\\t\", low_memory=False)\ndf0.head()", "_____no_output_____" ], [ "# For monitoring duration of pandas processes\nfrom tqdm import tqdm, tqdm_pandas\n\n# To avoid RuntimeError: Set changed size during iteration\ntqdm.monitor_interval = 0\n\n# Register `pandas.progress_apply` and `pandas.Series.map_apply` with `tqdm`\n# (can use `tqdm_gui`, `tqdm_notebook`, optional kwargs, etc.)\ntqdm.pandas(desc=\"Progress:\")\n\n# Now you can use `progress_apply` instead of `apply`\n# and `progress_map` instead of `map`\n# can also groupby:\n# df.groupby(0).progress_apply(lambda x: x**2)", "_____no_output_____" ], [ "def convert_text_to_list(review):\n return review.replace(\"[\",\"\").replace(\"]\",\"\").replace(\"'\",\"\").split(\",\")", "_____no_output_____" ], [ "# Convert \"reviewText\" field to back to list\ndf0['reviewText'] = df0['reviewText'].astype(str)\ndf0['reviewText'] = df0['reviewText'].progress_apply(lambda text: convert_text_to_list(text));\ndf0['reviewText'].head()", "Progress:: 100%|██████████| 582711/582711 [00:17<00:00, 33818.55it/s]\n" ], [ "df0['reviewText'][12]", "_____no_output_____" ], [ "import nltk\nnltk.__version__", "_____no_output_____" ], [ "# Split negs\ndef split_neg(review):\n new_review = []\n for token in review:\n if '_' in token:\n split_words = token.split(\"_\")\n new_review.append(split_words[0])\n new_review.append(split_words[1])\n else:\n new_review.append(token)\n return new_review", "_____no_output_____" ], [ "df0[\"reviewText\"] = df0[\"reviewText\"].progress_apply(lambda review: split_neg(review))\ndf0[\"reviewText\"].head()", "Progress:: 100%|██████████| 582711/582711 [00:14<00:00, 40001.85it/s]\n" ], [ "### Remove Stop Words\nfrom nltk.corpus import stopwords\nstop_words = set(stopwords.words('english'))\n\ndef remove_stopwords(review):\n return [token for token in review if not token in stop_words]", "_____no_output_____" ], [ "df0[\"reviewText\"] = df0[\"reviewText\"].progress_apply(lambda review: remove_stopwords(review))\ndf0[\"reviewText\"].head()", "Progress:: 100%|██████████| 582711/582711 [00:12<00:00, 48007.55it/s]\n" ] ], [ [ "<span style=\"color:red\">Unfortunatelly, this tagger, though much better and accurate, takes a lot of time. 
In order to process the above dataset, it would need close to 3 days of running time.</span>", "_____no_output_____", "Follow this link for more info on the tagger: https://nlp.stanford.edu/software/tagger.shtml#History", "_____no_output_____" ] ], [ [ "from nltk.tag import StanfordPOSTagger\nfrom nltk import word_tokenize\n\n# import os\n# os.getcwd()\n\n# Add the jar and model via their path (instead of setting environment variables):\njar = '../../models/stanford-postagger-full-2017-06-09/stanford-postagger.jar'\nmodel = '../../models/stanford-postagger-full-2017-06-09/models/english-left3words-distsim.tagger'\n\npos_tagger = StanfordPOSTagger(model, jar, encoding='utf8')", "_____no_output_____" ], [ "def pos_tag(review):\n    if len(review) > 0:\n        return pos_tagger.tag(review)", "_____no_output_____" ], [ "# Example\ntext = pos_tagger.tag(word_tokenize(\"What's the airspeed of an unladen swallow ?\"))\nprint(text)", "[('What', 'WP'), (\"'s\", 'VBZ'), ('the', 'DT'), ('airspeed', 'NN'), ('of', 'IN'), ('an', 'DT'), ('unladen', 'JJ'), ('swallow', 'VB'), ('?', '.')]\n" ], [ "tagged_df = pd.DataFrame(df0['reviewText'].progress_apply(lambda review: pos_tag(review)))\ntagged_df.head()", "Progress::   0%|          | 72/582711 [01:09<158:51:58,  1.02it/s]" ], [ "# tagged_df = pd.DataFrame(df0['reviewText'].progress_apply(lambda review: nltk.pos_tag(review)))\n# tagged_df.head()", "_____no_output_____" ] ], [ [ "Thankfully, \`nltk\` provides documentation for each tag, which can be queried using the tag, e.g., \`nltk.help.upenn_tagset('RB')\`, or a regular expression. \`nltk\` also provides a batch pos-tagging method for document pos-tagging:", "_____no_output_____" ] ], [ [ "tagged_df['reviewText'][8]", "_____no_output_____" ] ], [ [ "The list of all possible tags appears below:\n\n| Tag  | Description                              |\n|------|------------------------------------------|\n| CC   | Coordinating conjunction                 |\n| CD   | Cardinal number                          |\n| DT   | Determiner                               |\n| EX   | Existential there                        |\n| FW   | Foreign word                             |\n| IN   | Preposition or subordinating conjunction |\n| JJ   | Adjective                                |\n| JJR  | Adjective, comparative                   |\n| JJS  | Adjective, superlative                   |\n| LS   | List item marker                         |\n| MD   | Modal                                    |\n| NN   | Noun, singular or mass                   |\n| NNS  | Noun, plural                             |\n| NNP  | Proper noun, singular                    |\n| NNPS | Proper noun, plural                      |\n| PDT  | Predeterminer                            |\n| POS  | Possessive ending                        |\n| PRP  | Personal pronoun                         |\n| PRP* | Possessive pronoun                       |\n| RB   | Adverb                                   |\n| RBR  | Adverb, comparative                      |\n| RBS  | Adverb, superlative                      |\n| RP   | Particle                                 |\n| SYM  | Symbol                                   |\n| TO   | to                                       |\n| UH   | Interjection                             |\n| VB   | Verb, base form                          |\n| VBD  | Verb, past tense                         |\n| VBG  | Verb, gerund or present participle       |\n| VBN  | Verb, past participle                    |\n| VBP  | Verb, non-3rd person singular present    |\n| VBZ  | Verb, 3rd person singular present        |\n| WDT  | Wh-determiner                            |\n| WP   | Wh-pronoun                               |\n| WP*  | Possessive wh-pronoun                    |\n| WRB  | Wh-adverb                                |\n\nNotice: where you see \`*\` replace with \`$\`.", "_____no_output_____" ] ], [ [ "## Join with Original Key and Persist Locally to avoid RE-processing\nuniqueKey_series_df = df0[['uniqueKey']]\nuniqueKey_series_df.head()", "_____no_output_____" ], [ "pos_tagged_keyed_reviews = pd.concat([uniqueKey_series_df, tagged_df], axis=1);\npos_tagged_keyed_reviews.head()", "_____no_output_____" ], [ "pos_tagged_keyed_reviews.to_csv(\"../data/interim/002_pos_tagged_keyed_reviews.csv\", sep='\\t', header=True, index=False);", "_____no_output_____" ] ], [ [ "## Nouns\nNouns generally refer to people, places, things, or concepts, e.g.: woman, Scotland, book, intelligence. 
Nouns can appear after determiners and adjectives, and can be the subject or object of the verb.\n\nThe simplified noun tags are \`N\` for common nouns like book, and \`NP\` for proper nouns like Scotland; in the Penn Treebank tagset used here, these correspond to \`NN\`/\`NNS\` for common nouns and \`NNP\`/\`NNPS\` for proper nouns.", "_____no_output_____" ] ], [ [ "def noun_collector(word_tag_list):\n    if len(word_tag_list) > 0:\n        return [word for (word, tag) in word_tag_list if tag in {'NN', 'NNS', 'NNP', 'NNPS'}]", "_____no_output_____" ], [ "nouns_df = pd.DataFrame(tagged_df['reviewText'].progress_apply(lambda review: noun_collector(review)))\nnouns_df.head()", "_____no_output_____" ], [ "keyed_nouns_df = pd.concat([uniqueKey_series_df, nouns_df], axis=1);\nkeyed_nouns_df.head()", "_____no_output_____" ], [ "keyed_nouns_df.to_csv(\"../../data/interim/002_keyed_nouns_stanford.csv\", sep='\\t', header=True, index=False);", "_____no_output_____" ], [ "## END_OF_FILE", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ] ]
d013712b32bea3758ee9b4bbdb056e593380c691
5,766
ipynb
Jupyter Notebook
examples/Interactive/Basic/test_plan_notebook.ipynb
armarti/testplan
5dcfe5840c0c99e9535cc223230f400fa62802f2
[ "Apache-2.0" ]
null
null
null
examples/Interactive/Basic/test_plan_notebook.ipynb
armarti/testplan
5dcfe5840c0c99e9535cc223230f400fa62802f2
[ "Apache-2.0" ]
64
2019-04-15T20:56:40.000Z
2021-03-23T01:00:30.000Z
examples/Interactive/Basic/test_plan_notebook.ipynb
armarti/testplan
5dcfe5840c0c99e9535cc223230f400fa62802f2
[ "Apache-2.0" ]
null
null
null
21.514925
101
0.542664
[ [ [ "import pprint\n\nfrom testplan import Testplan\nfrom testplan.common.utils.logger import TEST_INFO, DEBUG\nfrom my_tests.mtest import make_multitest", "_____no_output_____" ], [ "# Initialize a plan with interactive mode flag set.\nplan = Testplan(name='MyPlan',\n interactive=True,\n parse_cmdline=False,\n logger_level=TEST_INFO)", "_____no_output_____" ], [ "# Interactive mode serving interactive requests.\nplan.run()", "_____no_output_____" ], [ "# Adding a test.\ntest1_uid = plan.add(make_multitest(idx='1'))\nprint('Test uid: {}'.format(test1_uid))", "_____no_output_____" ], [ "print('Testplan interactive handler: '.format(plan.i))\nprint('Test1 added: {}'.format(plan.i.test(test_uid='Test1')))", "_____no_output_____" ], [ "# Run the tests added.\nplan.i.run_test(test_uid='Test1')\nprint('Test1 report passing: {}'.format(plan.i.test_report(test_uid='Test1')['status']))", "_____no_output_____" ], [ "# ACTION: Make a file edit in my_tests/dependency.py\n# VALUE = 3\n# change to:\n# VALUE = 1", "_____no_output_____" ], [ "# Reload the code after saving the fix.\nplan.i.reload()", "_____no_output_____" ], [ "# Re-run the test that now should be passing.\nplan.i.run_test(test_uid='Test1')\nprint('Test1 report passing: {}'.format(plan.i.test_report(test_uid='Test1')['status']))", "_____no_output_____" ], [ "# Adding a second test.\ntest2_uid = plan.add(make_multitest(idx='2'))\nprint('Test uid: {}'.format(test2_uid))", "_____no_output_____" ], [ "plan.i.run_tests()", "_____no_output_____" ], [ "report = plan.i.report()\nprint('Serialized tests report:')\npprint.pprint(plan.i.report(serialized=True))", "_____no_output_____" ], [ "plan.i.reset_reports()", "_____no_output_____" ], [ "# Runs a single testsuite from first test.\nplan.i.run_test_suite(test_uid='Test1', suite_uid='BasicSuite')", "_____no_output_____" ], [ "print('Serialized Test1 report:')\npprint.pprint(plan.i.test_report(test_uid='Test1', serialized=True))", "_____no_output_____" ], [ "# Test should now be passing.\nprint('Serialized Test1 report:')\npprint.pprint(plan.i.test_report(test_uid='Test1', serialized=True))", "_____no_output_____" ], [ "# Runs a single test case from both suites matched from test 2.\nplan.i.run_test_case(test_uid='Test2', suite_uid='*', case_uid='send_and_receive_msg')", "_____no_output_____" ], [ "# Print the test case report.\nprint('Serialized test:Test2, suite:TCPSuite - Custom_0, case:send_and_receive_msg report.')\npprint.pprint(plan.i.test_case_report(\n test_uid='Test1', suite_uid='TCPSuite - Custom_0', case_uid='send_and_receive_msg',\n serialized=True))", "_____no_output_____" ], [ "# Abort the plan.\nplan.abort()", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
d013781d76d2f84e829db951cfc31cbeea672ad6
331,410
ipynb
Jupyter Notebook
Tutorials/CNTK_201B_CIFAR-10_ImageHandsOn.ipynb
StillKeepTry/CNTK
356eb21f8edcaf5d8e0510367ff01c6092062ec6
[ "RSA-MD" ]
null
null
null
Tutorials/CNTK_201B_CIFAR-10_ImageHandsOn.ipynb
StillKeepTry/CNTK
356eb21f8edcaf5d8e0510367ff01c6092062ec6
[ "RSA-MD" ]
null
null
null
Tutorials/CNTK_201B_CIFAR-10_ImageHandsOn.ipynb
StillKeepTry/CNTK
356eb21f8edcaf5d8e0510367ff01c6092062ec6
[ "RSA-MD" ]
1
2018-12-28T14:03:59.000Z
2018-12-28T14:03:59.000Z
259.725705
26,880
0.898244
[ [ [ "from IPython.display import Image", "_____no_output_____" ] ], [ [ "# CNTK 201B: Hands On Labs Image Recognition", "_____no_output_____" ], [ "This hands-on lab shows how to implement image recognition task using [convolution network][] with CNTK v2 Python API. You will start with a basic feedforward CNN architecture in order to classify Cifar dataset, then you will keep adding advanced feature to your network. Finally, you will implement a VGG net and residual net similar to the one that won ImageNet competition but smaller in size.\n\n[convolution network]:https://en.wikipedia.org/wiki/Convolutional_neural_network\n\n## Introduction\n\nIn this hands-on, you will practice the following:\n\n* Understanding subset of CNTK python API needed for image classification task.\n* Write a custom convolution network to classify Cifar dataset.\n* Modifying the network structure by adding:\n * [Dropout][] layer.\n * Batchnormalization layer.\n* Implement a [VGG][] style network.\n* Introduction to Residual Nets (RESNET).\n* Implement and train [RESNET network][].\n\n[RESNET network]:https://github.com/Microsoft/CNTK/wiki/Hands-On-Labs-Image-Recognition\n[VGG]:http://www.robots.ox.ac.uk/~vgg/research/very_deep/\n[Dropout]:https://en.wikipedia.org/wiki/Dropout_(neural_networks)\n\n## Prerequisites\n\nCNTK 201A hands-on lab, in which you will download and prepare Cifar dataset is a prerequisites for this lab. This tutorial depends on CNTK v2, so before starting this lab you will need to install CNTK v2. Furthermore, all the tutorials in this lab are done in python, therefore, you will need a basic knowledge of Python.\n\nCNTK 102 lab is recommended but not a prerequisites for this tutorials. However, a basic understanding of Deep Learning is needed.\n\n## Dataset\n\nYou will use Cifar 10 dataset, from https://www.cs.toronto.edu/~kriz/cifar.html, during this tutorials. The dataset contains 50000 training images and 10000 test images, all images are 32x32x3. Each image is classified as one of 10 classes as shown below:", "_____no_output_____" ] ], [ [ "# Figure 1\nImage(url=\"https://cntk.ai/jup/201/cifar-10.png\", width=500, height=500)", "_____no_output_____" ] ], [ [ "The above image is from: https://www.cs.toronto.edu/~kriz/cifar.html\n\n## Convolution Neural Network (CNN)\n\nConvolution Neural Network (CNN) is a feedforward network comprise of a bunch of layers in such a way that the output of one layer is fed to the next layer (There are more complex architecture that skip layers, we will discuss one of those at the end of this lab). Usually, CNN start with alternating between convolution layer and pooling layer (downsample), then end up with fully connected layer for the classification part.\n\n### Convolution layer\n\nConvolution layer consist of multiple 2D convolution kernels applied on the input image or the previous layer, each convolution kernel output a feature map.", "_____no_output_____" ] ], [ [ "# Figure 2\nImage(url=\"https://cntk.ai/jup/201/Conv2D.png\")", "_____no_output_____" ] ], [ [ "The stack of feature maps output are the input to the next layer.", "_____no_output_____" ] ], [ [ "# Figure 3\nImage(url=\"https://cntk.ai/jup/201/Conv2DFeatures.png\")", "_____no_output_____" ] ], [ [ "> Gradient-Based Learning Applied to Document Recognition, Proceedings of the IEEE, 86(11):2278-2324, November 1998\n> Y. LeCun, L. Bottou, Y. Bengio and P. Haffner\n\n#### In CNTK:\n\nHere the [convolution][] layer in Python:\n\n```python\ndef Convolution(filter_shape, # e.g. 
(3,3)\n num_filters, # e.g. 64\n activation, # relu or None...etc.\n init, # Random initialization\n pad, # True or False\n strides) # strides e.g. (1,1)\n```\n\n[convolution]:https://www.cntk.ai/pythondocs/layerref.html#convolution\n\n### Pooling layer\n\nIn most CNN vision architectures, each convolution layer is succeeded by a pooling layer, and they keep alternating until the fully connected layer. \n\nThe purpose of the pooling layer is as follows:\n\n* Reduce the dimensionality of the previous layer, which speeds up the network.\n* Provide limited translation invariance.\n\nHere is an example of max pooling with a stride of 2:", "_____no_output_____" ] ], [ [ "# Figure 4\nImage(url=\"https://cntk.ai/jup/201/MaxPooling.png\", width=400, height=400)", "_____no_output_____" ] ], [ [ "#### In CNTK:\n\nHere is the [pooling][] layer in Python:\n\n```python\n\n# Max pooling\ndef MaxPooling(filter_shape, # e.g. (3,3)\n strides, # (2,2)\n pad) # True or False\n\n# Average pooling\ndef AveragePooling(filter_shape, # e.g. (3,3)\n strides, # (2,2)\n pad) # True or False\n```\n\n[pooling]:https://www.cntk.ai/pythondocs/layerref.html#maxpooling-averagepooling\n\n### Dropout layer\n\nA dropout layer takes a probability value as an input; this value is called the dropout rate. With a dropout rate of 0.5, for example, the layer picks 50% of the nodes from the previous layer at random and drops them out of the network. This behavior helps regularize the network.\n\n> Dropout: A Simple Way to Prevent Neural Networks from Overfitting\n> Nitish Srivastava, Geoffrey Hinton, Alex Krizhevsky, Ilya Sutskever, Ruslan Salakhutdinov\n\n\n#### In CNTK:\n\nDropout layer in Python:\n\n```python\n\n# Dropout\ndef Dropout(prob) # dropout rate e.g. 0.5\n```\n\n### Batch normalization (BN)\n\nBatch normalization is a way to make the input to each layer have zero mean and unit variance. BN helps the network converge faster and keeps the input of each layer centered around zero. BN has two learnable parameters, called gamma and beta, whose purpose is to let the network decide for itself whether the normalized input or the raw input is best.\n\n> Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift\n> Sergey Ioffe, Christian Szegedy\n\n#### In CNTK:\n\n[Batch normalization][] layer in Python:\n\n```python\n\n# Batch normalization\ndef BatchNormalization(map_rank) # For image map_rank=1\n```\n\n[Batch normalization]:https://www.cntk.ai/pythondocs/layerref.html#batchnormalization-layernormalization-stabilizer\n\n## Microsoft Cognitive Toolkit (CNTK)\n\nCNTK is built around highly flexible computation graphs: each node takes tensors as inputs and produces tensors as the result of its computation. Each node is exposed in the Python API, which gives you the flexibility to create any custom graph; you can also define your own nodes in Python or C++, running on CPU, GPU, or both.\n\nFor deep learning, you can use the low level API directly or you can use the CNTK layered API; a minimal sketch of a raw computation graph appears below. 
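\n\nThe sketch below is illustrative and not part of the original lab; it builds a tiny graph from two low level ops (`input_variable` and `element_times`, both imported later in this notebook) and evaluates it on a single vector -- the shape and values here are arbitrary assumptions:\n\n```python\nimport numpy as np\nfrom cntk.ops import input_variable, element_times\n\n# build the graph y = 2 * x for a 3-element input vector\nx = input_variable((3,))\ny = element_times(2.0, x)\n\n# evaluate the graph on one sample; eval takes a {variable: data} mapping\nprint(y.eval({x: [np.array([1.0, 2.0, 3.0], dtype=np.float32)]}))\n```\n\n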
We will start with the low level API, then switch to the layered API in this lab.\n\nSo let's first import the needed modules for this lab.", "_____no_output_____" ] ], [ [ "from __future__ import print_function\nimport os\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport math\n\nfrom cntk.layers import default_options, Convolution, MaxPooling, AveragePooling, Dropout, BatchNormalization, Dense, Sequential, For\nfrom cntk.io import MinibatchSource, ImageDeserializer, StreamDef, StreamDefs\nimport cntk.io.transforms as xforms \nfrom cntk.initializer import glorot_uniform, he_normal\nfrom cntk import Trainer\nfrom cntk.learner import momentum_sgd, learning_rate_schedule, UnitType, momentum_as_time_constant_schedule\nfrom cntk.ops import cross_entropy_with_softmax, classification_error, relu, input_variable, softmax, element_times\nfrom cntk.utils import *", "_____no_output_____" ], [ "# Figure 5\nImage(url=\"https://cntk.ai/jup/201/CNN.png\")", "_____no_output_____" ] ], [ [ "Now that we have imported the needed modules, let's implement our first CNN, as shown in Figure 5 above.\n\nLet's implement the above network using the CNTK layers API:", "_____no_output_____" ] ], [ [ "def create_basic_model(input, out_dims):\n    \n    net = Convolution((5,5), 32, init=glorot_uniform(), activation=relu, pad=True)(input)\n    net = MaxPooling((3,3), strides=(2,2))(net)\n\n    net = Convolution((5,5), 32, init=glorot_uniform(), activation=relu, pad=True)(net)\n    net = MaxPooling((3,3), strides=(2,2))(net)\n\n    net = Convolution((5,5), 64, init=glorot_uniform(), activation=relu, pad=True)(net)\n    net = MaxPooling((3,3), strides=(2,2))(net)\n    \n    net = Dense(64, init=glorot_uniform())(net)\n    net = Dense(out_dims, init=glorot_uniform(), activation=None)(net)\n    \n    return net", "_____no_output_____" ] ], [ [ "To train the above model we need two things:\n* Read the training images and their corresponding labels.\n* Define a cost function, compute the cost for each mini-batch and update the model weights according to the cost value.\n\nTo read the data in CNTK, we will use CNTK readers, which handle data augmentation and can fetch data in parallel.\n\nExample of a map text file:\n\n    S:\\data\\CIFAR-10\\train\\00001.png\t9\n    S:\\data\\CIFAR-10\\train\\00002.png\t9\n    S:\\data\\CIFAR-10\\train\\00003.png\t4\n    S:\\data\\CIFAR-10\\train\\00004.png\t1\n    S:\\data\\CIFAR-10\\train\\00005.png\t1\n", "_____no_output_____" ] ], [ [ "# model dimensions\nimage_height = 32\nimage_width  = 32\nnum_channels = 3\nnum_classes  = 10\n\n#\n# Define the reader for both training and evaluation action.\n#\ndef create_reader(map_file, mean_file, train):\n    if not os.path.exists(map_file) or not os.path.exists(mean_file):\n        raise RuntimeError(\"This tutorial depends on the 201A tutorial, please run 201A first.\")\n\n    # transformation pipeline for the features has jitter/crop only when training\n    transforms = []\n    if train:\n        transforms += [\n            xforms.crop(crop_type='randomside', side_ratio=0.8) # train uses data augmentation (translation only)\n        ]\n    transforms += [\n        xforms.scale(width=image_width, height=image_height, channels=num_channels, interpolations='linear'),\n        xforms.mean(mean_file)\n    ]\n    # deserializer\n    return MinibatchSource(ImageDeserializer(map_file, StreamDefs(\n        features = StreamDef(field='image', transforms=transforms), # first column in map file is referred to as 'image'\n        labels   = StreamDef(field='label', shape=num_classes)      # and second as 'label'\n    )))", "_____no_output_____" ] ], [ [ "Now let us write the training and validation 
loop.", "_____no_output_____" ] ], [ [ "#\n# Train and evaluate the network.\n#\ndef train_and_evaluate(reader_train, reader_test, max_epochs, model_func):\n # Input variables denoting the features and label data\n input_var = input_variable((num_channels, image_height, image_width))\n label_var = input_variable((num_classes))\n\n # Normalize the input\n feature_scale = 1.0 / 256.0\n input_var_norm = element_times(feature_scale, input_var)\n \n # apply model to input\n z = model_func(input_var_norm, out_dims=10)\n\n #\n # Training action\n #\n\n # loss and metric\n ce = cross_entropy_with_softmax(z, label_var)\n pe = classification_error(z, label_var)\n\n # training config\n epoch_size = 50000\n minibatch_size = 64\n\n # Set training parameters\n lr_per_minibatch = learning_rate_schedule([0.01]*10 + [0.003]*10 + [0.001], UnitType.minibatch, epoch_size)\n momentum_time_constant = momentum_as_time_constant_schedule(-minibatch_size/np.log(0.9))\n l2_reg_weight = 0.001\n \n # trainer object\n learner = momentum_sgd(z.parameters, \n lr = lr_per_minibatch, momentum = momentum_time_constant, \n l2_regularization_weight=l2_reg_weight)\n progress_printer = ProgressPrinter(tag='Training', num_epochs=max_epochs)\n trainer = Trainer(z, (ce, pe), [learner], [progress_printer])\n\n # define mapping from reader streams to network inputs\n input_map = {\n input_var: reader_train.streams.features,\n label_var: reader_train.streams.labels\n }\n\n log_number_of_parameters(z) ; print()\n\n # perform model training\n batch_index = 0\n plot_data = {'batchindex':[], 'loss':[], 'error':[]}\n for epoch in range(max_epochs): # loop over epochs\n sample_count = 0\n while sample_count < epoch_size: # loop over minibatches in the epoch\n data = reader_train.next_minibatch(min(minibatch_size, epoch_size - sample_count), input_map=input_map) # fetch minibatch.\n trainer.train_minibatch(data) # update model with it\n\n sample_count += data[label_var].num_samples # count samples processed so far\n \n # For visualization... 
\n plot_data['batchindex'].append(batch_index)\n plot_data['loss'].append(trainer.previous_minibatch_loss_average)\n plot_data['error'].append(trainer.previous_minibatch_evaluation_average)\n \n batch_index += 1\n trainer.summarize_training_progress()\n \n #\n # Evaluation action\n #\n epoch_size = 10000\n minibatch_size = 16\n\n # process minibatches and evaluate the model\n metric_numer = 0\n metric_denom = 0\n sample_count = 0\n minibatch_index = 0\n\n while sample_count < epoch_size:\n current_minibatch = min(minibatch_size, epoch_size - sample_count)\n\n # Fetch next test min batch.\n data = reader_test.next_minibatch(current_minibatch, input_map=input_map)\n\n # minibatch data to be trained with\n metric_numer += trainer.test_minibatch(data) * current_minibatch\n metric_denom += current_minibatch\n\n # Keep track of the number of samples processed so far.\n sample_count += data[label_var].num_samples\n minibatch_index += 1\n\n print(\"\")\n print(\"Final Results: Minibatch[1-{}]: errs = {:0.1f}% * {}\".format(minibatch_index+1, (metric_numer*100.0)/metric_denom, metric_denom))\n print(\"\")\n \n # Visualize training result:\n window_width = 32\n loss_cumsum = np.cumsum(np.insert(plot_data['loss'], 0, 0)) \n error_cumsum = np.cumsum(np.insert(plot_data['error'], 0, 0)) \n\n # Moving average.\n plot_data['batchindex'] = np.insert(plot_data['batchindex'], 0, 0)[window_width:]\n plot_data['avg_loss'] = (loss_cumsum[window_width:] - loss_cumsum[:-window_width]) / window_width\n plot_data['avg_error'] = (error_cumsum[window_width:] - error_cumsum[:-window_width]) / window_width\n \n plt.figure(1)\n plt.subplot(211)\n plt.plot(plot_data[\"batchindex\"], plot_data[\"avg_loss\"], 'b--')\n plt.xlabel('Minibatch number')\n plt.ylabel('Loss')\n plt.title('Minibatch run vs. Training loss ')\n\n plt.show()\n\n plt.subplot(212)\n plt.plot(plot_data[\"batchindex\"], plot_data[\"avg_error\"], 'r--')\n plt.xlabel('Minibatch number')\n plt.ylabel('Label Prediction Error')\n plt.title('Minibatch run vs. Label Prediction Error ')\n plt.show()\n \n return softmax(z)", "_____no_output_____" ], [ "data_path = os.path.join('data', 'CIFAR-10')\nreader_train = create_reader(os.path.join(data_path, 'train_map.txt'), os.path.join(data_path, 'CIFAR-10_mean.xml'), True)\nreader_test = create_reader(os.path.join(data_path, 'test_map.txt'), os.path.join(data_path, 'CIFAR-10_mean.xml'), False)\n\npred = train_and_evaluate(reader_train, reader_test, max_epochs=5, model_func=create_basic_model)", "Training 116906 parameters in 10 parameter tensors.\n\nFinished Epoch[1 of 300]: [Training] loss = 2.062444 * 50000, metric = 75.3% * 50000 13.316s (3754.8 samples per second);\nFinished Epoch[2 of 300]: [Training] loss = 1.675133 * 50000, metric = 61.7% * 50000 13.772s (3630.5 samples per second);\nFinished Epoch[3 of 300]: [Training] loss = 1.520789 * 50000, metric = 55.4% * 50000 13.674s (3656.7 samples per second);\nFinished Epoch[4 of 300]: [Training] loss = 1.421881 * 50000, metric = 51.4% * 50000 13.668s (3658.2 samples per second);\nFinished Epoch[5 of 300]: [Training] loss = 1.338381 * 50000, metric = 48.0% * 50000 13.675s (3656.3 samples per second);\n\nFinal Results: Minibatch[1-626]: errs = 43.3% * 10000\n\n" ] ], [ [ "Although, this model is very simple, it still has too much code, we can do better. 
Here is the same model in a more terse format:", "_____no_output_____" ] ], [ [ "def create_basic_model_terse(input, out_dims):\n\n    with default_options(activation=relu):\n        model = Sequential([\n            For(range(3), lambda i: [\n                Convolution((5,5), [32,32,64][i], init=glorot_uniform(), pad=True),\n                MaxPooling((3,3), strides=(2,2))\n            ]),\n            Dense(64, init=glorot_uniform()),\n            Dense(out_dims, init=glorot_uniform(), activation=None)\n        ])\n\n    return model(input)", "_____no_output_____" ], [ "pred_basic_model = train_and_evaluate(reader_train, reader_test, max_epochs=10, model_func=create_basic_model_terse)", "Training 116906 parameters in 10 parameter tensors.\n\nFinished Epoch[1 of 300]: [Training] loss = 2.054147 * 50000, metric = 75.0% * 50000 13.674s (3656.6 samples per second);\nFinished Epoch[2 of 300]: [Training] loss = 1.695077 * 50000, metric = 62.6% * 50000 14.271s (3503.7 samples per second);\nFinished Epoch[3 of 300]: [Training] loss = 1.542115 * 50000, metric = 56.3% * 50000 13.872s (3604.3 samples per second);\nFinished Epoch[4 of 300]: [Training] loss = 1.450798 * 50000, metric = 52.3% * 50000 13.823s (3617.3 samples per second);\nFinished Epoch[5 of 300]: [Training] loss = 1.373555 * 50000, metric = 49.2% * 50000 13.857s (3608.4 samples per second);\nFinished Epoch[6 of 300]: [Training] loss = 1.300828 * 50000, metric = 46.6% * 50000 13.965s (3580.3 samples per second);\nFinished Epoch[7 of 300]: [Training] loss = 1.232516 * 50000, metric = 43.7% * 50000 13.827s (3616.0 samples per second);\nFinished Epoch[8 of 300]: [Training] loss = 1.189415 * 50000, metric = 42.0% * 50000 13.885s (3600.9 samples per second);\nFinished Epoch[9 of 300]: [Training] loss = 1.134052 * 50000, metric = 39.9% * 50000 13.871s (3604.6 samples per second);\nFinished Epoch[10 of 300]: [Training] loss = 1.098405 * 50000, metric = 38.9% * 50000 13.961s (3581.3 samples per second);\n\nFinal Results: Minibatch[1-626]: errs = 36.2% * 10000\n\n" ] ], [ [ "Now that we have a trained model, let's classify the following image:", "_____no_output_____" ] ], [ [ "# Figure 6\nImage(url=\"https://cntk.ai/jup/201/00014.png\", width=64, height=64)", "_____no_output_____" ], [ "import PIL\n\ndef eval(pred_op, image_path):\n    label_lookup = [\"airplane\", \"automobile\", \"bird\", \"cat\", \"deer\", \"dog\", \"frog\", \"horse\", \"ship\", \"truck\"]\n    image_mean = 133.0\n    image_data = np.array(PIL.Image.open(image_path), dtype=np.float32)\n    image_data -= image_mean\n    image_data = np.ascontiguousarray(np.transpose(image_data, (2, 0, 1)))\n    \n    result = np.squeeze(pred_op.eval({pred_op.arguments[0]:[image_data]}))\n    \n    # Return top 3 results:\n    top_count = 3\n    result_indices = (-np.array(result)).argsort()[:top_count]\n\n    print(\"Top 3 predictions:\")\n    for i in range(top_count):\n        print(\"\\tLabel: {:10s}, confidence: {:.2f}%\".format(label_lookup[result_indices[i]], result[result_indices[i]] * 100))", "_____no_output_____" ], [ "eval(pred_basic_model, \"data/CIFAR-10/test/00014.png\")", "Top 3 predictions:\n\tLabel: truck     , confidence: 98.95%\n\tLabel: ship      , confidence: 0.46%\n\tLabel: automobile, confidence: 0.26%\n" ] ], [ [ "Adding a dropout layer, with a drop rate of 0.25, before the last dense layer:", "_____no_output_____" ] ], [ [ "def create_basic_model_with_dropout(input, out_dims):\n\n    with default_options(activation=relu):\n        model = Sequential([\n            For(range(3), lambda i: [\n                Convolution((5,5), [32,32,64][i], init=glorot_uniform(), pad=True),\n                MaxPooling((3,3), strides=(2,2))\n            ]),\n            Dense(64, init=glorot_uniform()),\n            
Dropout(0.25),\n Dense(out_dims, init=glorot_uniform(), activation=None)\n ])\n\n return model(input)", "_____no_output_____" ], [ "pred_basic_model_dropout = train_and_evaluate(reader_train, reader_test, max_epochs=5, model_func=create_basic_model_with_dropout)", "Training 116906 parameters in 10 parameter tensors.\n\nFinished Epoch[1 of 300]: [Training] loss = 2.123667 * 50000, metric = 78.7% * 50000 16.391s (3050.5 samples per second);\nFinished Epoch[2 of 300]: [Training] loss = 1.817045 * 50000, metric = 67.9% * 50000 16.894s (2959.5 samples per second);\nFinished Epoch[3 of 300]: [Training] loss = 1.678272 * 50000, metric = 62.2% * 50000 17.006s (2940.1 samples per second);\nFinished Epoch[4 of 300]: [Training] loss = 1.583182 * 50000, metric = 58.1% * 50000 16.644s (3004.1 samples per second);\nFinished Epoch[5 of 300]: [Training] loss = 1.514311 * 50000, metric = 55.3% * 50000 16.790s (2977.9 samples per second);\n\nFinal Results: Minibatch[1-626]: errs = 49.2% * 10000\n\n" ] ], [ [ "Add batch normalization after each convolution and before the last dense layer:", "_____no_output_____" ] ], [ [ "def create_basic_model_with_batch_normalization(input, out_dims):\n\n with default_options(activation=relu):\n model = Sequential([\n For(range(3), lambda i: [\n Convolution((5,5), [32,32,64][i], init=glorot_uniform(), pad=True),\n BatchNormalization(map_rank=1),\n MaxPooling((3,3), strides=(2,2))\n ]),\n Dense(64, init=glorot_uniform()),\n BatchNormalization(map_rank=1),\n Dense(out_dims, init=glorot_uniform(), activation=None)\n ])\n\n return model(input)", "_____no_output_____" ], [ "pred_basic_model_bn = train_and_evaluate(reader_train, reader_test, max_epochs=5, model_func=create_basic_model_with_batch_normalization)", "Training 117290 parameters in 18 parameter tensors.\n\nFinished Epoch[1 of 300]: [Training] loss = 1.512835 * 50000, metric = 54.1% * 50000 15.499s (3226.1 samples per second);\nFinished Epoch[2 of 300]: [Training] loss = 1.206524 * 50000, metric = 42.8% * 50000 16.071s (3111.2 samples per second);\nFinished Epoch[3 of 300]: [Training] loss = 1.087695 * 50000, metric = 38.3% * 50000 16.160s (3094.1 samples per second);\nFinished Epoch[4 of 300]: [Training] loss = 1.008182 * 50000, metric = 35.4% * 50000 16.057s (3113.8 samples per second);\nFinished Epoch[5 of 300]: [Training] loss = 0.953168 * 50000, metric = 33.4% * 50000 16.247s (3077.4 samples per second);\n\nFinal Results: Minibatch[1-626]: errs = 30.8% * 10000\n\n" ] ], [ [ "Let's implement an inspired VGG style network, using layer API, here the architecture:\n\n| VGG9 |\n| ------------- |\n| conv3-64 |\n| conv3-64 |\n| max3 |\n| |\n| conv3-96 |\n| conv3-96 |\n| max3 |\n| |\n| conv3-128 |\n| conv3-128 |\n| max3 |\n| |\n| FC-1024 |\n| FC-1024 |\n| |\n| FC-10 |\n", "_____no_output_____" ] ], [ [ "def create_vgg9_model(input, out_dims):\n with default_options(activation=relu):\n model = Sequential([\n For(range(3), lambda i: [\n Convolution((3,3), [64,96,128][i], init=glorot_uniform(), pad=True),\n Convolution((3,3), [64,96,128][i], init=glorot_uniform(), pad=True),\n MaxPooling((3,3), strides=(2,2))\n ]),\n For(range(2), lambda : [\n Dense(1024, init=glorot_uniform())\n ]),\n Dense(out_dims, init=glorot_uniform(), activation=None)\n ])\n \n return model(input)", "_____no_output_____" ], [ "pred_vgg = train_and_evaluate(reader_train, reader_test, max_epochs=5, model_func=create_vgg9_model)", "Training 2675978 parameters in 18 parameter tensors.\n\nFinished Epoch[1 of 300]: [Training] loss = 2.253115 * 50000, metric 
= 83.6% * 50000 46.007s (1086.8 samples per second);\nFinished Epoch[2 of 300]: [Training] loss = 1.931100 * 50000, metric = 71.8% * 50000 46.236s (1081.4 samples per second);\nFinished Epoch[3 of 300]: [Training] loss = 1.706618 * 50000, metric = 63.3% * 50000 46.271s (1080.6 samples per second);\nFinished Epoch[4 of 300]: [Training] loss = 1.576171 * 50000, metric = 58.1% * 50000 46.348s (1078.8 samples per second);\nFinished Epoch[5 of 300]: [Training] loss = 1.473403 * 50000, metric = 53.7% * 50000 46.386s (1077.9 samples per second);\n\nFinal Results: Minibatch[1-626]: errs = 51.2% * 10000\n\n" ] ], [ [ "### Residual Network (ResNet)\n\nOne of the main problems of a deep neural network is how to propagate the error all the way back to the first layer. In a deep network, the gradient keeps getting smaller until it has no effect on the network weights. [ResNet](https://arxiv.org/abs/1512.03385) was designed to overcome this problem by defining a block with an identity path, as shown below:", "_____no_output_____" ] ], [ [ "# Figure 7\nImage(url=\"https://cntk.ai/jup/201/ResNetBlock2.png\")", "_____no_output_____" ] ], [ [ "The idea of the above block is twofold:\n\n* During back-propagation the gradient has a path that doesn't affect its magnitude.\n* The network only needs to learn the residual mapping (the delta to x).\n\nSo let's implement ResNet blocks using CNTK:\n\n    ResNetNode                   ResNetNodeInc\n        |                              |\n +------+------+             +---------+----------+\n |             |             |                    |\n V             |             V                    V\n+----------+   |     +--------------+   +----------------+\n| Conv, BN |   |     | Conv x 2, BN |   | SubSample, BN  |\n+----------+   |     +--------------+   +----------------+\n |             |             |                    |\n V             |             V                    |\n+-------+      |          +-------+               |\n| ReLU  |      |          | ReLU  |               |\n+-------+      |          +-------+               |\n |             |             |                    |\n V             |             V                    |\n+----------+   |       +----------+               |\n| Conv, BN |   |       | Conv, BN |               |\n+----------+   |       +----------+               |\n |             |             |                    |\n |    +---+    |             |        +---+       |\n +--->| + |<---+             +------->| + |<------+\n      +---+                           +---+\n        |                               |\n        V                               V\n    +-------+                       +-------+\n    | ReLU  |                       | ReLU  |\n    +-------+                       +-------+\n        |                               |\n        V                               V\n", "_____no_output_____" ] ], [ [ "from cntk.ops import combine, times, element_times, AVG_POOLING\n\ndef convolution_bn(input, filter_size, num_filters, strides=(1,1), init=he_normal(), activation=relu):\n    if activation is None:\n        activation = lambda x: x\n    \n    r = Convolution(filter_size, num_filters, strides=strides, init=init, activation=None, pad=True, bias=False)(input)\n    r = BatchNormalization(map_rank=1)(r)\n    r = activation(r)\n    \n    return r\n\ndef resnet_basic(input, num_filters):\n    c1 = convolution_bn(input, (3,3), num_filters)\n    c2 = convolution_bn(c1, (3,3), num_filters, activation=None)\n    p = c2 + input\n    return relu(p)\n\ndef resnet_basic_inc(input, num_filters):\n    c1 = convolution_bn(input, (3,3), num_filters, strides=(2,2))\n    c2 = convolution_bn(c1, (3,3), num_filters, activation=None)\n\n    s = convolution_bn(input, (1,1), num_filters, strides=(2,2), activation=None)\n    \n    p = c2 + s\n    return relu(p)\n\ndef resnet_basic_stack(input, num_filters, num_stack):\n    assert (num_stack > 0)\n    \n    r = input\n    for _ in range(num_stack):\n        r = resnet_basic(r, num_filters)\n    return r", "_____no_output_____" ] ], [ [ "Let's write the full model:", "_____no_output_____" ] ], [ [ "def create_resnet_model(input, out_dims):\n    conv = convolution_bn(input, (3,3), 16)\n    r1_1 = resnet_basic_stack(conv, 16, 3)\n\n    r2_1 = resnet_basic_inc(r1_1, 32)\n    r2_2 = resnet_basic_stack(r2_1, 32, 2)\n\n    r3_1 = resnet_basic_inc(r2_2, 64)\n    r3_2 = resnet_basic_stack(r3_1, 64, 2)\n\n    # Global average pooling\n    pool = 
AveragePooling(filter_shape=(8,8), strides=(1,1))(r3_2) \n net = Dense(out_dims, init=he_normal(), activation=None)(pool)\n \n return net", "_____no_output_____" ], [ "pred_resnet = train_and_evaluate(reader_train, reader_test, max_epochs=5, model_func=create_resnet_model)", "Training 272474 parameters in 65 parameter tensors.\n\nFinished Epoch[1 of 300]: [Training] loss = 1.859668 * 50000, metric = 69.3% * 50000 47.499s (1052.7 samples per second);\nFinished Epoch[2 of 300]: [Training] loss = 1.583096 * 50000, metric = 58.7% * 50000 48.541s (1030.0 samples per second);\nFinished Epoch[3 of 300]: [Training] loss = 1.453993 * 50000, metric = 53.4% * 50000 48.982s (1020.8 samples per second);\nFinished Epoch[4 of 300]: [Training] loss = 1.347815 * 50000, metric = 49.2% * 50000 48.704s (1026.6 samples per second);\nFinished Epoch[5 of 300]: [Training] loss = 1.269185 * 50000, metric = 45.8% * 50000 48.155s (1038.3 samples per second);\n\nFinal Results: Minibatch[1-626]: errs = 44.6% * 10000\n\n" ] ] ]
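To see why the identity path helps, here is a minimal, framework-agnostic sketch of the forward pass of the basic residual block above, with the two convolution+BN stages reduced to dense matrices for brevity. All names and shapes are illustrative assumptions, not part of the CNTK model; the point is that the output is `relu(f(x) + x)`, so the gradient with respect to `x` always contains an identity term that back-propagation can flow through unchanged.

```python
import numpy as np

def relu(x):
    return np.maximum(x, 0.0)

def residual_block_forward(x, w1, w2):
    """Sketch of resnet_basic: out = relu(f(x) + x), where f is two weight layers."""
    h = relu(x @ w1)      # stands in for the first Conv+BN+ReLU stage
    f = h @ w2            # stands in for the second Conv+BN stage (no activation yet)
    return relu(f + x)    # identity shortcut: the block only has to learn the residual f

rng = np.random.default_rng(0)
x = rng.normal(size=(4, 16))                 # a toy batch of 4 feature vectors
w1 = rng.normal(scale=0.1, size=(16, 16))
w2 = rng.normal(scale=0.1, size=(16, 16))
print(residual_block_forward(x, w1, w2).shape)   # (4, 16)
```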
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ] ]
d0137cbbf38e69b96223386067e551ffaa681f70
422,205
ipynb
Jupyter Notebook
StockReturnsPrediction_fh21/StockReturnsPrediction_v2_DExpSmoothing.ipynb
clairvoyant/Stocks
ea1a75494dd9015d2cd9dace39105007d3f3ed96
[ "Apache-2.0" ]
265
2019-02-11T05:41:42.000Z
2022-03-31T17:10:29.000Z
StockReturnsPrediction_fh21/StockReturnsPrediction_v2_DExpSmoothing.ipynb
clairvoyant/Stocks
ea1a75494dd9015d2cd9dace39105007d3f3ed96
[ "Apache-2.0" ]
5
2019-09-09T13:02:39.000Z
2021-03-24T13:28:36.000Z
StockReturnsPrediction_fh21/StockReturnsPrediction_v2_DExpSmoothing.ipynb
clairvoyant/Stocks
ea1a75494dd9015d2cd9dace39105007d3f3ed96
[ "Apache-2.0" ]
213
2019-02-05T11:20:02.000Z
2022-03-31T06:25:45.000Z
184.12778
67,260
0.868156
[ [ [ "# Objective\n* 20190815:\n * Given stock returns for the last N days, we do prediction for the next N+H days, where H is the forecast horizon\n * We use double exponential smoothing to predict", "_____no_output_____" ] ], [ [ "%matplotlib inline\n\nimport math\nimport matplotlib\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport time\n\nfrom collections import defaultdict\nfrom datetime import date, datetime, time, timedelta\nfrom matplotlib import pyplot as plt\nfrom pylab import rcParams\nfrom sklearn.metrics import mean_squared_error\nfrom tqdm import tqdm_notebook\n\n#### Input params ##################\nstk_path = \"./data/VTI_20130102_20181231.csv\"\nH = 21\ntrain_size = 252*3 # Use 3 years of data as train set. Note there are about 252 trading days in a year\nval_size = 252 # Use 1 year of data as validation set\n\n# alpha - smoothing coeff\nalphaMax = 0.999\nalphaMin = 0.001\nalphaStep = 0.001\n\n# beta - trend coeff\nbetaMax = 0.999\nbetaMin = 0.001\nbetaStep = 0.001\n\nfontsize = 14\nticklabelsize = 14\n####################################\n\ntrain_val_size = train_size + val_size # Size of train+validation set\nprint(\"No. of days in train+validation set = \" + str(train_val_size))", "No. of days in train+validation set = 1008\n" ], [ "print(\"We will start forecasting on day %d\" % (train_val_size+1))", "We will start forecasting on day 1009\n" ] ], [ [ "# Common functions", "_____no_output_____" ] ], [ [ "def get_smape(y_true, y_pred):\n \"\"\"\n Compute symmetric mean absolute percentage error\n \"\"\"\n y_true, y_pred = np.array(y_true), np.array(y_pred)\n return 100/len(y_true) * np.sum(2 * np.abs(y_pred - y_true) / (np.abs(y_true) + np.abs(y_pred)))\n\ndef get_mape(y_true, y_pred): \n \"\"\"\n Compute mean absolute percentage error (MAPE)\n \"\"\"\n y_true, y_pred = np.array(y_true), np.array(y_pred)\n return np.mean(np.abs((y_true - y_pred) / y_true)) * 100\n\ndef get_mae(a, b):\n \"\"\"\n Comp mean absolute error e_t = E[|a_t - b_t|]. a and b can be lists.\n Returns a vector of len = len(a) = len(b)\n \"\"\"\n return np.mean(abs(np.array(a)-np.array(b)))\n\ndef get_rmse(a, b):\n \"\"\"\n Comp RMSE. a and b can be lists.\n Returns a scalar.\n \"\"\"\n return math.sqrt(np.mean((np.array(a)-np.array(b))**2))\n\ndef double_exponential_smoothing(series, H, alpha=0.5, beta=0.5, return_all=False):\n \"\"\"\n Given a series and alpha, return series of smoothed points\n Initialization: \n S_1 = y_1, \n b_1 = y_2 - y_1, \n F_1 = 0, F_2 = y_1\n level, S_t = alpha*y_t + (1-alpha)*(S_t-1 + b_t-1)\n trend, b_t = beta*(S_t - S_t-1) + (1-beta)*b_t-1\n forecast, F_t+1 = S_t + b_t\n forecast, F_t+m = S_t + m*b_t\n result[len(series)] is the estimate of series[len(series)]\n Inputs\n series: series to forecast\n H : forecast horizon\n alpha : smoothing constant. \n When alpha is close to 1, dampening is quick. 
\n When alpha is close to 0, dampening is slow\n beta : smoothing constant for trend\n return_all : if 1 return both original series + predictions, if 0 return predictions only\n Outputs\n the predictions of length H\n \"\"\"\n result = [0, series[0]] \n for n in range(1, len(series)+H-1):\n if n == 1:\n level, trend = series[0], series[1] - series[0]\n if n >= len(series): # we are forecasting\n m = n - len(series) + 2\n result.append(level + m*trend) # result[len(series)+1] is the estimate of series[len(series)+1]\n else:\n value = series[n]\n last_level, level = level, alpha*value + (1-alpha)*(level+trend)\n trend = beta*(level-last_level) + (1-beta)*trend\n result.append(level+trend) \n # e.g. result[2] uses series[1] \n # ie. result[2] is the estimate of series[2]\n # e.g. result[len(series)] uses series[len(series)-1] \n # ie. result[len(series)] is the estimate of series[len(series)]\n \n if return_all == True:\n return result\n else:\n return result[len(series):len(series)+H]\n\n\ndef get_error_metrics(series, train_size, H, alpha, beta):\n \"\"\"\n Given a series consisting of both train+validation, do predictions of forecast horizon H on the validation set, \n at H/2 intervals.\n Inputs\n series : series to forecast, with length = (train_size + val_size)\n train_size : length of series to use as train ie. train set is series[:train_size]\n H : forecast horizon\n Outputs\n mean of rmse, mean of mape, mean of mae\n \"\"\"\n # Predict using single exponential smoothing, and compute error metrics also\n rmse = [] # root mean square error\n mape = [] # mean absolute percentage error\n mae = [] # mean absolute error\n smape = [] # symmetric mean absolute percentage error\n preds_dict = {}\n \n for i in range(train_size, len(series)-H, int(H/2)):\n preds_list = double_exponential_smoothing(series[i-train_size:i], H, alpha, beta)\n \n rmse.append(get_rmse(series[i:i+H], preds_list))\n mape.append(get_mape(series[i:i+H], preds_list))\n mae.append(get_mae(series[i:i+H], preds_list))\n smape.append(get_smape(series[i:i+H], preds_list))\n preds_dict[i] = preds_list\n \n return np.mean(rmse), np.mean(mape), np.mean(mae), np.mean(smape), preds_dict \n \ndef hyperpram_tune_alpha_beta(series, train_size, H):\n \"\"\"\n Given a series, tune hyperparameter alpha, fit and predict\n Inputs\n series : series to forecast, with length = (train_size + val_size)\n train_size : length of series to use as train ie. 
train set is series[:train_size]\n H : forecast horizon\n Outputs\n optimum hyperparameters, error metrics dataframe\n \"\"\"\n err_dict = defaultdict(list)\n alpha = alphaMin\n beta = betaMin\n while alpha <= alphaMax:\n while beta <= betaMax:\n rmse_mean, mape_mean, mae_mean, smape_mean, _ = get_error_metrics(series, train_size, H, alpha, beta)\n \n # Append alpha and beta\n err_dict['alpha'].append(alpha)\n err_dict['beta'].append(beta)\n \n # Compute error metrics\n err_dict['rmse'].append(rmse_mean)\n err_dict['mape'].append(mape_mean)\n err_dict['mae'].append(mae_mean)\n err_dict['smape'].append(smape_mean)\n \n # Increase beta by one step\n beta = beta + betaStep\n \n # Increase alpha by one step\n alpha = alpha + alphaStep\n \n # Convert to dataframe\n err_df = pd.DataFrame(err_dict)\n \n # Get min RMSE\n rmse_min = err_df['rmse'].min()\n \n return err_df[err_df['rmse'] == rmse_min]['alpha'].values[0], err_df[err_df['rmse'] == rmse_min]['beta'].values[0], err_df", "_____no_output_____" ] ], [ [ "# Load data", "_____no_output_____" ] ], [ [ "df = pd.read_csv(stk_path, sep = \",\")\n\n# Convert Date column to datetime\ndf.loc[:, 'Date'] = pd.to_datetime(df['Date'],format='%Y-%m-%d')\n\n# Change all column headings to be lower case, and remove spacing\ndf.columns = [str(x).lower().replace(' ', '_') for x in df.columns]\n\n# Sort by datetime\ndf.sort_values(by='date', inplace=True, ascending=True)\n\ndf.head(10)", "_____no_output_____" ], [ "df['date'].min(), df['date'].max() ", "_____no_output_____" ], [ "# Plot adjusted close over time\nrcParams['figure.figsize'] = 10, 8 # width 10, height 8\n\nax = df.plot(x='date', y='adj_close', style='b-', grid=True)\nax.set_xlabel(\"date\")\nax.set_ylabel(\"USD\")", "_____no_output_____" ] ], [ [ "# Get Stock Returns", "_____no_output_____" ] ], [ [ "df['returns'] = df['adj_close'].pct_change() * 100\ndf.loc[0, 'returns'] = 0 # set the first value of returns to be 0 for simplicity\ndf.head()", "_____no_output_____" ], [ "# Plot returns over time\nrcParams['figure.figsize'] = 10, 8 # width 10, height 8\nax = df.plot(x='date', y='returns', style='b-', grid=True)\nax.set_xlabel(\"date\")\nax.set_ylabel(\"returns\")", "_____no_output_____" ], [ "# Plot distribution of returns\nplt.figure(figsize=(12, 8), dpi=80)\nax = sns.distplot(df['returns'][1:])\nax.grid()\nax.set_xlabel('daily returns', fontsize = 14)\nax.set_ylabel(\"probability density function\", fontsize = 14)\nmatplotlib.rcParams.update({'font.size': 14})\n", "_____no_output_____" ] ], [ [ "# Predict for a specific H (forecast horizon) and a specific date", "_____no_output_____" ] ], [ [ "i = train_val_size # Predict for day i, for the next H-1 days. 
Note indexing of days start from 0.\nprint(\"Predicting on day %d, date %s, with forecast horizon H = %d\" % (i, df.iloc[i]['date'], H))", "Predicting on day 1008, date 2017-01-03 00:00:00, with forecast horizon H = 21\n" ], [ "# Predict\npreds_list = double_exponential_smoothing(df['returns'][i-train_val_size:i].values, H)\nprint(\"For forecast horizon %d, predicting on day %d, date %s, the RMSE is %f\" % (H, i, df['date'][i], get_rmse(df[i:i+H]['returns'], preds_list)))\nprint(\"For forecast horizon %d, predicting on day %d, date %s, the MAPE is %f\" % (H, i, df['date'][i], get_mape(df[i:i+H]['returns'], preds_list)))\nprint(\"For forecast horizon %d, predicting on day %d, date %s, the SMAPE is %f\" % (H, i, df['date'][i], get_smape(df[i:i+H]['returns'], preds_list)))\nprint(\"For forecast horizon %d, predicting on day %d, date %s, the MAE is %f\" % (H, i, df['date'][i], get_mae(df[i:i+H]['returns'], preds_list)))", "For forecast horizon 21, predicting on day 1008, date 2017-01-03 00:00:00, the RMSE is 1.334463\nFor forecast horizon 21, predicting on day 1008, date 2017-01-03 00:00:00, the MAPE is 1495.504267\nFor forecast horizon 21, predicting on day 1008, date 2017-01-03 00:00:00, the SMAPE is 161.827924\nFor forecast horizon 21, predicting on day 1008, date 2017-01-03 00:00:00, the MAE is 1.222152\n" ], [ "# Plot the predictions\nrcParams['figure.figsize'] = 10, 8 # width 10, height 8\nmatplotlib.rcParams.update({'font.size': 14})\n\nax = df.plot(x='date', y='returns', style='bx-', grid=True)\n\n# Plot the predictions\nax.plot(df['date'][i:i+H], preds_list, marker='x')\n \nax.set_xlabel(\"date\")\n# ax.set_ylabel(\"daily returns\")\nax.legend(['daily returns', 'predictions'])\n# ax.set_ylim([105, 120])\nax.set_xlim([date(2016, 11, 1), date(2017, 2, 28)])", "_____no_output_____" ] ], [ [ "# Predict for a specific H (forecast horizon) and a specific date, with hyperparameter tuning - alpha, beta", "_____no_output_____" ] ], [ [ "i = train_val_size # Predict for day i, for the next H-1 days. 
Note indexing of days start from 0.\nprint(\"Predicting on day %d, date %s, with forecast horizon H = %d\" % (i, df.iloc[i]['date'], H))", "Predicting on day 1008, date 2017-01-03 00:00:00, with forecast horizon H = 21\n" ], [ "# Get optimum hyperparams\nalpha_opt, beta_opt, err_df = hyperpram_tune_alpha_beta(df['returns'][i-train_val_size:i].values, train_size, H)\nprint(\"alpha_opt = \" + str(alpha_opt))\nprint(\"beta_opt = \" + str(beta_opt))\n# print(\"rmse opt = \" + str(err_df[(err_df['alpha']==alpha_opt) & (err_df['beta']==beta_opt)]['rmse'].values[0]))\nprint(err_df[(err_df['alpha']==alpha_opt) & (err_df['beta']==beta_opt)])\nerr_df", "/Users/yibin/anaconda3/lib/python3.7/site-packages/ipykernel_launcher.py:13: RuntimeWarning: divide by zero encountered in true_divide\n del sys.path[0]\n" ], [ "# Predict\npreds_list = double_exponential_smoothing(df['returns'][i-train_val_size:i].values, H, alpha_opt, beta_opt)\nprint(\"For forecast horizon %d, predicting on day %d, date %s, the RMSE is %f\" % (H, i, df['date'][i], get_rmse(df[i:i+H]['returns'], preds_list)))\nprint(\"For forecast horizon %d, predicting on day %d, date %s, the MAPE is %f\" % (H, i, df['date'][i], get_mape(df[i:i+H]['returns'], preds_list)))\nprint(\"For forecast horizon %d, predicting on day %d, date %s, the SMAPE is %f\" % (H, i, df['date'][i], get_smape(df[i:i+H]['returns'], preds_list)))\nprint(\"For forecast horizon %d, predicting on day %d, date %s, the MAE is %f\" % (H, i, df['date'][i], get_mae(df[i:i+H]['returns'], preds_list)))", "For forecast horizon 21, predicting on day 1008, date 2017-01-03 00:00:00, the RMSE is 0.572852\nFor forecast horizon 21, predicting on day 1008, date 2017-01-03 00:00:00, the MAPE is 576.440435\nFor forecast horizon 21, predicting on day 1008, date 2017-01-03 00:00:00, the SMAPE is 124.626051\nFor forecast horizon 21, predicting on day 1008, date 2017-01-03 00:00:00, the MAE is 0.455851\n" ], [ "# Plot the predictions\nrcParams['figure.figsize'] = 10, 8 # width 10, height 8\nmatplotlib.rcParams.update({'font.size': 14})\n\nax = df.plot(x='date', y='returns', style='bx-', grid=True)\n\n# Plot the predictions\nax.plot(df['date'][i:i+H], preds_list, marker='x')\n \nax.set_xlabel(\"date\")\n# ax.set_ylabel(\"USD\")\nax.legend(['returns', 'predictions'])\n# ax.set_ylim([105, 120])\nax.set_xlim([date(2016, 11, 1), date(2017, 2, 28)])", "_____no_output_____" ] ], [ [ "# Predict for a specific H (forecast horizon), and various dates, using model trained in previous step", "_____no_output_____" ] ], [ [ "print(\"alpha_opt = \" + str(alpha_opt))\nprint(\"beta_opt = \" + str(beta_opt))", "alpha_opt = 0.001\nbeta_opt = 0.6060000000000004\n" ], [ "# Predict and compute error metrics also\nrmse = [] # root mean square error\nmape = [] # mean absolute percentage error\nmae = [] # mean absolute error\nsmape = [] # symmetric mean absolute percentage error\npreds_dict = {}\ni_list = range(train_val_size, train_val_size+84*5+42+1, 42)\nfor i in i_list:\n print(\"Predicting on day %d, date %s\" % (i, df.iloc[i]['date']))\n \n preds_list = double_exponential_smoothing(df['returns'][i-train_val_size:i].values, H, alpha_opt, beta_opt)\n \n # Collect the predictions\n preds_dict[i] = preds_list\n \n # Compute error metrics\n rmse.append(get_rmse(df[i:i+H]['returns'], preds_list))\n mape.append(get_mape(df[i:i+H]['returns'], preds_list))\n mae.append(get_mae(df[i:i+H]['returns'], preds_list))\n smape.append(get_smape(df[i:i+H]['returns'], preds_list))\n\nprint(\"Altogether we made %d forecasts, each of 
length %d days\" % (len(rmse), H))", "Predicting on day 1008, date 2017-01-03 00:00:00\nPredicting on day 1050, date 2017-03-06 00:00:00\nPredicting on day 1092, date 2017-05-04 00:00:00\nPredicting on day 1134, date 2017-07-05 00:00:00\nPredicting on day 1176, date 2017-09-01 00:00:00\nPredicting on day 1218, date 2017-11-01 00:00:00\nPredicting on day 1260, date 2018-01-03 00:00:00\nPredicting on day 1302, date 2018-03-06 00:00:00\nPredicting on day 1344, date 2018-05-04 00:00:00\nPredicting on day 1386, date 2018-07-05 00:00:00\nPredicting on day 1428, date 2018-09-04 00:00:00\nPredicting on day 1470, date 2018-11-01 00:00:00\nAltogether we made 12 forecasts, each of length 21 days\n" ], [ "print(\"For forecast horizon %d, the mean RMSE is %f\" % (H, np.mean(rmse)))\nprint(\"For forecast horizon %d, the mean MAPE is %f\" % (H, np.mean(mape)))\nprint(\"For forecast horizon %d, the mean SMAPE is %f\" % (H, np.mean(smape)))\nprint(\"For forecast horizon %d, the mean MAE is %f\" % (H, np.mean(mae)))", "For forecast horizon 21, the mean RMSE is 2.784676\nFor forecast horizon 21, the mean MAPE is inf\nFor forecast horizon 21, the mean SMAPE is 160.611727\nFor forecast horizon 21, the mean MAE is 2.315516\n" ], [ "results_final_no_tune = pd.DataFrame({'day': i_list,\n 'alpha_opt': [alpha_opt]*len(i_list),\n 'beta_opt': [beta_opt]*len(i_list),\n 'rmse': rmse,\n 'mape': mape,\n 'mae': mae,\n 'smape': smape})\nresults_final_no_tune", "_____no_output_____" ], [ "# Plot the predictions, and zoom in\nrcParams['figure.figsize'] = 10, 8 # width 10, height 8\n\nax = df.plot(x='date', y='returns', style='b-', grid=True)\n\n# Plot the predictions\nfor key in preds_dict:\n ax.plot(df['date'][key:key+H], preds_dict[key])\n \nax.set_xlabel(\"date\")\n# ax.set_ylabel(\"USD\")\nax.legend(['returns', 'predictions'])\n# ax.set_ylim([105, 150])\nax.set_xlim([date(2017, 1, 1), date(2018, 12, 31)])", "_____no_output_____" ] ], [ [ "# Predict for a specific H (forecast horizon), and various dates, tuning model for every prediction", "_____no_output_____" ] ], [ [ "# Predict and compute error metrics also\npreds_dict = {}\nresults_final = defaultdict(list)\ni_list = range(train_val_size, train_val_size+84*5+42+1, 42)\nfor i in i_list:\n print(\"Predicting on day %d, date %s\" % (i, df.iloc[i]['date']))\n \n # Get optimum hyperparams\n alpha_opt, beta_opt, err_df = hyperpram_tune_alpha_beta(df['returns'][i-train_val_size:i].values, train_size, H)\n \n preds_list = double_exponential_smoothing(df['returns'][i-train_val_size:i].values, H, alpha_opt, beta_opt)\n \n # Collect the predictions\n preds_dict[i] = preds_list\n \n # Compute error metrics\n results_final['rmse'].append(get_rmse(df[i:i+H]['returns'], preds_list))\n results_final['mape'].append(get_mape(df[i:i+H]['returns'], preds_list))\n results_final['mae'].append(get_mae(df[i:i+H]['returns'], preds_list))\n results_final['smape'].append(get_smape(df[i:i+H]['returns'], preds_list))\n results_final['alpha_opt'].append(alpha_opt)\n results_final['beta_opt'].append(beta_opt)\n \nresults_final = pd.DataFrame(results_final)\n\nprint(\"Altogether we made %d forecasts, each of length %d days\" % (len(rmse), H))", "Predicting on day 1008, date 2017-01-03 00:00:00\n" ], [ "print(\"For forecast horizon %d, the mean RMSE is %f\" % (H, results_final['rmse'].mean()))\nprint(\"For forecast horizon %d, the mean MAPE is %f\" % (H, results_final['mape'].mean()))\nprint(\"For forecast horizon %d, the mean SMAPE is %f\" % (H, results_final['smape'].mean()))\nprint(\"For forecast 
horizon %d, the mean MAE is %f\" % (H, results_final['mae'].mean()))", "For forecast horizon 21, the mean RMSE is 9.621568\nFor forecast horizon 21, the mean MAPE is inf\nFor forecast horizon 21, the mean SMAPE is 177.869623\nFor forecast horizon 21, the mean MAE is 9.356783\n" ], [ "# results\nresults_final", "_____no_output_____" ], [ "# Plot the predictions, and zoom in\nrcParams['figure.figsize'] = 10, 8 # width 10, height 8\n\nax = df.plot(x='date', y='returns', style='b-', grid=True)\n\n# Plot the predictions\nfor key in preds_dict:\n ax.plot(df['date'][key:key+H], preds_dict[key])\n \nax.set_xlabel(\"date\")\n# ax.set_ylabel(\"USD\")\nax.legend(['returns', 'predictions'])\n# ax.set_ylim([105, 150])\nax.set_xlim([date(2017, 1, 1), date(2018, 12, 31)])", "_____no_output_____" ], [ "# Plot scatter plot of actual values vs. predictions\nfor key in preds_dict:\n plt.plot(df['returns'][key:key+H], preds_dict[key], 'x')\n \nplt.plot(range(-3, 4, 1), range(-3, 4, 1), 'b-')\n \nplt.xlabel('returns')\nplt.ylabel('predictions')\nplt.grid()", "_____no_output_____" ] ], [ [ "# Findings", "_____no_output_____" ], [ "Double exponential smoothing does not predict stock returns well.", "_____no_output_____" ] ] ]
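To make the recurrences used by `double_exponential_smoothing` above concrete (level `S_t = alpha*y_t + (1-alpha)*(S_t-1 + b_t-1)`, trend `b_t = beta*(S_t - S_t-1) + (1-beta)*b_t-1`, forecast `F_t+m = S_t + m*b_t`), here is a minimal worked example on a made-up four-point series; the values are illustrative only, not stock data:

```python
import numpy as np

series = np.array([10.0, 12.0, 13.0, 15.0])   # toy series, for illustration only
alpha, beta, H = 0.5, 0.5, 3

level, trend = series[0], series[1] - series[0]   # S_1 = y_1, b_1 = y_2 - y_1
for y in series[1:]:                              # same update order as the function above
    last_level = level
    level = alpha * y + (1 - alpha) * (level + trend)          # S_t
    trend = beta * (level - last_level) + (1 - beta) * trend   # b_t

forecasts = [level + m * trend for m in range(1, H + 1)]       # F_t+m = S_t + m*b_t
print(forecasts)   # [16.8125, 18.5, 20.1875]
```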
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ] ]
d013939ebf669c7541a5d728ac33c9a6f3ec639a
55,608
ipynb
Jupyter Notebook
code/plot_activation_functions.ipynb
p-koo/exponential_activations
7e48054b64a565364439c45932338a09eb2eb4d3
[ "MIT" ]
1
2021-09-18T04:09:07.000Z
2021-09-18T04:09:07.000Z
code/plot_activation_functions.ipynb
koo-lab/exponential_activations
9032a360c1abb0f07b824e3ce6d20707efe306fd
[ "MIT" ]
null
null
null
code/plot_activation_functions.ipynb
koo-lab/exponential_activations
9032a360c1abb0f07b824e3ce6d20707efe306fd
[ "MIT" ]
4
2020-08-03T02:08:42.000Z
2021-10-01T18:46:47.000Z
380.876712
27,916
0.933912
[ [ [ "# Notebook generates plots of activation functions\n\nFigures generated include:\n- Fig. 1a\n- Supp Fig. 7\n\n", "_____no_output_____" ] ], [ [ "import os\nimport numpy as np\nimport matplotlib.pyplot as plt\n%matplotlib inline", "_____no_output_____" ], [ "x = np.linspace(-5,5,50)\nx_neg = np.linspace(-5,0,50)\nx_pos = np.linspace(0, 5,50)\nx_elu = np.concatenate([x_neg, x_pos])\nelu = np.concatenate([.5*(np.exp(x_neg)-1), x_pos])\n \nfig = plt.figure(figsize=(5,5))\nax = plt.subplot(111)\nplt.plot(x, np.exp(x), linewidth=2, alpha=0.7)\nplt.plot(x, np.maximum(x,0), linewidth=2, alpha=0.7)\nplt.plot(x, 1/(1+np.exp(-x)), linewidth=2, alpha=0.7)\nplt.plot(x, np.tanh(x), linewidth=2, alpha=0.7)\nplt.plot(x, np.log(1+np.exp(x)), linewidth=2, alpha=0.7)\nplt.plot(x, x, linewidth=2, alpha=0.7)\nplt.plot(x_elu, elu, linewidth=2, alpha=0.7)\nplt.xticks([-4, -2, 0, 2, 4], fontsize=14)\nplt.yticks([-4, -2, 0, 2, 4], fontsize=14)\nplt.plot([-5,5], [0, 0], 'k', alpha=0.1)\nplt.plot([-0,0], [-5, 5], 'k', alpha=0.1)\nplt.axis('tight')\nax.set_ybound([-5, 5])\nax.set_xbound([-5, 5])\nplt.xlabel('x', fontsize=14)\nplt.ylabel('f(x)', fontsize=14)\nplt.legend(['Exp', 'Relu', 'Sigmoid', 'Tanh', 'Softplus', 'Linear', 'Elu'], frameon=False, fontsize=12)\n\noutfile = os.path.join('../results', 'activations.pdf')\nfig.savefig(outfile, format='pdf', dpi=200, bbox_inches='tight')", "_____no_output_____" ], [ "x = np.linspace(-5,5,20)\n\nfig = plt.figure(figsize=(5,5))\nax = plt.subplot()\nplt.plot(x, np.exp(x), linewidth=2)\nplt.plot(x, np.maximum((x-.2)**3,0), linewidth=2)\nplt.plot(x, 1/(1+np.exp(-(x-8)))*4000, linewidth=2)\nplt.plot(x, np.tanh(x-5.0)*500+500, linewidth=2)\nplt.xticks([-4, -2, 0, 2, 4], fontsize=14)\nplt.yticks([0, 50, 100, 150], fontsize=14)\nplt.plot([-5,5], [0, 0], 'k', alpha=0.1)\nplt.plot([-0,0], [-5, 200], 'k', alpha=0.1)\nax.set_ybound([-5, 100])\nax.set_xbound([-5, 5])\nplt.xlabel('x', fontsize=14)\nplt.ylabel('f(x)', fontsize=14)\nplt.legend(['Exp', 'Modified-Relu', 'Modified-Sigmoid', 'Modified-Tanh'], frameon=False, fontsize=12)\noutfile = os.path.join('../results', 'modified-activations_zoom.pdf')\nfig.savefig(outfile, format='pdf', dpi=200, bbox_inches='tight')", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code" ] ]
d0139e83db77fa33dfc064c37a926857d574815f
168,210
ipynb
Jupyter Notebook
Image Classifier Project.ipynb
stoykostanchev/aipnd-project
da4114caca3060b8058bf1019ee4edeb9135a27d
[ "MIT" ]
null
null
null
Image Classifier Project.ipynb
stoykostanchev/aipnd-project
da4114caca3060b8058bf1019ee4edeb9135a27d
[ "MIT" ]
null
null
null
Image Classifier Project.ipynb
stoykostanchev/aipnd-project
da4114caca3060b8058bf1019ee4edeb9135a27d
[ "MIT" ]
null
null
null
360.192719
140,020
0.9296
[ [ [ "# Developing an AI application\n\nGoing forward, AI algorithms will be incorporated into more and more everyday applications. For example, you might want to include an image classifier in a smart phone app. To do this, you'd use a deep learning model trained on hundreds of thousands of images as part of the overall application architecture. A large part of software development in the future will be using these types of models as common parts of applications. \n\nIn this project, you'll train an image classifier to recognize different species of flowers. You can imagine using something like this in a phone app that tells you the name of the flower your camera is looking at. In practice you'd train this classifier, then export it for use in your application. We'll be using [this dataset](http://www.robots.ox.ac.uk/~vgg/data/flowers/102/index.html) of 102 flower categories, you can see a few examples below. \n\n<img src='assets/Flowers.png' width=500px>\n\nThe project is broken down into multiple steps:\n\n* Load and preprocess the image dataset\n* Train the image classifier on your dataset\n* Use the trained classifier to predict image content\n\nWe'll lead you through each part which you'll implement in Python.\n\nWhen you've completed this project, you'll have an application that can be trained on any set of labeled images. Here your network will be learning about flowers and end up as a command line application. But, what you do with your new skills depends on your imagination and effort in building a dataset. For example, imagine an app where you take a picture of a car, it tells you what the make and model is, then looks up information about it. Go build your own dataset and make something new.\n\nFirst up is importing the packages you'll need. It's good practice to keep all the imports at the beginning of your code. As you work through this notebook and find you need to import a package, make sure to add the import up here.\n\nPlease make sure if you are running this notebook in the workspace that you have chosen GPU rather than CPU mode.", "_____no_output_____" ] ], [ [ "# Imports here\nimport numpy as np\nimport torch\nimport data_utils\nimport train_f as train\nfrom utils import get_saved_model, get_device, get_checkpoints_path, evaluate_model\nimport predict_f as predict\nimport matplotlib.pyplot as plt", "_____no_output_____" ] ], [ [ "## Load the data\n\nHere you'll use `torchvision` to load the data ([documentation](http://pytorch.org/docs/0.3.0/torchvision/index.html)). The data should be included alongside this notebook, otherwise you can [download it here](https://s3.amazonaws.com/content.udacity-data.com/nd089/flower_data.tar.gz). The dataset is split into three parts, training, validation, and testing. For the training, you'll want to apply transformations such as random scaling, cropping, and flipping. This will help the network generalize leading to better performance. You'll also need to make sure the input data is resized to 224x224 pixels as required by the pre-trained networks.\n\nThe validation and testing sets are used to measure the model's performance on data it hasn't seen yet. For this you don't want any scaling or rotation transformations, but you'll need to resize then crop the images to the appropriate size.\n\nThe pre-trained networks you'll use were trained on the ImageNet dataset where each color channel was normalized separately. For all three sets you'll need to normalize the means and standard deviations of the images to what the network expects. 
For the means, it's `[0.485, 0.456, 0.406]` and for the standard deviations `[0.229, 0.224, 0.225]`, calculated from the ImageNet images. These values will shift each color channel to be centered at 0 and range from -1 to 1.\n ", "_____no_output_____" ] ], [ [ "data_dir = 'flowers'\ntrain_dir = data_dir + '/train'\nvalid_dir = data_dir + '/valid'\ntest_dir = data_dir + '/test'", "_____no_output_____" ], [ "# TODO: Define your transforms for the training, validation, and testing sets\ndataloaders, image_datasets, data_transforms = data_utils.get_data(data_dir, train_dir, valid_dir, test_dir)", "_____no_output_____" ] ], [ [ "### Label mapping\n\nYou'll also need to load in a mapping from category label to category name. You can find this in the file `cat_to_name.json`. It's a JSON object which you can read in with the [`json` module](https://docs.python.org/2/library/json.html). This will give you a dictionary mapping the integer encoded categories to the actual names of the flowers.", "_____no_output_____" ] ], [ [ "import json\n\nwith open('cat_to_name.json', 'r') as f:\n cat_to_name = json.load(f)", "_____no_output_____" ] ], [ [ "# Building and training the classifier\n\nNow that the data is ready, it's time to build and train the classifier. As usual, you should use one of the pretrained models from `torchvision.models` to get the image features. Build and train a new feed-forward classifier using those features.\n\nWe're going to leave this part up to you. Refer to [the rubric](https://review.udacity.com/#!/rubrics/1663/view) for guidance on successfully completing this section. Things you'll need to do:\n\n* Load a [pre-trained network](http://pytorch.org/docs/master/torchvision/models.html) (If you need a starting point, the VGG networks work great and are straightforward to use)\n* Define a new, untrained feed-forward network as a classifier, using ReLU activations and dropout\n* Train the classifier layers using backpropagation using the pre-trained network to get the features\n* Track the loss and accuracy on the validation set to determine the best hyperparameters\n\nWe've left a cell open for you below, but use as many as you need. Our advice is to break the problem up into smaller parts you can run separately. Check that each part is doing what you expect, then move on to the next. You'll likely find that as you work through each part, you'll need to go back and modify your previous code. This is totally normal!\n\nWhen training make sure you're updating only the weights of the feed-forward network. You should be able to get the validation accuracy above 70% if you build everything right. Make sure to try different hyperparameters (learning rate, units in the classifier, epochs, etc) to find the best model. Save those hyperparameters to use as default values in the next part of the project.\n\nOne last important tip if you're using the workspace to run your code: To avoid having your workspace disconnect during the long-running tasks in this notebook, please read in the earlier page in this lesson called Intro to GPU Workspaces about Keeping Your Session Active. 
You'll want to include code from the workspace_utils.py module.", "_____no_output_____" ] ], [ [ "# arch = 'resnet34'\n# arch = 'inception_v3' -> Expected tensor for argument #1 'input' to have the same dimension as tensor for 'result'; but 4 does not equal 2 (while checking arguments for cudnn_convolution)\n# arch = 'densenet161'\narch = 'vgg16'\ntrain.train(data_dir, cat_to_name, './', max_epochs=1, arch=arch)", "- Loaded model from a checkpoint -\n- Input features: - 25088\n- Continuing training from a previous state -\n- Starting from epoch: 0\n- End epoch: 1\n- Training ... -\nEpoch: 1/1 ... Steps: 50 ... Train loss: 1.3225 ... Train accuracy: 65%\nEpoch: 1/1 ... Steps: 100 ... Train loss: 1.3011 ... Train accuracy: 66%\nEvaluating epoch 1 ... \n- Calculating accuracy and loss ... -\nValidation loss: 0.0261 ... Validation accuracy: 65 %\nSaving model\nPath: ./state.pt\nTraining complete\n" ] ], [ [ "## Testing your network\n\nIt's good practice to test your trained network on test data, images the network has never seen either in training or validation. This will give you a good estimate for the model's performance on completely new images. Run the test images through the network and measure the accuracy, the same way you did validation. You should be able to reach around 70% accuracy on the test set if the model has been trained well.", "_____no_output_____" ] ], [ [ "model = get_saved_model(arch=arch)\nmodel.to(get_device())\nmodel.eval()\nacc, _ = evaluate_model(dataloaders['test'], model)\nprint('Accuracy on the test dataset: %d %%' % (acc))", "- Loaded model from a checkpoint -\n- Input features: - 25088\n- Calculating accuracy and loss ... -\nAccuracy on the test dataset: 65 %\n" ] ], [ [ "## Save the checkpoint\n\nNow that your network is trained, save the model so you can load it later for making predictions. You probably want to save other things such as the mapping of classes to indices which you get from one of the image datasets: `image_datasets['train'].class_to_idx`. You can attach this to the model as an attribute which makes inference easier later on.\n\n```model.class_to_idx = image_datasets['train'].class_to_idx```\n\nRemember that you'll want to completely rebuild the model later so you can use it for inference. Make sure to include any information you need in the checkpoint. If you want to load the model and keep training, you'll want to save the number of epochs as well as the optimizer state, `optimizer.state_dict`. You'll likely want to use this trained model in the next part of the project, so best to save it now.", "_____no_output_____" ] ], [ [ "# See utils.save_checkpoint", "_____no_output_____" ] ], [ [ "## Loading the checkpoint\n\nAt this point it's good to write a function that can load a checkpoint and rebuild the model. That way you can come back to this project and keep working on it without having to retrain the network.", "_____no_output_____" ] ], [ [ "# See utils.get_saved_model", "_____no_output_____" ] ], [ [ "# Inference for classification\n\nNow you'll write a function to use a trained network for inference. That is, you'll pass an image into the network and predict the class of the flower in the image. Write a function called `predict` that takes an image and a model, then returns the top $K$ most likely classes along with the probabilities. 
It should look like \n\n```python\nprobs, classes = predict(image_path, model)\nprint(probs)\nprint(classes)\n> [ 0.01558163 0.01541934 0.01452626 0.01443549 0.01407339]\n> ['70', '3', '45', '62', '55']\n```\n\nFirst you'll need to handle processing the input image such that it can be used in your network. \n\n## Image Preprocessing\n\nYou'll want to use `PIL` to load the image ([documentation](https://pillow.readthedocs.io/en/latest/reference/Image.html)). It's best to write a function that preprocesses the image so it can be used as input for the model. This function should process the images in the same manner used for training. \n\nFirst, resize the images where the shortest side is 256 pixels, keeping the aspect ratio. This can be done with the [`thumbnail`](http://pillow.readthedocs.io/en/3.1.x/reference/Image.html#PIL.Image.Image.thumbnail) or [`resize`](http://pillow.readthedocs.io/en/3.1.x/reference/Image.html#PIL.Image.Image.thumbnail) methods. Then you'll need to crop out the center 224x224 portion of the image.\n\nColor channels of images are typically encoded as integers 0-255, but the model expected floats 0-1. You'll need to convert the values. It's easiest with a Numpy array, which you can get from a PIL image like so `np_image = np.array(pil_image)`.\n\nAs before, the network expects the images to be normalized in a specific way. For the means, it's `[0.485, 0.456, 0.406]` and for the standard deviations `[0.229, 0.224, 0.225]`. You'll want to subtract the means from each color channel, then divide by the standard deviation. \n\nAnd finally, PyTorch expects the color channel to be the first dimension but it's the third dimension in the PIL image and Numpy array. You can reorder dimensions using [`ndarray.transpose`](https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.ndarray.transpose.html). The color channel needs to be first and retain the order of the other two dimensions.", "_____no_output_____" ] ], [ [ "# See predict.process_image", "_____no_output_____" ] ], [ [ "To check your work, the function below converts a PyTorch tensor and displays it in the notebook. If your `process_image` function works, running the output through this function should return the original image (except for the cropped out portions).", "_____no_output_____" ] ], [ [ "def imshow(image, ax=None, title=None):\n \"\"\"Imshow for Tensor.\"\"\"\n if ax is None:\n fig, ax = plt.subplots()\n \n # PyTorch tensors assume the color channel is the first dimension\n # but matplotlib assumes is the third dimension\n image = image.numpy().transpose((1, 2, 0))\n \n # Undo preprocessing\n mean = np.array([0.485, 0.456, 0.406])\n std = np.array([0.229, 0.224, 0.225])\n image = std * image + mean\n \n # Image needs to be clipped between 0 and 1 or it looks like noise when displayed\n image = np.clip(image, 0, 1)\n \n ax.imshow(image)\n \n return ax\n\n#img = process_image('./flowers/valid/59/image_05034.jpg')\n#imshow(torch.from_numpy(img).float())", "_____no_output_____" ] ], [ [ "## Class Prediction\n\nOnce you can get images in the correct format, it's time to write a function for making predictions with your model. A common practice is to predict the top 5 or so (usually called top-$K$) most probable classes. You'll want to calculate the class probabilities then find the $K$ largest values.\n\nTo get the top $K$ largest values in a tensor use [`x.topk(k)`](http://pytorch.org/docs/master/torch.html#torch.topk). 
This method returns both the highest `k` probabilities and the indices of those probabilities corresponding to the classes. You need to convert from these indices to the actual class labels using `class_to_idx` which hopefully you added to the model or from an `ImageFolder` you used to load the data ([see here](#Save-the-checkpoint)). Make sure to invert the dictionary so you get a mapping from index to class as well.\n\nAgain, this method should take a path to an image and a model checkpoint, then return the probabilities and classes.\n\n```python\nprobs, classes = predict(image_path, model)\nprint(probs)\nprint(classes)\n> [ 0.01558163 0.01541934 0.01452626 0.01443549 0.01407339]\n> ['70', '3', '45', '62', '55']\n```", "_____no_output_____" ] ], [ [ "\npredict.predict('./flowers/valid/59/image_05034.jpg', get_saved_model(arch=arch))", "- Loaded model from a checkpoint -\n- Input features: - 25088\n" ] ], [ [ "## Sanity Checking\n\nNow that you can use a trained model for predictions, check to make sure it makes sense. Even if the testing accuracy is high, it's always good to check that there aren't obvious bugs. Use `matplotlib` to plot the probabilities for the top 5 classes as a bar graph, along with the input image. It should look like this:\n\n<img src='assets/inference_example.png' width=300px>\n\nYou can convert from the class integer encoding to actual flower names with the `cat_to_name.json` file (should have been loaded earlier in the notebook). To show a PyTorch tensor as an image, use the `imshow` function defined above.", "_____no_output_____" ] ], [ [ "# TODO: Display an image along with the top 5 classes\nmodel = get_saved_model(arch=arch)\nmodel.eval()\ncheckpoint = torch.load(get_checkpoints_path(), map_location=str(get_device()))\nimg_path = './flowers/valid/76/image_02458.jpg'\nreal_class = checkpoint['cat_to_name'].get(str(img_path.split('/')[3]))\n\nprint(real_class)\nimg = predict.process_image(img_path)\nimshow(torch.from_numpy(img).float())\n\nprobs, classes = predict.predict(img_path, model)\nidx_to_class = {v: k for k, v in checkpoint['class_to_idx'].items()}\n\ncategories = [checkpoint['cat_to_name'].get(str(idx_to_class.get(x.item()))) for x in classes]\n\nfig, ax = plt.subplots()\n\nax.barh(categories, probs, align='center')\n\nplt.show()\n", "- Loaded model from a checkpoint -\n- Input features: - 25088\nmorning glory\n" ] ] ]
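Since the notebook defers to `predict.process_image` and `predict.predict`, here is a sketch of what those helpers might look like, following the preprocessing recipe described above (shortest side to 256 keeping aspect ratio, center crop to 224, scale to 0-1, normalize with the ImageNet statistics, transpose to channel-first) and the top-K recipe using `topk` with an inverted `class_to_idx`. The function bodies are assumptions for illustration, not the project's actual module.

```python
import numpy as np
import torch
from PIL import Image

def process_image(image_path):
    """Resize, center-crop, scale, normalize and transpose an image, per the recipe above."""
    img = Image.open(image_path).convert("RGB")   # assume/force 3 color channels
    w, h = img.size
    if w < h:                                     # shortest side -> 256, keep aspect ratio
        img = img.resize((256, int(256 * h / w)))
    else:
        img = img.resize((int(256 * w / h), 256))
    left = (img.width - 224) // 2                 # center 224x224 crop
    top = (img.height - 224) // 2
    img = img.crop((left, top, left + 224, top + 224))
    np_image = np.array(img) / 255.0              # 0-255 ints -> 0-1 floats
    mean = np.array([0.485, 0.456, 0.406])
    std = np.array([0.229, 0.224, 0.225])
    np_image = (np_image - mean) / std            # per-channel ImageNet normalization
    return np_image.transpose((2, 0, 1))          # HWC -> CHW, channel first

def predict(image_path, model, topk=5):
    """Return the top-k probabilities and class labels for one image."""
    tensor = torch.from_numpy(process_image(image_path)).float().unsqueeze(0)
    with torch.no_grad():
        probs = torch.softmax(model(tensor), dim=1)
    top_p, top_idx = probs.topk(topk, dim=1)
    idx_to_class = {v: k for k, v in model.class_to_idx.items()}  # invert the mapping
    classes = [idx_to_class[i] for i in top_idx[0].tolist()]
    return top_p[0].tolist(), classes
```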
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
d013acfaef8ff9b833ce7843db6371b226c96b11
18,230
ipynb
Jupyter Notebook
week_5/testtestetst.ipynb
tazV2/Applied-Data-Science-Capstone
6f471e4c4dfd7de7e3d85d51b67660eb138a0cd2
[ "MIT" ]
null
null
null
week_5/testtestetst.ipynb
tazV2/Applied-Data-Science-Capstone
6f471e4c4dfd7de7e3d85d51b67660eb138a0cd2
[ "MIT" ]
1
2021-08-22T17:23:06.000Z
2021-08-22T17:23:06.000Z
week_5/testtestetst.ipynb
tazV2/Applied-Data-Science-Capstone
6f471e4c4dfd7de7e3d85d51b67660eb138a0cd2
[ "MIT" ]
null
null
null
39.288793
251
0.418376
[ [ [ "import requests\nimport pandas as pd\nfrom geopy.geocoders import Nominatim\nimport folium\nimport numpy as np\nfrom sklearn.cluster import KMeans\nimport matplotlib.cm as cm\nimport matplotlib.colors as colors\n\n", "_____no_output_____" ], [ "def get_lat_long(address):\n geolocator = Nominatim(user_agent=\"foursquare_agent\")\n location = geolocator.geocode(address)\n latitude = location.latitude\n longitude = location.longitude\n print(address, latitude, longitude)\n return latitude, longitude\nget_lat_long(\"new york\")", "new york 40.7127281 -74.0060152\n" ], [ "def get_category_type(row):\n try:\n categories_list = row['categories']\n except:\n categories_list = row['venue.categories']\n \n if len(categories_list) == 0:\n return None\n else:\n return categories_list[0]['name']", "_____no_output_____" ], [ "def saving_data(area_name, latitude, longitude):\n url = f\"https://api.foursquare.com/v2/venues/explore?client_id=N40W0THAJDZYKLHLHWRDBU01LIMNXBMXZ03X5ZOGZSRVMLSR&client_secret=GZNJSGG3SLAYGMN4BJCC5VIOFBT2EW10OUF1V20ZO4P3V2YT&ll={latitude},{longitude}&v=20180604&radius=30000&limit=1000\"\n results = requests.get(url).json()\n dataframe = pd.json_normalize(results[\"response\"][\"groups\"][0][\"items\"])\n filtered_columns = ['venue.name', 'venue.categories'] + [col for col in dataframe.columns if col.startswith('venue.location.')] + ['venue.id']\n dataframe_filtered = dataframe.loc[:, filtered_columns]\n dataframe_filtered['venue.categories'] = dataframe_filtered.apply(get_category_type, axis=1) # category for each row\n dataframe_filtered.columns = [col.split('.')[-1] for col in dataframe_filtered.columns] # clean columns\n \n dataframe_filtered.to_pickle(f\"{area_name}.pkl\") # save to pickle so that wont call the api over and over again\n return dataframe_filtered", "_____no_output_____" ], [ "saving_data(\"new york\", 40.7127281, -74.0060152)", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code" ] ]
d013cc1b2c3e7c687606dbd84ad8f82983337fbb
11,295
ipynb
Jupyter Notebook
decision tree 2.ipynb
Pranao-S/Multiple-Salesman-Travelling-Problem
cbe4b2c5bf460e32a19f178491df2d5525acad35
[ "MIT" ]
null
null
null
decision tree 2.ipynb
Pranao-S/Multiple-Salesman-Travelling-Problem
cbe4b2c5bf460e32a19f178491df2d5525acad35
[ "MIT" ]
null
null
null
decision tree 2.ipynb
Pranao-S/Multiple-Salesman-Travelling-Problem
cbe4b2c5bf460e32a19f178491df2d5525acad35
[ "MIT" ]
null
null
null
37.400662
104
0.402568
[ [ [ "import pandas as pd\ndf = pd.read_csv(\"zomato.csv\", encoding=\"ISO-8859-1\") \n## encoding was not mentioned so error in output\n## the csv file trying to read was different than the default encoding\ndf.head()", "_____no_output_____" ], [ "## first the target variable has to be found out and then accordingly operations have to performed", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code" ] ]
d013d61f17b8cc3c65d9bad2b2f6c1b759b100e8
54,184
ipynb
Jupyter Notebook
day1/notebooks/.ipynb_checkpoints/lgde_basic-checkpoint.ipynb
sw-woo/data-engineer-basic-training
40a1b4637482b237176811556cf4cacc006e0a9d
[ "Apache-2.0" ]
1
2021-08-09T07:31:00.000Z
2021-08-09T07:31:00.000Z
day1/notebooks/.ipynb_checkpoints/lgde_basic-checkpoint.ipynb
sw-woo/data-engineer-basic-training
40a1b4637482b237176811556cf4cacc006e0a9d
[ "Apache-2.0" ]
null
null
null
day1/notebooks/.ipynb_checkpoints/lgde_basic-checkpoint.ipynb
sw-woo/data-engineer-basic-training
40a1b4637482b237176811556cf4cacc006e0a9d
[ "Apache-2.0" ]
null
null
null
48.249332
6,985
0.509302
[ [ [ "# 스파크를 이용한 기본 지표 생성 예제\n> 기본 지표를 생성하는 데에 있어, 정해진 틀을 그대로 따라하기 보다, 가장 직관적인 방법을 지속적으로 개선하는 과정을 설명하기 위한 예제입니다. \n첫 번째 예제인 만큼 지표의 복잡도를 줄이기 위해 해당 서비스를 오픈 일자는 2020/10/25 이며, 지표를 집계하는 시점은 2020/10/26 일 입니다\n\n* 원본 데이터를 그대로 읽는 방법\n* dataframe api 를 이용하는 방법\n* spark.sql 을 이용하는 방법\n* 기본 지표 (DAU, PU)를 추출하는 예제 실습\n* 날짜에 대한 필터를 넣는 방법\n* 날짜에 대한 필터를 데이터 소스에 적용하는 방법\n* 기본 지표 (ARPU, ARPPU)를 추출하는 예제 실습\n* 스칼라 값을 가져와서 다음 질의문에 적용하는 방법\n* 누적 금액을 구할 때에 단순한 방법\n* 서비스 오픈 일자의 디멘젼 테이블을 생성하는 방법\n* 널 값에 대한 처리하는 방법\n* 생성된 데이터 프레임을 저장하는 방법\n* 전 일자 데이터를 가져오는 방법\n* 요약 지표를 생성할 때에 단순한 방법\n* 팩트 테이블을 활용하는 방법", "_____no_output_____" ] ], [ [ "from pyspark.sql import SparkSession\nfrom pyspark.sql.functions import *\n\nspark = SparkSession \\\n .builder \\\n .appName(\"Data Engineer Basic Day3\") \\\n .config(\"spark.dataengineer.basic.day3\", \"tutorial-1\") \\\n .getOrCreate()", "_____no_output_____" ], [ "spark.read.option(\"inferSchema\", \"true\").option(\"header\", \"true\").json(\"access/20201026\") \\\n.withColumn(\"gmt_time\", expr(\"from_unixtime(a_time, 'yyyy-MM-dd HH:mm:ss')\")) \\\n.withColumn(\"localtime\", expr(\"from_utc_timestamp(from_unixtime(a_time), 'Asis/Seoul')\")) \\\n.show()\n\n# from_utc_timestamp(from_unixtime(epoch_time), tz_name) as local_time", "+------+------+----------+--------------------+-----+-------------------+-------------------+\n| a_id| a_tag| a_time| a_timestamp|a_uid| gmt_time| localtime|\n+------+------+----------+--------------------+-----+-------------------+-------------------+\n|logout|access|1603647200|2020-10-26 02:33:...| 1|2020-10-25 17:33:20|2020-10-25 17:33:20|\n|logout|access|1603650200|2020-10-26 03:23:...| 2|2020-10-25 18:23:20|2020-10-25 18:23:20|\n|logout|access|1603659200|2020-10-26 05:53:...| 3|2020-10-25 20:53:20|2020-10-25 20:53:20|\n|logout|access|1603664200|2020-10-26 07:16:...| 4|2020-10-25 22:16:40|2020-10-25 22:16:40|\n|logout|access|1603669500|2020-10-26 08:45:...| 5|2020-10-25 23:45:00|2020-10-25 23:45:00|\n| login|access|1603645200|2020-10-26 02:00:...| 1|2020-10-25 17:00:00|2020-10-25 17:00:00|\n| login|access|1603649200|2020-10-26 03:06:...| 2|2020-10-25 18:06:40|2020-10-25 18:06:40|\n| login|access|1603653200|2020-10-26 04:13:...| 2|2020-10-25 19:13:20|2020-10-25 19:13:20|\n| login|access|1603657200|2020-10-26 05:20:...| 3|2020-10-25 20:20:00|2020-10-25 20:20:00|\n| login|access|1603660200|2020-10-26 06:10:...| 4|2020-10-25 21:10:00|2020-10-25 21:10:00|\n| login|access|1603664500|2020-10-26 07:21:...| 4|2020-10-25 22:21:40|2020-10-25 22:21:40|\n| login|access|1603666500|2020-10-26 07:55:...| 5|2020-10-25 22:55:00|2020-10-25 22:55:00|\n| login|access|1603670500|2020-10-26 09:01:...| 6|2020-10-26 00:01:40|2020-10-26 00:01:40|\n| login|access|1603673500|2020-10-26 09:51:...| 7|2020-10-26 00:51:40|2020-10-26 00:51:40|\n| login|access|1603674500|2020-10-26 10:08:...| 8|2020-10-26 01:08:20|2020-10-26 01:08:20|\n| login|access|1603675500|2020-10-26 10:25:...| 9|2020-10-26 01:25:00|2020-10-26 01:25:00|\n+------+------+----------+--------------------+-----+-------------------+-------------------+\n\n" ], [ "# spark.conf.unset(\"spark.sql.session.timeZone\")\nspark.conf.get(\"spark.sql.session.timeZone\") # 'Etc/UTC' => 이게 원인이었네 ... 
초기 값의 TimeZone 설정이 제대로 안 되어 있었음.;ㅁ;\nspark.conf.set(\"spark.sql.session.timeZone\", \"Asia/Seoul\")\nspark.conf.get(\"spark.sql.session.timeZone\")", "_____no_output_____" ], [ "spark.read.option(\"inferSchema\", \"true\").option(\"header\", \"true\").json(\"access/20201026\") \\\n.withColumn(\"gmt_time\", expr(\"from_unixtime(a_time, 'yyyy-MM-dd HH:mm:ss')\")) \\\n.withColumn(\"localtime\", expr(\"from_utc_timestamp(from_unixtime(a_time), 'Asis/Seoul')\")) \\\n.show()", "+------+------+----------+--------------------+-----+-------------------+-------------------+\n| a_id| a_tag| a_time| a_timestamp|a_uid| gmt_time| localtime|\n+------+------+----------+--------------------+-----+-------------------+-------------------+\n|logout|access|1603647200|2020-10-26 02:33:...| 1|2020-10-26 02:33:20|2020-10-26 02:33:20|\n|logout|access|1603650200|2020-10-26 03:23:...| 2|2020-10-26 03:23:20|2020-10-26 03:23:20|\n|logout|access|1603659200|2020-10-26 05:53:...| 3|2020-10-26 05:53:20|2020-10-26 05:53:20|\n|logout|access|1603664200|2020-10-26 07:16:...| 4|2020-10-26 07:16:40|2020-10-26 07:16:40|\n|logout|access|1603669500|2020-10-26 08:45:...| 5|2020-10-26 08:45:00|2020-10-26 08:45:00|\n| login|access|1603645200|2020-10-26 02:00:...| 1|2020-10-26 02:00:00|2020-10-26 02:00:00|\n| login|access|1603649200|2020-10-26 03:06:...| 2|2020-10-26 03:06:40|2020-10-26 03:06:40|\n| login|access|1603653200|2020-10-26 04:13:...| 2|2020-10-26 04:13:20|2020-10-26 04:13:20|\n| login|access|1603657200|2020-10-26 05:20:...| 3|2020-10-26 05:20:00|2020-10-26 05:20:00|\n| login|access|1603660200|2020-10-26 06:10:...| 4|2020-10-26 06:10:00|2020-10-26 06:10:00|\n| login|access|1603664500|2020-10-26 07:21:...| 4|2020-10-26 07:21:40|2020-10-26 07:21:40|\n| login|access|1603666500|2020-10-26 07:55:...| 5|2020-10-26 07:55:00|2020-10-26 07:55:00|\n| login|access|1603670500|2020-10-26 09:01:...| 6|2020-10-26 09:01:40|2020-10-26 09:01:40|\n| login|access|1603673500|2020-10-26 09:51:...| 7|2020-10-26 09:51:40|2020-10-26 09:51:40|\n| login|access|1603674500|2020-10-26 10:08:...| 8|2020-10-26 10:08:20|2020-10-26 10:08:20|\n| login|access|1603675500|2020-10-26 10:25:...| 9|2020-10-26 10:25:00|2020-10-26 10:25:00|\n+------+------+----------+--------------------+-----+-------------------+-------------------+\n\n" ], [ "sc = spark.sparkContext\nspark.read.option(\"inferSchema\", \"true\").option(\"header\", \"true\").parquet(\"user/20201025\").createOrReplaceTempView(\"user\")\n\npWhere=\"\"\nspark.read.option(\"inferSchema\", \"true\").option(\"header\", \"true\").parquet(\"purchase/20201025\").withColumn(\"p_time\", expr(\"from_unixtime(p_time)\")).createOrReplaceTempView(\"purchase\")\n\naWhere=\"\"\nspark.read.option(\"inferSchema\", \"true\").option(\"header\", \"true\").json(\"access/20201026\").withColumn(\"a_time\", expr(\"from_unixtime(a_time)\")).createOrReplaceTempView(\"access\")", "_____no_output_____" ], [ "spark.sql(\"desc user\").show()\nspark.sql(\"desc purchase\").show()\nspark.sql(\"desc access\").show()", "+--------+---------+-------+\n|col_name|data_type|comment|\n+--------+---------+-------+\n| u_id| int| null|\n| u_name| string| null|\n|u_gender| string| null|\n|u_signup| int| null|\n+--------+---------+-------+\n\n+--------+---------+-------+\n|col_name|data_type|comment|\n+--------+---------+-------+\n| p_time| string| null|\n| p_uid| int| null|\n| p_id| int| null|\n| p_name| string| null|\n| p_amoun| int| null|\n+--------+---------+-------+\n\n+-----------+---------+-------+\n| 
col_name|data_type|comment|\n+-----------+---------+-------+\n| a_id| string| null|\n| a_tag| string| null|\n| a_time| string| null|\n|a_timestamp| string| null|\n| a_uid| string| null|\n+-----------+---------+-------+\n\n" ] ], [ [ "### Task 1. Using the given data, compute DAU and PU as of 2020/10/25\n* DAU : Daily Active User, the number of distinct users who accessed the service that day\n - count the unique a_uid values from log_access\n* PU : Purchase User, the number of distinct users who made a purchase that day\n - count the unique p_uid values from tbl_purchase\n\n> Before computing the values, create a [createOrReplaceTempView](https://spark.apache.org/docs/latest/api/python/pyspark.sql.html?highlight=createorreplace#pyspark.sql.DataFrame.createOrReplaceTempView) so that Spark SQL can be used instead of the Spark API", "_____no_output_____" ] ], [ [ "# DAU - access\nspark.sql(\"select a_time, a_uid from access\").show()\ndau = spark.sql(\"select count(distinct a_uid) as DAU from access where a_time >= '2020-10-25 00:00:00' and a_time < '2020-10-26 00:00:00'\")\ndau.show()", "+-------------------+-----+\n| a_time|a_uid|\n+-------------------+-----+\n|2020-10-25 17:33:20| 1|\n|2020-10-25 18:23:20| 2|\n|2020-10-25 20:53:20| 3|\n|2020-10-25 22:16:40| 4|\n|2020-10-25 23:45:00| 5|\n|2020-10-25 17:00:00| 1|\n|2020-10-25 18:06:40| 2|\n|2020-10-25 19:13:20| 2|\n|2020-10-25 20:20:00| 3|\n|2020-10-25 21:10:00| 4|\n|2020-10-25 22:21:40| 4|\n|2020-10-25 22:55:00| 5|\n|2020-10-26 00:01:40| 6|\n|2020-10-26 00:51:40| 7|\n|2020-10-26 01:08:20| 8|\n|2020-10-26 01:25:00| 9|\n+-------------------+-----+\n\n+---+\n|DAU|\n+---+\n| 5|\n+---+\n\n" ], [ "# PU - purchase\nspark.sql(\"select p_time, p_uid from purchase\").show()\npu = spark.sql(\"select count(distinct p_uid) as PU from purchase where p_time >= '2020-10-25 00:00:00' and p_time < '2020-10-26 00:00:00'\")\npu.show()", "+-------------------+-----+\n| p_time|p_uid|\n+-------------------+-----+\n|2020-10-25 18:45:50| 1|\n|2020-10-26 06:45:55| 1|\n|2020-10-26 00:51:40| 2|\n|2020-10-25 18:55:55| 3|\n|2020-10-26 01:08:20| 4|\n|2020-10-25 22:45:55| 5|\n|2020-10-25 22:49:15| 5|\n+-------------------+-----+\n\n+---+\n| PU|\n+---+\n| 3|\n+---+\n\n" ], [ "v_dau = dau.collect()[0][\"DAU\"]\nv_pu = pu.collect()[0][\"PU\"]", "_____no_output_____" ] ], [ [ "### Task 2. Using the given data, compute ARPU and ARPPU as of 2020/10/25\n* ARPU : Average Revenue Per User\n - total purchase amount for the day (Total Purchase Amount) / number of users who accessed the service that day (DAU)\n* ARPPU : Average Revenue Per Purchase User\n - total purchase amount for the day (Total Purchase Amount) / number of purchasing users that day (PU)", "_____no_output_____" ] ], [ [ "# ARPU - total purchase amount, dau\n\nquery=\"select sum(p_amount) / {} from purchase where p_time >= '2020-10-25 00:00:00' and p_time < '2020-10-26 00:00:00'\".format(v_dau)\nprint(query)\n\ntotal_purchase_amount = spark.sql(\"select sum(p_amount) as total_purchase_amount from purchase where p_time >= '2020-10-25 00:00:00' and p_time < '2020-10-26 00:00:00'\")\ntotal_purchase_amount.show()\n\nspark.sql(\"select sum(p_amount) / 5 from purchase where p_time >= '2020-10-25 00:00:00' and p_time < '2020-10-26 00:00:00'\").show()\n\nspark.sql(\"select sum(p_amount) / {} as ARPU from purchase where p_time >= '2020-10-25 00:00:00' and p_time < '2020-10-26 00:00:00'\".format(v_dau)).show()", "select sum(p_amount) / 5 from purchase where p_time >= '2020-10-25 00:00:00' and p_time < '2020-10-26 00:00:00'\n" ], [ "# ARPPU - total purchase amount, pu\nv_amt = total_purchase_amount.collect()[0][\"total_purchase_amount\"]\nprint(\"| ARPPU | {} |\".format(v_amt / v_pu))", "| ARPPU | 3000000.0 |\n" ] ], [ [ "### Task 3. 
Using the given data, compute the \"cumulative revenue\" and \"cumulative number of users\" as of 2020/10/26\n* Cumulative revenue : 10/25 (launch) ~ now\n - read the full log and sum the purchase amounts\n - or accumulate per-user revenue, store it, and reuse it\n* Cumulative number of users : 10/25 (launch) ~ now\n - read the full log and count the distinct users\n - or accumulate per-user access info, store it, and reuse it", "_____no_output_____" ] ], [ [ "# cumulative revenue\nspark.sql(\"select sum(p_amount) from purchase \").show()\n\n# cumulative number of users\nspark.sql(\"select count(distinct a_uid) from access\").show()", "+-------------+\n|sum(p_amount)|\n+-------------+\n| 16700000|\n+-------------+\n\n+---------------------+\n|count(DISTINCT a_uid)|\n+---------------------+\n| 9|\n+---------------------+\n\n" ] ], [ [ "### Task 4. Design and build a dimension table to accumulate per-user information\n\n#### User Dimension table design\n| Column name | Column type | Column description |\n| :- | :-: | :- |\n| d_uid | int | user id |\n| d_name | string | customer name |\n| d_pamount | int | cumulative purchase amount |\n| d_pcount | int | cumulative purchase count |\n| d_acount | int | cumulative access count |", "_____no_output_____" ] ], [ [ "# For the launch day only, we exceptionally write a separate one-off job\n# \n# 1. Access logs have the largest record count, so extract each user's access count for the day\n# The number of logins is treated as the access count - records with only a logout mean a lost login or the previous day's log, so those cases are excluded\nspark.sql(\"describe access\").show()\nspark.sql(\"select * from access where a_id = 'login' and a_time >= '2020-10-25 00:00:00' and a_time < '2020-10-26 00:00:00'\").show()\nuids = spark.sql(\"select a_uid, count(a_uid) as acount from access where a_id = 'login' and a_time >= '2020-10-25 00:00:00' and a_time < '2020-10-26 00:00:00' group by a_uid\")\nuids.show()", "+--------+---------+-------+\n|col_name|data_type|comment|\n+--------+---------+-------+\n| a_time| string| null|\n| a_uid| int| null|\n| a_id| string| null|\n+--------+---------+-------+\n\n+-------------------+-----+-----+\n| a_time|a_uid| a_id|\n+-------------------+-----+-----+\n|2020-10-25 17:00:00| 1|login|\n|2020-10-25 18:06:40| 2|login|\n|2020-10-25 19:13:20| 2|login|\n|2020-10-25 20:20:00| 3|login|\n|2020-10-25 21:10:00| 4|login|\n|2020-10-25 22:21:40| 4|login|\n|2020-10-25 22:55:00| 5|login|\n+-------------------+-----+-----+\n\n+-----+------+\n|a_uid|acount|\n+-----+------+\n| 1| 1|\n| 3| 1|\n| 5| 1|\n| 4| 2|\n| 2| 2|\n+-----+------+\n\n" ], [ "# 2. Extract each user's total purchase amount and purchase count for the day\nspark.sql(\"describe purchase\").show()\namts = spark.sql(\"select p_uid, sum(p_amount) as pamount, count(p_uid) as pcount from purchase where p_time >= '2020-10-25 00:00:00' and p_time < '2020-10-26 00:00:00' group by p_uid\")\namts.show()", "+--------+---------+-------+\n|col_name|data_type|comment|\n+--------+---------+-------+\n| p_time| string| null|\n| p_uid| int| null|\n| p_id| int| null|\n| p_name| string| null|\n|p_amount| int| null|\n+--------+---------+-------+\n\n+-----+-------+------+\n|p_uid|pamount|pcount|\n+-----+-------+------+\n| 1|2000000| 1|\n| 3|1000000| 1|\n| 5|6000000| 2|\n+-----+-------+------+\n\n" ], [ "# 3. 
Access count + total purchase amount + purchase count per user (uids + amts)\nuids.printSchema()\namts.printSchema()\n\ndim1 = uids.join(amts, uids[\"a_uid\"] == amts[\"p_uid\"], how=\"left\").sort(uids[\"a_uid\"].asc())\ndim2 = dim1.withColumnRenamed(\"a_uid\", \"d_uid\") \\\n.withColumnRenamed(\"acount\", \"d_acount\") \\\n.drop(\"p_uid\") \\\n.withColumnRenamed(\"pamount\", \"d_pamount\") \\\n.withColumnRenamed(\"pcount\", \"d_pcount\")\ndim2.show()", "root\n |-- a_uid: integer (nullable = true)\n |-- acount: long (nullable = false)\n\nroot\n |-- p_uid: integer (nullable = true)\n |-- pamount: long (nullable = true)\n |-- pcount: long (nullable = false)\n\n+-----+--------+---------+--------+\n|d_uid|d_acount|d_pamount|d_pcount|\n+-----+--------+---------+--------+\n| 1| 1| 2000000| 1|\n| 2| 2| null| null|\n| 3| 1| 1000000| 1|\n| 4| 2| null| null|\n| 5| 1| 6000000| 2|\n+-----+--------+---------+--------+\n\n" ], [ "# 4. Attach the user profile information\nuser = spark.sql(\"select * from user\")\nuser.show()\n\ndim3 = dim2.join(user, dim2[\"d_uid\"] == user[\"u_id\"], \"left\")\ndim4 = dim3.withColumnRenamed(\"u_name\", \"d_name\") \\\n.withColumnRenamed(\"u_gender\", \"d_gender\")\n\ndim5 = dim4.select(\"d_uid\", \"d_name\", \"d_gender\", \"d_acount\", \"d_pamount\", \"d_pcount\")\ndimension = dim5.na.fill({\"d_pamount\":0, \"d_pcount\":0})\ndimension.show()", "+----+----------+--------+--------+\n|u_id| u_name|u_gender|u_signup|\n+----+----------+--------+--------+\n| 1| 정휘센| 남|19580808|\n| 2| 김싸이언| 남|19590201|\n| 3| 박트롬| 여|19951030|\n| 4| 청소기| 남|19770329|\n| 5|유코드제로| 여|20021029|\n| 6| 윤디오스| 남|20040101|\n| 7| 임모바일| 남|20040807|\n| 8| 조노트북| 여|20161201|\n| 9| 최컴퓨터| 남|20201124|\n+----+----------+--------+--------+\n\n+-----+----------+--------+--------+---------+--------+\n|d_uid| d_name|d_gender|d_acount|d_pamount|d_pcount|\n+-----+----------+--------+--------+---------+--------+\n| 1| 정휘센| 남| 1| 2000000| 1|\n| 2| 김싸이언| 남| 2| 0| 0|\n| 3| 박트롬| 여| 1| 1000000| 1|\n| 4| 청소기| 남| 2| 0| 0|\n| 5|유코드제로| 여| 1| 6000000| 2|\n+-----+----------+--------+--------+---------+--------+\n\n" ], [ "# 4. Save the data under a per-date path so that the next day's job can use it\n# - ./users/dt=20201025/\ntarget=\"./users/dt=20201025\"\ndimension.write.mode(\"overwrite\").parquet(target)", "_____no_output_____" ] ], [ [ "### Task 5. Use the previous day's dimension to build cumulative access and revenue metrics\n", "_____no_output_____" ] ], [ [ "# reuse the customer state as of the previous day\nyesterday = spark.read.parquet(target)\nyesterday.sort(yesterday[\"d_uid\"].asc()).show()", "+-----+----------+--------+--------+---------+--------+\n|d_uid| d_name|d_gender|d_acount|d_pamount|d_pcount|\n+-----+----------+--------+--------+---------+--------+\n| 1| 정휘센| 남| 1| 2000000| 1|\n| 2| 김싸이언| 남| 2| 0| 0|\n| 3| 박트롬| 여| 1| 1000000| 1|\n| 4| 청소기| 남| 2| 0| 0|\n| 5|유코드제로| 여| 1| 6000000| 2|\n+-----+----------+--------+--------+---------+--------+\n\n" ], [ "# 5. 
The next day, build the same metrics again, this time accumulated on top of the previous day's values\n# Build the full population covering both the customers already in the table and today's new customers\nyesterday.show()\n\n# today's login counts; this must be defined before the uid set below joins against accs\naccs = spark.sql(\"select a_uid, count(a_uid) as acount from access where a_id = 'login' and a_time >= '2020-10-26 00:00:00' and a_time < '2020-10-27 00:00:00' group by a_uid\")\n\n# new users have to be added, so extract the uid column covering the full population\nuid = yesterday.select(\"d_uid\").join(accs.select(\"a_uid\"), yesterday.d_uid == accs.a_uid, \"full_outer\") \\\n.withColumn(\"uid\", when(yesterday.d_uid.isNull(), accs.a_uid).otherwise(yesterday.d_uid)) \\\n.select(\"uid\")\nuid.show()\n\n# join name and gender on uid\nuser.show()\ndim1 = uid.join(user, uid.uid == user.u_id).select(uid.uid, user.u_name, user.u_gender)\ndim1.show()\n\n# join yesterday's cumulative access count, purchase amount and purchase count onto the new population\nprint(\"dim2\")\ndim2 = dim1.join(yesterday, dim1.uid == yesterday.d_uid, \"left\") \\\n.select(dim1.uid, dim1.u_name, dim1.u_gender, yesterday.d_acount, yesterday.d_pamount, yesterday.d_pcount) \\\n.na.fill({\"d_acount\":0, \"d_pamount\":0, \"d_pcount\":0})\n\ndim2.show()\n\n# 6. Add today's access counts, revenue and purchase counts \naccs.show()\n\nprint(\"dim3\")\ndim3 = dim2.join(accs, dim2.uid == accs.a_uid, \"left\") \\\n.withColumn(\"total_acount\", dim2.d_acount + when(accs.acount.isNull(), 0).otherwise(accs.acount)) \\\n.select(\"uid\", \"u_name\", \"u_gender\", \"total_acount\", \"d_pamount\", \"d_pcount\") \\\n.withColumnRenamed(\"total_acount\", \"d_acount\")\n\ndim3.show()\n\n# add today's revenue\ndim3.show()\n\namts = spark.sql(\"select p_uid, sum(p_amount) as pamount, count(p_uid) as pcount from purchase where p_time >= '2020-10-26 00:00:00' and p_time < '2020-10-27 00:00:00' group by p_uid\")\namts.show()\n\nprint(\"dim4\")\ndim4 = dim3.join(amts, dim3.uid == amts.p_uid, \"left\") \\\n.withColumn(\"total_pamount\", dim3.d_pamount + when(amts.pamount.isNull(), 0).otherwise(amts.pamount)) \\\n.withColumn(\"total_pcount\", dim3.d_pcount + when(amts.pcount.isNull(), 0).otherwise(amts.pcount)) \\\n.drop(\"d_pamount\", \"d_pcount\") \\\n.withColumnRenamed(\"uid\", \"d_uid\") \\\n.withColumnRenamed(\"u_name\", \"d_name\") \\\n.withColumnRenamed(\"u_gender\", \"d_gender\") \\\n.withColumnRenamed(\"total_pamount\", \"d_pamount\") \\\n.withColumnRenamed(\"total_pcount\", \"d_pcount\") \\\n.select(\"d_uid\", \"d_name\", \"d_gender\", \"d_acount\", \"d_pamount\", \"d_pcount\")\n\ndimension = dim4.sort(dim4.d_uid.asc()).coalesce(1)\ndimension.show()", "+-----+----------+--------+--------+---------+--------+\n|d_uid| d_name|d_gender|d_acount|d_pamount|d_pcount|\n+-----+----------+--------+--------+---------+--------+\n| 5|유코드제로| 여| 1| 6000000| 2|\n| 2| 김싸이언| 남| 2| 0| 0|\n| 1| 정휘센| 남| 1| 2000000| 1|\n| 3| 박트롬| 여| 1| 1000000| 1|\n| 4| 청소기| 남| 2| 0| 0|\n+-----+----------+--------+--------+---------+--------+\n\n+---+\n|uid|\n+---+\n| 1|\n| 6|\n| 3|\n| 5|\n| 9|\n| 4|\n| 8|\n| 7|\n| 2|\n+---+\n\n+----+----------+--------+--------+\n|u_id| u_name|u_gender|u_signup|\n+----+----------+--------+--------+\n| 1| 정휘센| 남|19580808|\n| 2| 김싸이언| 남|19590201|\n| 3| 박트롬| 여|19951030|\n| 4| 청소기| 남|19770329|\n| 5|유코드제로| 여|20021029|\n| 6| 윤디오스| 남|20040101|\n| 7| 임모바일| 남|20040807|\n| 8| 조노트북| 여|20161201|\n| 9| 최컴퓨터| 남|20201124|\n+----+----------+--------+--------+\n\n+---+----------+--------+\n|uid| u_name|u_gender|\n+---+----------+--------+\n| 1| 정휘센| 남|\n| 6| 윤디오스| 남|\n| 3| 박트롬| 여|\n| 5|유코드제로| 여|\n| 9| 최컴퓨터| 남|\n| 4| 청소기| 남|\n| 8| 조노트북| 여|\n| 7| 임모바일| 남|\n| 2| 김싸이언| 남|\n+---+----------+--------+\n\ndim2\n+---+----------+--------+--------+---------+--------+\n|uid| 
u_name|u_gender|d_acount|d_pamount|d_pcount|\n+---+----------+--------+--------+---------+--------+\n| 1| 정휘센| 남| 1| 2000000| 1|\n| 6| 윤디오스| 남| 0| 0| 0|\n| 3| 박트롬| 여| 1| 1000000| 1|\n| 5|유코드제로| 여| 1| 6000000| 2|\n| 9| 최컴퓨터| 남| 0| 0| 0|\n| 4| 청소기| 남| 2| 0| 0|\n| 8| 조노트북| 여| 0| 0| 0|\n| 7| 임모바일| 남| 0| 0| 0|\n| 2| 김싸이언| 남| 2| 0| 0|\n+---+----------+--------+--------+---------+--------+\n\n+-----+------+\n|a_uid|acount|\n+-----+------+\n| 6| 1|\n| 9| 1|\n| 8| 1|\n| 7| 1|\n+-----+------+\n\ndim3\n+---+----------+--------+--------+---------+--------+\n|uid| u_name|u_gender|d_acount|d_pamount|d_pcount|\n+---+----------+--------+--------+---------+--------+\n| 1| 정휘센| 남| 1| 2000000| 1|\n| 6| 윤디오스| 남| 1| 0| 0|\n| 3| 박트롬| 여| 1| 1000000| 1|\n| 5|유코드제로| 여| 1| 6000000| 2|\n| 9| 최컴퓨터| 남| 1| 0| 0|\n| 4| 청소기| 남| 2| 0| 0|\n| 8| 조노트북| 여| 1| 0| 0|\n| 7| 임모바일| 남| 1| 0| 0|\n| 2| 김싸이언| 남| 2| 0| 0|\n+---+----------+--------+--------+---------+--------+\n\n+---+----------+--------+--------+---------+--------+\n|uid| u_name|u_gender|d_acount|d_pamount|d_pcount|\n+---+----------+--------+--------+---------+--------+\n| 1| 정휘센| 남| 1| 2000000| 1|\n| 6| 윤디오스| 남| 1| 0| 0|\n| 3| 박트롬| 여| 1| 1000000| 1|\n| 5|유코드제로| 여| 1| 6000000| 2|\n| 9| 최컴퓨터| 남| 1| 0| 0|\n| 4| 청소기| 남| 2| 0| 0|\n| 8| 조노트북| 여| 1| 0| 0|\n| 7| 임모바일| 남| 1| 0| 0|\n| 2| 김싸이언| 남| 2| 0| 0|\n+---+----------+--------+--------+---------+--------+\n\n+-----+-------+------+\n|p_uid|pamount|pcount|\n+-----+-------+------+\n| 1|1800000| 1|\n| 4|4500000| 1|\n| 2|1400000| 1|\n+-----+-------+------+\n\ndim4\n+-----+----------+--------+--------+---------+--------+\n|d_uid| d_name|d_gender|d_acount|d_pamount|d_pcount|\n+-----+----------+--------+--------+---------+--------+\n| 1| 정휘센| 남| 1| 3800000| 2|\n| 2| 김싸이언| 남| 2| 1400000| 3|\n| 3| 박트롬| 여| 1| 1000000| 1|\n| 4| 청소기| 남| 2| 4500000| 3|\n| 5|유코드제로| 여| 1| 6000000| 1|\n| 6| 윤디오스| 남| 1| 0| 1|\n| 7| 임모바일| 남| 1| 0| 1|\n| 8| 조노트북| 여| 1| 0| 1|\n| 9| 최컴퓨터| 남| 1| 0| 1|\n+-----+----------+--------+--------+---------+--------+\n\n" ], [ "# 7. Save the resulting dimension under the 20201026 path\ntarget=\"./users/dt=20201026\"\ndimension.write.mode(\"overwrite\").parquet(target)", "_____no_output_____" ] ], [ [ "### Task 6. 
Write practice examples for the inner, left_outer, right_outer and full_outer joins\n", "_____no_output_____" ] ], [ [ "valuesA = [('A',1),('B',2),('C',3),('D',4)]\nA = spark.createDataFrame(valuesA,['a_id','a_value'])\n \nvaluesB = [('C',10),('D',20),('E',30),('F',40)]\nB = spark.createDataFrame(valuesB,['b_id','b_value'])\n\nA.join(B, A.a_id == B.b_id, \"inner\").sort(A.a_id.asc()).show() # C, D\n# A.join(B, A.a_id == B.b_id, \"left\").sort(A.a_id.asc()).show() # A, B, C, D\n# A.join(B, A.a_id == B.b_id, \"right\").sort(B.b_id.asc()).show() # C, D, E, F\nA.join(B, A.a_id == B.b_id, \"left_outer\").sort(A.a_id.asc()).show() # A, B, C, D\nA.join(B, A.a_id == B.b_id, \"right_outer\").sort(B.b_id.asc()).show() # C, D, E, F\nA.join(B, A.a_id == B.b_id, \"full_outer\").sort(A.a_id.asc_nulls_last(), B.b_id.asc_nulls_last()).show() # A, B, C, D, E, F", "+----+-------+----+-------+\n|a_id|a_value|b_id|b_value|\n+----+-------+----+-------+\n| C| 3| C| 10|\n| D| 4| D| 20|\n+----+-------+----+-------+\n\n+----+-------+----+-------+\n|a_id|a_value|b_id|b_value|\n+----+-------+----+-------+\n| A| 1|null| null|\n| B| 2|null| null|\n| C| 3| C| 10|\n| D| 4| D| 20|\n+----+-------+----+-------+\n\n+----+-------+----+-------+\n|a_id|a_value|b_id|b_value|\n+----+-------+----+-------+\n| C| 3| C| 10|\n| D| 4| D| 20|\n|null| null| E| 30|\n|null| null| F| 40|\n+----+-------+----+-------+\n\n+----+-------+----+-------+\n|a_id|a_value|b_id|b_value|\n+----+-------+----+-------+\n| A| 1|null| null|\n| B| 2|null| null|\n| C| 3| C| 10|\n| D| 4| D| 20|\n|null| null| E| 30|\n|null| null| F| 40|\n+----+-------+----+-------+\n\n" ], [ "# building the merged id column from a full outer join\nA.join(B, A.a_id == B.b_id, \"full_outer\").withColumn(\"id\", expr(\"case when a_id is null then b_id else a_id end\")).select(\"id\").sort(\"id\").show()\n# F.when(df.age > 4, 1).when(df.age < 3, -1).otherwise(0)", "+---+\n| id|\n+---+\n| A|\n| B|\n| C|\n| D|\n| E|\n| F|\n+---+\n\n" ], [ "A.join(B, A.a_id == B.b_id, \"full_outer\").show()", "+----+-------+----+-------+\n|a_id|a_value|b_id|b_value|\n+----+-------+----+-------+\n|null| null| F| 40|\n|null| null| E| 30|\n| B| 2|null| null|\n| D| 4| D| 20|\n| C| 3| C| 10|\n| A| 1|null| null|\n+----+-------+----+-------+\n\n" ], [ "A.join(B, A.a_id == B.b_id, \"full_outer\").withColumn(\"id\", when(A.a_id.isNull(), B.b_id).otherwise(A.a_id)).select(\"id\").sort(\"id\").show()", "+---+\n| id|\n+---+\n| A|\n| B|\n| C|\n| D|\n| E|\n| F|\n+---+\n\n" ] ], [ [ "### Task 7. Build fact data from the previous day's dimension and today's logs.\n", "_____no_output_____", "### Task 8. Using the fact data, extract a revenue-by-gender metric as of 2020/10/25\n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown" ] ]
d013e4a870d5cd265cc0e1b047dd620cd66bc4d0
23,562
ipynb
Jupyter Notebook
Week1/Counting Labels and weight loss function.ipynb
Armos05/Ai-for-Medical-Diagnosis-Specialization
fab2a6799335beccdeb1255fba08a779909c2d8a
[ "MIT" ]
1
2021-11-21T13:35:58.000Z
2021-11-21T13:35:58.000Z
Week1/Counting Labels and weight loss function.ipynb
Armos05/Ai-for-Medical-Diagnosis-Specialization
fab2a6799335beccdeb1255fba08a779909c2d8a
[ "MIT" ]
null
null
null
Week1/Counting Labels and weight loss function.ipynb
Armos05/Ai-for-Medical-Diagnosis-Specialization
fab2a6799335beccdeb1255fba08a779909c2d8a
[ "MIT" ]
null
null
null
31.542169
308
0.575673
[ [ [ "## AI for Medicine Course 1 Week 1 lecture exercises", "_____no_output_____" ], [ "<a name=\"counting-labels\"></a>\n# Counting labels\n\nAs you saw in the lecture videos, one way to avoid having class imbalance impact the loss function is to weight the losses differently. To choose the weights, you first need to calculate the class frequencies.\n\nFor this exercise, you'll just get the count of each label. Later on, you'll use the concepts practiced here to calculate frequencies in the assignment!", "_____no_output_____" ] ], [ [ "# Import the necessary packages\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n%matplotlib inline", "_____no_output_____" ], [ "# Read csv file containing training datadata\ntrain_df = pd.read_csv(\"nih/train-small.csv\")", "_____no_output_____" ], [ "# Count up the number of instances of each class (drop non-class columns from the counts)\nclass_counts = train_df.sum().drop(['Image','PatientId'])\n", "_____no_output_____" ], [ "for column in class_counts.keys():\n print(f\"The class {column} has {train_df[column].sum()} samples\")", "_____no_output_____" ], [ "# Plot up the distribution of counts\nsns.barplot(class_counts.values, class_counts.index, color='b')\nplt.title('Distribution of Classes for Training Dataset', fontsize=15)\nplt.xlabel('Number of Patients', fontsize=15)\nplt.ylabel('Diseases', fontsize=15)\nplt.show()", "_____no_output_____" ] ], [ [ "<a name=\"weighted-loss\"></a>\n# Weighted Loss function\n", "_____no_output_____" ], [ "Below is an example of calculating weighted loss. In the assignment, you will calculate a weighted loss function. This sample code will give you some intuition for what the weighted loss function is doing, and also help you practice some syntax you will use in the graded assignment.\n\nFor this example, you'll first define a hypothetical set of true labels and then a set of predictions.\n\nRun the next cell to create the 'ground truth' labels.", "_____no_output_____" ] ], [ [ "# Generate an array of 4 binary label values, 3 positive and 1 negative\ny_true = np.array(\n [[1],\n [1],\n [1],\n [0]])\nprint(f\"y_true: \\n{y_true}\")", "_____no_output_____" ] ], [ [ "### Two models\nTo better understand the loss function, you will pretend that you have two models.\n- Model 1 always outputs a 0.9 for any example that it's given. \n- Model 2 always outputs a 0.1 for any example that it's given.", "_____no_output_____" ] ], [ [ "# Make model predictions that are always 0.9 for all examples\ny_pred_1 = 0.9 * np.ones(y_true.shape)\nprint(f\"y_pred_1: \\n{y_pred_1}\")\nprint()\ny_pred_2 = 0.1 * np.ones(y_true.shape)\nprint(f\"y_pred_2: \\n{y_pred_2}\")", "_____no_output_____" ] ], [ [ "### Problems with the regular loss function\nThe learning goal here is to notice that with a regular loss function (not a weighted loss), the model that always outputs 0.9 has a smaller loss (performs better) than model 2.\n- This is because there is a class imbalance, where 3 out of the 4 labels are 1.\n- If the data were perfectly balanced, (two labels were 1, and two labels were 0), model 1 and model 2 would have the same loss. 
Each would get two examples correct and two examples incorrect.\n- However, since the data is not balanced, the regular loss function implies that model 1 is better than model 2.", "_____no_output_____" ], [ "### Notice the shortcomings of a regular non-weighted loss\n\nSee what loss you get from these two models (model 1 always predicts 0.9, and model 2 always predicts 0.1), see what the regular (unweighted) loss function is for each model.", "_____no_output_____" ] ], [ [ "loss_reg_1 = -1 * np.sum(y_true * np.log(y_pred_1)) + \\\n -1 * np.sum((1 - y_true) * np.log(1 - y_pred_1))\nprint(f\"loss_reg_1: {loss_reg_1:.4f}\")", "_____no_output_____" ], [ "loss_reg_2 = -1 * np.sum(y_true * np.log(y_pred_2)) + \\\n -1 * np.sum((1 - y_true) * np.log(1 - y_pred_2))\nprint(f\"loss_reg_2: {loss_reg_2:.4f}\")", "_____no_output_____" ], [ "print(f\"When the model 1 always predicts 0.9, the regular loss is {loss_reg_1:.4f}\")\nprint(f\"When the model 2 always predicts 0.1, the regular loss is {loss_reg_2:.4f}\")", "_____no_output_____" ] ], [ [ "Notice that the loss function gives a greater loss when the predictions are always 0.1, because the data is imbalanced, and has three labels of `1` but only one label for `0`.\n\nGiven a class imbalance with more positive labels, the regular loss function implies that the model with the higher prediction of 0.9 performs better than the model with the lower prediction of 0.1.", "_____no_output_____" ], [ "### How a weighted loss treats both models the same\nWith a weighted loss function, you will get the same weighted loss when the predictions are all 0.9 versus when the predictions are all 0.1. \n- Notice how a prediction of 0.9 is 0.1 away from the positive label of 1.\n- Also notice how a prediction of 0.1 is 0.1 away from the negative label of 0.\n- So model 1 and 2 are \"symmetric\" along the midpoint of 0.5, if you plot them on a number line between 0 and 1.", "_____no_output_____", "### Weighted Loss Equation\nCalculate the loss for the zero-th label (column at index 0)\n\n- The loss is made up of two terms. To make it easier to read the code, you will calculate each of these terms separately. We are giving each of these two terms a name for explanatory purposes, but these are not officially called $loss_{pos}$ or $loss_{neg}$\n\n - $loss_{pos}$: we'll use this to refer to the loss where the actual label is positive (the positive examples).\n - $loss_{neg}$: we'll use this to refer to the loss where the actual label is negative (the negative examples). \n\n$$ loss^{(i)} = loss_{pos}^{(i)} + loss_{neg}^{(i)} $$\n\n$$loss_{pos}^{(i)} = -1 \\\\times weight_{pos}^{(i)} \\\\times y^{(i)} \\\\times log(\\\\hat{y}^{(i)})$$\n\n$$loss_{neg}^{(i)} = -1 \\\\times weight_{neg}^{(i)} \\\\times (1- y^{(i)}) \\\\times log(1 - \\\\hat{y}^{(i)})$$", "_____no_output_____" ], [ "Since this sample dataset is small enough, you can calculate the positive weight to be used in the weighted loss function. 
To get the positive weight, count how many NEGATIVE labels are present, divided by the total number of examples.\n\nIn this case, there is one negative label, and four total examples.\n\nSimilarly, the negative weight is the fraction of positive labels.\n\nRun the next cell to define positive and negative weights.", "_____no_output_____" ] ], [ [ "# calculate the positive weight as the fraction of negative labels\nw_p = 1/4\n\n# calculate the negative weight as the fraction of positive labels\nw_n = 3/4\n\nprint(f\"positive weight w_p: {w_p}\")\nprint(f\"negative weight w_n: {w_n}\")", "_____no_output_____" ] ], [ [ "### Model 1 weighted loss\nRun the next two cells to calculate the two loss terms separately.\n\nHere, `loss_1_pos` and `loss_1_neg` are calculated using the `y_pred_1` predictions.", "_____no_output_____" ] ], [ [ "# Calculate and print out the first term in the loss function, which we are calling 'loss_pos'\nloss_1_pos = -1 * np.sum(w_p * y_true * np.log(y_pred_1 ))\nprint(f\"loss_1_pos: {loss_1_pos:.4f}\")", "_____no_output_____" ], [ "# Calculate and print out the second term in the loss function, which we're calling 'loss_neg'\nloss_1_neg = -1 * np.sum(w_n * (1 - y_true) * np.log(1 - y_pred_1 ))\nprint(f\"loss_1_neg: {loss_1_neg:.4f}\")", "_____no_output_____" ], [ "# Sum positive and negative losses to calculate total loss\nloss_1 = loss_1_pos + loss_1_neg\nprint(f\"loss_1: {loss_1:.4f}\")", "_____no_output_____" ] ], [ [ "### Model 2 weighted loss\n\nNow do the same calculations for when the predictions are from `y_pred_2`. Calculate the two terms of the weighted loss function and add them together.", "_____no_output_____" ] ], [ [ "# Calculate and print out the first term in the loss function, which we are calling 'loss_pos'\nloss_2_pos = -1 * np.sum(w_p * y_true * np.log(y_pred_2))\nprint(f\"loss_2_pos: {loss_2_pos:.4f}\")", "_____no_output_____" ], [ "# Calculate and print out the second term in the loss function, which we're calling 'loss_neg'\nloss_2_neg = -1 * np.sum(w_n * (1 - y_true) * np.log(1 - y_pred_2))\nprint(f\"loss_2_neg: {loss_2_neg:.4f}\")", "_____no_output_____" ], [ "# Sum positive and negative losses to calculate total loss when the prediction is y_pred_2\nloss_2 = loss_2_pos + loss_2_neg\nprint(f\"loss_2: {loss_2:.4f}\")", "_____no_output_____" ] ], [ [ "### Compare model 1 and model 2 weighted loss", "_____no_output_____" ] ], [ [ "print(f\"When the model always predicts 0.9, the total loss is {loss_1:.4f}\")\nprint(f\"When the model always predicts 0.1, the total loss is {loss_2:.4f}\")", "_____no_output_____" ] ], [ [ "### What do you notice?\nSince you used a weighted loss, the calculated loss is the same whether the model always predicts 0.9 or always predicts 0.1. 
\n\nYou may have also noticed that when you calculate each term of the weighted loss separately, there is a bit of symmetry when comparing between the two sets of predictions.", "_____no_output_____" ] ], [ [ "print(f\"loss_1_pos: {loss_1_pos:.4f} \\t loss_1_neg: {loss_1_neg:.4f}\")\nprint()\nprint(f\"loss_2_pos: {loss_2_pos:.4f} \\t loss_2_neg: {loss_2_neg:.4f}\")", "_____no_output_____" ] ], [ [ "Even though there is a class imbalance, where there are 3 positive labels but only one negative label, the weighted loss accounts for this by giving more weight to the negative label than to the positive label.", "_____no_output_____" ], [ "### Weighted Loss for more than one class\n\nIn this week's assignment, you will calculate the multi-class weighted loss (when there is more than one disease class that your model is learning to predict). Here, you can practice working with 2D numpy arrays, which will help you implement the multi-class weighted loss in the graded assignment.\n\nYou will work with a dataset that has two disease classes (two columns)", "_____no_output_____" ] ], [ [ "# View the labels (true values) that you will practice with\ny_true = np.array(\n [[1,0],\n [1,0],\n [1,0],\n [1,0],\n [0,1]\n ])\ny_true", "_____no_output_____" ] ], [ [ "### Choosing axis=0 or axis=1\nYou will use `numpy.sum` to count the number of times column `0` has the value 0. \nFirst, notice the difference when you set axis=0 versus axis=1", "_____no_output_____" ] ], [ [ "# See what happens when you set axis=0\nprint(f\"using axis = 0 {np.sum(y_true,axis=0)}\")\n\n# Compare this to what happens when you set axis=1\nprint(f\"using axis = 1 {np.sum(y_true,axis=1)}\")", "_____no_output_____" ] ], [ [ "Notice that if you choose `axis=0`, the sum is taken for each of the two columns. This is what you want to do in this case. If you set `axis=1`, the sum is taken for each row.", "_____no_output_____" ], [ "### Calculate the weights\nPreviously, you visually inspected the data to calculate the fraction of negative and positive labels. Here, you can do this programmatically.", "_____no_output_____" ] ], [ [ "# set the positive weights as the fraction of negative labels (0) for each class (each column)\nw_p = np.sum(y_true == 0,axis=0) / y_true.shape[0]\nw_p", "_____no_output_____" ], [ "# set the negative weights as the fraction of positive labels (1) for each class\nw_n = np.sum(y_true == 1, axis=0) / y_true.shape[0]\nw_n", "_____no_output_____" ] ], [ [ "In the assignment, you will train a model to try and make useful predictions. In order to make this example easier to follow, you will pretend that your model always predicts the same value for every example.", "_____no_output_____" ] ], [ [ "# Set model predictions where all predictions are the same\ny_pred = np.ones(y_true.shape)\ny_pred[:,0] = 0.3 * y_pred[:,0]\ny_pred[:,1] = 0.7 * y_pred[:,1]\ny_pred", "_____no_output_____" ] ], [ [ "As before, calculate the two terms that make up the loss function. Notice that you are working with more than one class (represented by columns). 
In this case, there are two classes.\n\nStart by calculating the loss for class `0`.\n\n$$ loss^{(i)} = loss_{pos}^{(i)} + loss_{neg}^{(i)} $$\n\n$$loss_{pos}^{(i)} = -1 \\\\times weight_{pos}^{(i)} \\\\times y^{(i)} \\\\times log(\\\\hat{y}^{(i)})$$\n\n$$loss_{neg}^{(i)} = -1 \\\\times weight_{neg}^{(i)} \\\\times (1- y^{(i)}) \\\\times log(1 - \\\\hat{y}^{(i)})$$", "_____no_output_____" ], [ "View the zero column for the weights, true values, and predictions that you will use to calculate the loss from the positive predictions.", "_____no_output_____" ] ], [ [ "# Print and view column zero of the weight\nprint(f\"w_p[0]: {w_p[0]}\")\nprint(f\"y_true[:,0]: {y_true[:,0]}\")\nprint(f\"y_pred[:,0]: {y_pred[:,0]}\")", "_____no_output_____" ], [ "# calculate the loss from the positive predictions, for class 0\nloss_0_pos = -1 * np.sum(w_p[0] * \n y_true[:, 0] * \n np.log(y_pred[:, 0])\n )\nprint(f\"loss_0_pos: {loss_0_pos:.4f}\")", "_____no_output_____" ] ], [ [ "View the zero column for the weights, true values, and predictions that you will use to calculate the loss from the negative predictions.", "_____no_output_____" ] ], [ [ "# Print and view column zero of the weight\nprint(f\"w_n[0]: {w_n[0]}\")\nprint(f\"y_true[:,0]: {y_true[:,0]}\")\nprint(f\"y_pred[:,0]: {y_pred[:,0]}\")", "_____no_output_____" ], [ "# Calculate the loss from the negative predictions, for class 0\nloss_0_neg = -1 * np.sum( \n w_n[0] * \n (1 - y_true[:, 0]) * \n np.log(1 - y_pred[:, 0])\n )\nprint(f\"loss_0_neg: {loss_0_neg:.4f}\")", "_____no_output_____" ], [ "# add the two loss terms to get the total loss for class 0\nloss_0 = loss_0_neg + loss_0_pos\nprint(f\"loss_0: {loss_0:.4f}\")", "_____no_output_____" ] ], [ [ "Now you are familiar with the array slicing that you would use when there are multiple disease classes stored in a two-dimensional array.\n\n#### Now it's your turn!\n* Can you calculate the loss for class (column) `1`? ", "_____no_output_____" ] ], [ [ "# calculate the loss from the positive predictions, for class 1\nloss_1_pos = None", "_____no_output_____" ] ], [ [ "#### Expected output\n```CPP\nloss_1_pos: 0.2853\n```", "_____no_output_____" ] ], [ [ "# Calculate the loss from the negative predictions, for class 1\nloss_1_neg = None", "_____no_output_____" ] ], [ [ "#### Expected output\n```CPP\nloss_1_neg: 0.9632\n```", "_____no_output_____" ] ], [ [ "# add the two loss terms to get the total loss for class 1\nloss_1 = None", "_____no_output_____" ] ], [ [ "#### Expected output\n```CPP\nloss_1: 1.2485\n```", "_____no_output_____" ], [ "### Note\nThe data for the two classes (two columns) as well as the predictions were chosen so that you end up getting the same weighted loss for both categories. 
\n - In general, you will expect to calculate different weighted loss values for each disease category, as the model predictions and data will differ from one category to another.", "_____no_output_____" ], [ "If you want some help, please click on the green \"Solution\" cell below to reveal the solution.", "_____no_output_____" ], [ "<details> \n<summary>\n <font size=\"3\" color=\"darkgreen\"><b>Solution</b></font>\n</summary>\n<p>\n<code>\n-- # calculate the loss from the positive predictions, for class 1\nloss_1_pos = -1 * np.sum(w_p[1] * \n y_true[:, 1] * \n np.log(y_pred[:, 1])\n )\nprint(f\"loss_1_pos: {loss_1_pos:.4f}\")\n \n-- # Calculate the loss from the negative predictions, for class 1\nloss_1_neg = -1 * np.sum( \n w_n[1] * \n (1 - y_true[:, 1]) * \n np.log(1 - y_pred[:, 1])\n )\nprint(f\"loss_1_neg: {loss_1_neg:.4f}\")\n\n-- # add the two loss terms to get the total loss for class 1\nloss_1 = loss_1_neg + loss_1_pos\nprint(f\"loss_1: {loss_1:.4f}\")\n </code>\n</p>\n", "_____no_output_____" ], [ "### How this practice relates to and differs from the upcoming graded assignment\n- In the assignment, you will generalize this to calculating the loss for any number of classes.\n- Also in the assignment, you will learn how to avoid taking the log of zero by adding a small number (more details will be explained in the assignment).\n- Note that in the lecture videos and in this lecture notebook, you are taking the **sum** of losses for all examples. In the assignment, you will take the **average (the mean)** for all examples.\n- Finally, in the assignment, you will work with \"tensors\" in TensorFlow, so you will use the TensorFlow equivalents of the numpy operations (keras.mean instead of numpy.mean).", "_____no_output_____" ], [ "#### That's all for this lab. You now have a couple more tools you'll need for this week's assignment!", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ] ]
d013eb9c7ec8ed3a4b50947e81bfe8d6884ebe71
13,992
ipynb
Jupyter Notebook
1-NumPy/Numpy Exercises - Solutions.ipynb
BhavyaSree/pythonForDataScience
6b6f2faa7a2e327dbe0d4588d098e8dfcca35cb9
[ "Apache-2.0" ]
null
null
null
1-NumPy/Numpy Exercises - Solutions.ipynb
BhavyaSree/pythonForDataScience
6b6f2faa7a2e327dbe0d4588d098e8dfcca35cb9
[ "Apache-2.0" ]
null
null
null
1-NumPy/Numpy Exercises - Solutions.ipynb
BhavyaSree/pythonForDataScience
6b6f2faa7a2e327dbe0d4588d098e8dfcca35cb9
[ "Apache-2.0" ]
null
null
null
20.516129
165
0.451544
[ [ [ "___\n\n<a href='http://www.pieriandata.com'> <img src='../Pierian_Data_Logo.png' /></a>\n___", "_____no_output_____" ], [ "# NumPy Exercises - Solutions\n\nNow that we've learned about NumPy let's test your knowledge. We'll start off with a few simple tasks and then you'll be asked some more complicated questions.", "_____no_output_____" ], [ "#### Import NumPy as np", "_____no_output_____" ] ], [ [ "import numpy as np", "_____no_output_____" ] ], [ [ "#### Create an array of 10 zeros ", "_____no_output_____" ] ], [ [ "np.zeros(10)", "_____no_output_____" ] ], [ [ "#### Create an array of 10 ones", "_____no_output_____" ] ], [ [ "np.ones(10)", "_____no_output_____" ] ], [ [ "#### Create an array of 10 fives", "_____no_output_____" ] ], [ [ "np.ones(10) * 5", "_____no_output_____" ] ], [ [ "#### Create an array of the integers from 10 to 50", "_____no_output_____" ] ], [ [ "np.arange(10,51)", "_____no_output_____" ] ], [ [ "#### Create an array of all the even integers from 10 to 50", "_____no_output_____" ] ], [ [ "np.arange(10,51,2)", "_____no_output_____" ] ], [ [ "#### Create a 3x3 matrix with values ranging from 0 to 8", "_____no_output_____" ] ], [ [ "np.arange(9).reshape(3,3)", "_____no_output_____" ] ], [ [ "#### Create a 3x3 identity matrix", "_____no_output_____" ] ], [ [ "np.eye(3)", "_____no_output_____" ] ], [ [ "#### Use NumPy to generate a random number between 0 and 1", "_____no_output_____" ] ], [ [ "np.random.rand(1)", "_____no_output_____" ] ], [ [ "#### Use NumPy to generate an array of 25 random numbers sampled from a standard normal distribution", "_____no_output_____" ] ], [ [ "np.random.randn(25)", "_____no_output_____" ] ], [ [ "#### Create the following matrix:", "_____no_output_____" ] ], [ [ "np.arange(1,101).reshape(10,10) / 100", "_____no_output_____" ] ], [ [ "#### Create an array of 20 linearly spaced points between 0 and 1:", "_____no_output_____" ] ], [ [ "np.linspace(0,1,20)", "_____no_output_____" ] ], [ [ "## Numpy Indexing and Selection\n\nNow you will be given a few matrices, and be asked to replicate the resulting matrix outputs:", "_____no_output_____" ] ], [ [ "mat = np.arange(1,26).reshape(5,5)\nmat", "_____no_output_____" ], [ "# WRITE CODE HERE THAT REPRODUCES THE OUTPUT OF THE CELL BELOW\n# BE CAREFUL NOT TO RUN THE CELL BELOW, OTHERWISE YOU WON'T\n# BE ABLE TO SEE THE OUTPUT ANY MORE", "_____no_output_____" ], [ "mat[2:,1:]", "_____no_output_____" ], [ "# WRITE CODE HERE THAT REPRODUCES THE OUTPUT OF THE CELL BELOW\n# BE CAREFUL NOT TO RUN THE CELL BELOW, OTHERWISE YOU WON'T\n# BE ABLE TO SEE THE OUTPUT ANY MORE", "_____no_output_____" ], [ "mat[3,4]", "_____no_output_____" ], [ "# WRITE CODE HERE THAT REPRODUCES THE OUTPUT OF THE CELL BELOW\n# BE CAREFUL NOT TO RUN THE CELL BELOW, OTHERWISE YOU WON'T\n# BE ABLE TO SEE THE OUTPUT ANY MORE", "_____no_output_____" ], [ "mat[:3,1:2]", "_____no_output_____" ], [ "# WRITE CODE HERE THAT REPRODUCES THE OUTPUT OF THE CELL BELOW\n# BE CAREFUL NOT TO RUN THE CELL BELOW, OTHERWISE YOU WON'T\n# BE ABLE TO SEE THE OUTPUT ANY MORE", "_____no_output_____" ], [ "mat[4,:]", "_____no_output_____" ], [ "# WRITE CODE HERE THAT REPRODUCES THE OUTPUT OF THE CELL BELOW\n# BE CAREFUL NOT TO RUN THE CELL BELOW, OTHERWISE YOU WON'T\n# BE ABLE TO SEE THE OUTPUT ANY MORE", "_____no_output_____" ], [ "mat[3:5,:]", "_____no_output_____" ] ], [ [ "### Now do the following", "_____no_output_____" ], [ "#### Get the sum of all the values in mat", "_____no_output_____" ] ], [ [ "np.sum(mat)", "_____no_output_____" ] ], 
[ [ "#### Get the standard deviation of the values in mat", "_____no_output_____" ] ], [ [ "np.std(mat)", "_____no_output_____" ] ], [ [ "#### Get the sum of all the columns in mat", "_____no_output_____" ] ], [ [ "mat.sum(axis=0)", "_____no_output_____" ] ], [ [ "# Great Job!", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
d0140351011da4ddec63c88c4c45f914dde24568
110,044
ipynb
Jupyter Notebook
graphing/BasicGraphAssignment.ipynb
nolanpreilly/nolanpreilly.github.io
f2819689f0cc7b0ca675ea8d1b45ab7acb1e85ca
[ "CC0-1.0" ]
null
null
null
graphing/BasicGraphAssignment.ipynb
nolanpreilly/nolanpreilly.github.io
f2819689f0cc7b0ca675ea8d1b45ab7acb1e85ca
[ "CC0-1.0" ]
null
null
null
graphing/BasicGraphAssignment.ipynb
nolanpreilly/nolanpreilly.github.io
f2819689f0cc7b0ca675ea8d1b45ab7acb1e85ca
[ "CC0-1.0" ]
null
null
null
217.908911
38,608
0.902775
[ [ [ "# Opiods VA - Nolan Reilly ", "_____no_output_____" ] ], [ [ "import pandas as pd\n%matplotlib inline\nimport matplotlib.pyplot as plt\n\n", "_____no_output_____" ], [ "opiodsva = pd.read_csv('OpidsVA.csv') #importing data\nopiodsva.head()", "_____no_output_____" ] ], [ [ "## Do opioid overdoes tend to be associated with less affluent areas—that is, areas where families have lower incomes?\n", "_____no_output_____" ] ], [ [ "plt.scatter(opiodsva['MedianHouseholdIncome'], opiodsva['FPOO-Rate'])\nplt.xlabel('Median Household Income ($)')\nplt.ylabel('Opiod Overdoses')\nplt.suptitle(\"Median Household Income vs Opiod Overdoses\")\nplt.show()", "_____no_output_____" ] ], [ [ "### I used a scatterplot because they can easily show the realtionship between 2 variables based on the grouping of the data points. It appears that as median houshold income rises, expected overdoses goes down. ", "_____no_output_____" ], [ "## Some people who start with opioid addictions are reported to transition to heroin use. What is the relationship in Virginia counties between opioid overdoses and heroin overdoses? ", "_____no_output_____" ] ], [ [ "plt.scatter(opiodsva['FFHO-Rate'], opiodsva['FPOO-Rate'])\nplt.xlabel('Fentanyl/Heroin Overdoses')\nplt.ylabel('Opiod Overdoses')\nplt.suptitle('VA Opiod Overdoes vs Fentanyl Overdoses')\nplt.show()", "_____no_output_____" ] ], [ [ "### There is a relationship with high fentanyl/heroin overdoses increasing the number of opiod overdoses. The relationship is not as strong as I expected, I would like to see the reporting methods.", "_____no_output_____" ], [ "# Presidents", "_____no_output_____" ], [ "## Which states are associated with the greatest number of United States presidents in terms of the presidents’ birthplaces?", "_____no_output_____" ] ], [ [ "presidents = pd.read_csv('presidents.csv')", "_____no_output_____" ], [ "presidents.head()", "_____no_output_____" ], [ "presidents['State'].value_counts().plot(kind = 'bar')\nplt.xlabel('State')\nplt.ylabel('Presidents born')\nplt.suptitle('Presidential Birthplaces')\nplt.show()", "_____no_output_____" ] ], [ [ "### A bar chart appropriatly shows the values for each state. Virginia and Ohio are the two most common states for a US president to be born in. ", "_____no_output_____" ], [ "# Total NSA", "_____no_output_____" ], [ "## How have vehicle sales in the United States varied over time?", "_____no_output_____" ] ], [ [ "cars = pd.read_csv('TOTALNSA.csv')", "_____no_output_____" ], [ "cars.head()", "_____no_output_____" ], [ "plt.plot(cars['DATE'], cars['TOTALNSA'])\nplt.xlabel('Date')\nplt.ylabel('Car Sales')\nplt.suptitle('Monthly US Car Sales Since Jan 1 1970')\nplt.xticks(cars['DATE'])\nplt.show()", "_____no_output_____" ] ], [ [ "### Line charts are appropriate for time-series data as they show the changes over time. ", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ] ]
d014323970de6926dac3e82f078b2a2d40aeb82b
64,378
ipynb
Jupyter Notebook
code/wrangling/Data_wrangling_2.ipynb
yifeitung/University-Learning-Analytics
16f29477086c0d31cfa383436338f3b19b4cdecc
[ "MIT" ]
3
2019-09-28T18:51:33.000Z
2021-02-08T18:29:11.000Z
code/wrangling/Data_wrangling_2.ipynb
yifeitung/University-Learning-Analytics
16f29477086c0d31cfa383436338f3b19b4cdecc
[ "MIT" ]
null
null
null
code/wrangling/Data_wrangling_2.ipynb
yifeitung/University-Learning-Analytics
16f29477086c0d31cfa383436338f3b19b4cdecc
[ "MIT" ]
5
2019-09-28T19:06:10.000Z
2020-08-30T23:50:39.000Z
37.847149
305
0.292584
[ [ [ "import psycopg2\nimport pandas as pd\nimport pandas.io.sql as pd_sql\nimport numpy as np\nimport matplotlib.pyplot as plt\n", "_____no_output_____" ], [ "def connectDB(DB):\n # connect to the PostgreSQL server\n return psycopg2.connect(\n database=DB,\n user=\"postgres\",\n password=\"Georgetown16\",\n host=\"database-1.c5vispb5ezxg.us-east-1.rds.amazonaws.com\",\n port='5432')\n\ndef disconnectDB(conn):\n \n conn.close()\n", "_____no_output_____" ], [ "# connect to \"Dataset\" DB \nconn = connectDB(\"Dataset\")\n\n# extract everything from 'table_name' into a dataframe\ndf = pd_sql.read_sql(f\"select * from public.\\\"analysisFeatures\\\" \", con=conn).reset_index()\n\n#make sure that all columns are displayed in our dataframe\npd.set_option('display.max_column',50)\n\n#check dataframe\ndf.head(100)\n ", "_____no_output_____" ], [ "#count null values of date_unregistration\ndf['date_unregistration'].isnull().sum()", "_____no_output_____" ], [ "drop_list = ['reg_period','code_presentation','date_unregistration','pass_fail_ind','std_total_weight']\n\ndf = df.drop(drop_list, axis=1)\ndf.head(10)", "_____no_output_____" ], [ "df['module_domain'].value_counts()", "_____no_output_____" ], [ "df['code_module'].value_counts()", "_____no_output_____" ], [ "#mapping the columns\ndf['imd_band'] = df['imd_band'].map({'0-10%':0,'10-20':1,'20-30%':2,'30-40%':3,'40-50%':4,'50-60%':5,'60-70%':6,'70-80%':7,'80-90%':8,'90-100%':9})\ndf['module_domain'] = df['module_domain'].map({'SocialScience': 0,'STEM': 1})\ndf['code_module'] = df['code_module'].map({'AAA': 0,'BBB': 1,'CCC':2,'DDD':3,'EEE':4,'FFF':5,'GGG':6})\ndf['term'] = df['term'].map({'J': 0,'B': 1})\ndf['year'] = df['year'].map({'2013': 0,'2014': 1})\ndf['gender'] = df['gender'].map({'M': 0,'F': 1})\ndf['age_band'] = df['age_band'].map({'0-35': 0,'35-55': 1,'55<=':2})\ndf['region'] = df['region'].map({'Scotland': 0,'East Anglian Region': 1,'London Region':2,'South Region': 3,'North Western Region': 4,'West Midlands Region':5,'South West Region': 6,'East Midlands Region': 7,'South East Region':8,'Wales': 9,'Yorkshire Region': 10,'North Region':11,'Ireland':12})\ndf['final_result'] = df['final_result'].map({'Withdrawn':0, 'Fail':0,'Pass':1,'Distinction':1})\ndf['disability'] = df['disability'].map({'N':0,'Y':1})\ndf['highest_education'] = df['highest_education'].map({'No Formal quals':0,'Lower Than A Level':1,'A Level or Equivalent':2,'HE Qualification':3,'Post Graduate Qualification':4})", "_____no_output_____" ], [ "df.head(10)", "_____no_output_____" ], [ "# write dataframe to database\nfrom sqlalchemy import create_engine\nengine = create_engine('postgresql://postgres:[email protected]:5432/Dataset')\ndf.to_sql('featureSTG1', engine, if_exists='replace')", "_____no_output_____" ], [ "disconnectDB(conn)", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
d01433790a98fac258436a6bc819a17663d25162
25,511
ipynb
Jupyter Notebook
credit_risk_resampling.ipynb
talibkateeb/Logistic-Regression-Credit-Risk-Analysis
52cb58bfcd7713265c92f5d3e6bde95609c4f03b
[ "MIT" ]
null
null
null
credit_risk_resampling.ipynb
talibkateeb/Logistic-Regression-Credit-Risk-Analysis
52cb58bfcd7713265c92f5d3e6bde95609c4f03b
[ "MIT" ]
null
null
null
credit_risk_resampling.ipynb
talibkateeb/Logistic-Regression-Credit-Risk-Analysis
52cb58bfcd7713265c92f5d3e6bde95609c4f03b
[ "MIT" ]
null
null
null
29.977673
583
0.409862
[ [ [ "# Import the modules\nimport numpy as np\nimport pandas as pd\nfrom pathlib import Path\nfrom sklearn.metrics import balanced_accuracy_score\nfrom sklearn.metrics import confusion_matrix\nfrom imblearn.metrics import classification_report_imbalanced\n\nimport warnings\nwarnings.filterwarnings('ignore')", "_____no_output_____" ] ], [ [ "---", "_____no_output_____" ] ], [ [ "# Read the CSV file from the Resources folder into a Pandas DataFrame\nloans_df = pd.read_csv(Path('Resources/lending_data.csv'))\n\n# Review the DataFrame\ndisplay(loans_df.head())\ndisplay(loans_df.tail())", "_____no_output_____" ], [ "# Separate the data into labels and features\n# Separate the y variable, the labels\ny = loans_df['loan_status']\n\n# Separate the X variable, the features\nX = loans_df.drop(columns=['loan_status'])", "_____no_output_____" ], [ "# Review the y variable Series\ndisplay(y.head())\ndisplay(y.tail())", "_____no_output_____" ], [ "# Review the X variable DataFrame\ndisplay(X.head())\ndisplay(X.tail())", "_____no_output_____" ], [ "# Check the balance of our target values\ny.value_counts()", "_____no_output_____" ], [ "# Import the train_test_learn module\nfrom sklearn.model_selection import train_test_split\n\n# Split the data using train_test_split\n# Assign a random_state of 1 to the function\ntrain_X, test_X, train_y, test_y = train_test_split(X, y, random_state=1)", "_____no_output_____" ] ], [ [ "---", "_____no_output_____" ] ], [ [ "# Import the LogisticRegression module from SKLearn\nfrom sklearn.linear_model import LogisticRegression\n\n# Instantiate the Logistic Regression model\n# Assign a random_state parameter of 1 to the model\nlogistic_regression_model = LogisticRegression(random_state=1)\n\n# Fit the model using training data\nlogistic_regression_model.fit(train_X, train_y)", "_____no_output_____" ], [ "# Make a prediction using the testing data\ntesting_predictions = logistic_regression_model.predict(test_X)", "_____no_output_____" ], [ "# Print the balanced_accuracy score of the model\nbalanced_accuracy_score(test_y, testing_predictions)", "_____no_output_____" ], [ "# Generate a confusion matrix for the model\nconfusion_matrix(test_y, testing_predictions)", "_____no_output_____" ], [ "# Print the classification report for the model\nprint(classification_report_imbalanced(test_y, testing_predictions))", " pre rec spe f1 geo iba sup\n\n 0 1.00 0.99 0.91 1.00 0.95 0.91 18765\n 1 0.85 0.91 0.99 0.88 0.95 0.90 619\n\navg / total 0.99 0.99 0.91 0.99 0.95 0.91 19384\n\n" ] ], [ [ "**Question:** How well does the logistic regression model predict both the `0` (healthy loan) and `1` (high-risk loan) labels?\n\n**Answer:** The model appears to predict both of them with really well. It predicts the healthy loan almost perfectly, and predicts the high risk loan a little less accuratley but still very high. Both their precision and recall scores are high as well as their F-1 score. The healthy loan has perfect on 2/3 and 0.99 on the recall. While the high risk loan has a 0.85 precision, 0.91 recall, and 0.88 F-1 score. However, due to the imbalance we cannot be sure that this is actually true, and that the results are not skewed due to the low value counts of the high risk loans. 
", "_____no_output_____" ] ], [ [ "# Import the RandomOverSampler module form imbalanced-learn\nfrom imblearn.over_sampling import RandomOverSampler\n\n# Instantiate the random oversampler model\n# Assign a random_state parameter of 1 to the model\nrandom_oversampler = RandomOverSampler(random_state=1)\n\n# Fit the original training data to the random_oversampler model\nX_resampled, y_resampled = random_oversampler.fit_resample(train_X, train_y)", "_____no_output_____" ], [ "# Count the distinct values of the resampled labels data\ny_resampled.value_counts()", "_____no_output_____" ], [ "# Instantiate the Logistic Regression model\n# Assign a random_state parameter of 1 to the model\nnew_logistic_regression_model = LogisticRegression(random_state=1)\n\n# Fit the model using the resampled training data\nnew_logistic_regression_model.fit(X_resampled, y_resampled)\n\n# Make a prediction using the testing data\noversampled_predictions = new_logistic_regression_model.predict(test_X)", "_____no_output_____" ], [ "# Print the balanced_accuracy score of the model \nbalanced_accuracy_score(test_y, oversampled_predictions)", "_____no_output_____" ], [ "# Generate a confusion matrix for the model\nconfusion_matrix(test_y, oversampled_predictions)", "_____no_output_____" ], [ "# Print the classification report for the model\nprint(classification_report_imbalanced(test_y, oversampled_predictions))", " pre rec spe f1 geo iba sup\n\n 0 1.00 0.99 0.99 1.00 0.99 0.99 18765\n 1 0.84 0.99 0.99 0.91 0.99 0.99 619\n\navg / total 0.99 0.99 0.99 0.99 0.99 0.99 19384\n\n" ] ], [ [ "**Question:** How well does the logistic regression model, fit with oversampled data, predict both the `0` (healthy loan) and `1` (high-risk loan) labels?\n\n**Answer:** Overall, the logistic regression model fit with oversampled data predicts the healthy and high risk loans better than the original non-oversampled data. Even though the original model had high scores for accuracy, precision, recall, and F-1, it appears the new oversampled model had higher scores in all categories. So the oversampled model predicts better than the one fit with original data.", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ] ]
d0143be95b65891978d90761b6e379c1ca323ed7
10,178
ipynb
Jupyter Notebook
03_Tests_et_boucles.ipynb
Tofull/introduction_python
0e68a2a585dea63fa10e5b08535172e4ee140d92
[ "MIT" ]
3
2018-03-15T08:53:28.000Z
2018-03-15T20:46:45.000Z
03_Tests_et_boucles.ipynb
Tofull/introduction_python
0e68a2a585dea63fa10e5b08535172e4ee140d92
[ "MIT" ]
null
null
null
03_Tests_et_boucles.ipynb
Tofull/introduction_python
0e68a2a585dea63fa10e5b08535172e4ee140d92
[ "MIT" ]
null
null
null
22.222707
132
0.501179
[ [ [ "# Introduction to Python", "_____no_output_____" ], [ "> presented by Loïc Messal", "_____no_output_____" ], [ "## Introduction to control flow\n", "_____no_output_____" ], [ "### Conditional tests", "_____no_output_____" ], [ "They make it possible to execute statements under certain conditions.", "_____no_output_____" ] ], [ [ "age = 17\nif age < 18:\n print(\"Minor\") # executed if and only if the condition is true", "Minor\n" ], [ "age = 19\nif age < 18:\n print(\"Minor\") # executed if and only if the condition is true\nelse:\n print(\"Adult\") # executed if and only if the condition is false", "Adult\n" ], [ "employeur = \"JLR\"\n# employeur = \"Jakarto\"\n# employeur = \"Another company\"\n\n# Comment/uncomment the values of the employeur variable to test.\n\nif employeur == \"JLR\":\n # executed if and only if the condition employeur == \"JLR\" is true\n richesse_statut = \"rich\"\nelif employeur == \"Jakarto\":\n # executed if and only if the condition employeur == \"Jakarto\" is true \n # and no previous condition was met\n richesse_statut = \"coming soon\" \nelse:\n # executed if and only if no previous condition was met\n richesse_statut = \"probably not\"\n \nprint(\"Wealth of an employee of {}: {}\".format(employeur, richesse_statut))", "Wealth of an employee of JLR: rich\n" ] ], [ [ "### Loops", "_____no_output_____" ], [ "Loops make it possible to iterate over iterables (objects made up of several elements).", "_____no_output_____" ] ], [ [ "un_iterable = []\nun_iterable.append({\"nom\": \"Messal\", \"prénom\": \"Loïc\", \"employeur\": \"Jakarto\", \"age\": 23})\nun_iterable.append({\"nom\": \"Lassem\", \"prénom\": \"Ciol\", \"employeur\": \"Otrakaj\", \"age\": 17})\nun_iterable.append({\"nom\": \"Alssem\", \"prénom\": \"Icol\", \"employeur\": \"Torakaj\", \"age\": 20})\nun_iterable", "_____no_output_____" ], [ "for item in un_iterable:\n print(\"{} {} works at {}.\".format(item[\"prénom\"], item[\"nom\"], item[\"employeur\"]))", "Loïc Messal works at Jakarto.\nCiol Lassem works at Otrakaj.\nIcol Alssem works at Torakaj.\n" ] ], [ [ "Sequences can be generated with the `range()` function.", "_____no_output_____" ] ], [ [ "for compteur in range(5): # range(5) generates a sequence from 0 to 5 (excluded)\n print(compteur)", "0\n1\n2\n3\n4\n" ], [ "for compteur in range(1, 5+1): # range(1, 5+1) generates a sequence from 1 to 5+1 (excluded)\n print(compteur)", "1\n2\n3\n4\n5\n" ], [ "for index in range(len(un_iterable)):\n item = un_iterable[index] # access the item from its index\n print(\"Item {}: {} {} works at {}.\".format(index, item[\"prénom\"], item[\"nom\"], item[\"employeur\"]))", "Item 0: Loïc Messal works at Jakarto.\nItem 1: Ciol Lassem works at Otrakaj.\nItem 2: Icol Alssem works at Torakaj.\n" ], [ "for index, item in enumerate(un_iterable): # enumerate allows iterating while getting the index AND the item\n print(\"Item {}: {} {} works at {}.\".format(index, item[\"prénom\"], item[\"nom\"], item[\"employeur\"]))", "Item 0: Loïc Messal works at Jakarto.\nItem 1: Ciol Lassem works at Otrakaj.\nItem 2: Icol Alssem works at Torakaj.\n" ], [ "compteur = 0\nstop = 5\nwhile compteur < stop: # executes the following statements as long as the condition is true\n print(compteur)\n compteur = compteur + 1", "0\n1\n2\n3\n4\n" ] ], [ [ "Loops can be controlled with certain keywords:\n- `continue` skips to the next iteration without executing the statements that follow\n- `break` exits the loop early", "_____no_output_____" ] ], [ [ "for index, item in enumerate(un_iterable):\n if item[\"age\"] < 18:\n continue # If the condition is true, skip to the next iteration.\n print(\"Item {}: {} {} (adult) works at {}.\".format(index, item[\"prénom\"], item[\"nom\"], item[\"employeur\"]))", "Item 0: Loïc Messal (adult) works at Jakarto.\nItem 2: Icol Alssem (adult) works at Torakaj.\n" ], [ "for index, item in enumerate(un_iterable):\n print(\"Item {}: {} {} works at {}.\".format(index, item[\"prénom\"], item[\"nom\"], item[\"employeur\"]))\n if item[\"prénom\"] == \"Loïc\":\n break # Stops the loop if the condition is true", "Item 0: Loïc Messal works at Jakarto.\n" ] ], [ [ "[Next chapter: Functions](04_Fonctions.ipynb)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ] ]
d0143e2e1d488d3ce091ec4391edf4a6ad7399bf
16,373
ipynb
Jupyter Notebook
2020.07.2400_classification/.ipynb_checkpoints/LR_knn9-checkpoint.ipynb
danhtaihoang/classification
2c38012c28d50d4727f9242c792c4105a3a15fef
[ "MIT" ]
null
null
null
2020.07.2400_classification/.ipynb_checkpoints/LR_knn9-checkpoint.ipynb
danhtaihoang/classification
2c38012c28d50d4727f9242c792c4105a3a15fef
[ "MIT" ]
null
null
null
2020.07.2400_classification/.ipynb_checkpoints/LR_knn9-checkpoint.ipynb
danhtaihoang/classification
2c38012c28d50d4727f9242c792c4105a3a15fef
[ "MIT" ]
null
null
null
41.34596
198
0.592988
[ [ [ "## Logistic Regression", "_____no_output_____" ] ], [ [ "import numpy as np\nimport pandas as pd\n\nfrom sklearn.model_selection import train_test_split,KFold\nfrom sklearn.utils import shuffle\nfrom sklearn.metrics import confusion_matrix,accuracy_score,precision_score,\\\nrecall_score,roc_curve,auc\n\nimport expectation_reflection as ER\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.linear_model import SGDClassifier\nfrom sklearn.model_selection import GridSearchCV\n\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nfrom sklearn.preprocessing import MinMaxScaler\nfrom function import split_train_test,make_data_balance", "_____no_output_____" ], [ "np.random.seed(1)", "_____no_output_____" ] ], [ [ "First of all, the processed data are imported.", "_____no_output_____" ] ], [ [ "#data_list = ['1paradox','2peptide','3stigma']\n#data_list = np.loadtxt('data_list.txt',dtype='str')\ndata_list = np.loadtxt('data_list_30sets.txt',dtype='str')\n#data_list = ['9coag']\n\nprint(data_list)", "['1paradox' '2peptide' '3stigma' '4nki' '5mental' '6smoking' '7anemia'\n '8language' '9coag' '10tazamia' '11hepato' '12heat' '13ef' '14cervix'\n '15heart' '16liver' '17nwosu' '18school' '19ibs' '21survival'\n '29parkinson' '30paradox2' '31renal' '33svr' '35pcos' '36probiotic'\n '101kidney' '102breast_cancer' '103diabetes_niddk'\n '104diabetic_retinopathy']\n" ], [ "def read_data(data_id): \n data_name = data_list[data_id]\n print('data_name:',data_name)\n Xy = np.loadtxt('../classification_data/%s/data_processed_knn7.dat'%data_name) \n X = Xy[:,:-1]\n y = Xy[:,-1]\n\n #print(np.unique(y,return_counts=True))\n X,y = make_data_balance(X,y)\n print(np.unique(y,return_counts=True))\n\n X, y = shuffle(X, y, random_state=1)\n X_train,X_test,y_train,y_test = train_test_split(X,y,test_size=0.5,random_state = 1)\n \n sc = MinMaxScaler()\n X_train = sc.fit_transform(X_train)\n X_test = sc.transform(X_test)\n \n return X_train,X_test,y_train,y_test", "_____no_output_____" ], [ "def measure_performance(X_train,X_test,y_train,y_test):\n \n #model = LogisticRegression(max_iter=100)\n model = SGDClassifier(loss='log',max_iter=1000,tol=0.001) # 'log' for logistic regression, 'hinge' for SVM\n\n # regularization penalty space\n #penalty = ['l1','l2']\n penalty = ['elasticnet']\n\n # solver\n #solver=['saga']\n #solver=['liblinear']\n\n # regularization hyperparameter space\n #C = np.logspace(0, 4, 10)\n #C = [0.001,0.1,1.0,10.0,100.0]\n alpha = [0.001,0.01,0.1,1.0,10.,100.]\n\n # l1_ratio\n #l1_ratio = [0.1,0.5,0.9]\n l1_ratio = [0.,0.2,0.4,0.6,0.8,1.0]\n\n # Create hyperparameter options\n #hyperparameters = dict(penalty=penalty,solver=solver,C=C,l1_ratio=l1_ratio)\n #hyper_parameters = dict(penalty=penalty,solver=solver,C=C)\n hyper_parameters = dict(penalty=penalty,alpha=alpha,l1_ratio=l1_ratio)\n \n # Create grid search using cross validation\n clf = GridSearchCV(model, hyper_parameters, cv=4, iid='deprecated')\n \n # Fit grid search\n best_model = clf.fit(X_train, y_train)\n \n # View best hyperparameters\n #print('Best Penalty:', best_model.best_estimator_.get_params()['penalty'])\n #print('Best C:', best_model.best_estimator_.get_params()['C'])\n #print('Best alpha:', best_model.best_estimator_.get_params()['alpha'])\n #print('Best l1_ratio:', best_model.best_estimator_.get_params()['l1_ratio'])\n \n # best hyper parameters\n print('best_hyper_parameters:',best_model.best_params_)\n\n # performance:\n y_test_pred = best_model.best_estimator_.predict(X_test)\n acc = 
accuracy_score(y_test,y_test_pred)\n #print('Accuracy:', acc)\n\n p_test_pred = best_model.best_estimator_.predict_proba(X_test) # prob of [0,1]\n p_test_pred = p_test_pred[:,1] # prob of 1 \n fp,tp,thresholds = roc_curve(y_test, p_test_pred, drop_intermediate=False)\n roc_auc = auc(fp,tp)\n #print('AUC:', roc_auc)\n\n precision = precision_score(y_test,y_test_pred)\n #print('Precision:',precision)\n\n recall = recall_score(y_test,y_test_pred)\n #print('Recall:',recall)\n \n f1_score = 2*precision*recall/(precision+recall)\n\n return acc,roc_auc,precision,recall,f1_score", "_____no_output_____" ], [ "n_data = len(data_list)\nroc_auc = np.zeros(n_data) ; acc = np.zeros(n_data)\nprecision = np.zeros(n_data) ; recall = np.zeros(n_data)\nf1_score = np.zeros(n_data)\n\n#data_id = 0\nfor data_id in range(n_data):\n X_train,X_test,y_train,y_test = read_data(data_id)\n acc[data_id],roc_auc[data_id],precision[data_id],recall[data_id],f1_score[data_id] =\\\n measure_performance(X_train,X_test,y_train,y_test)\n print(data_id,acc[data_id],roc_auc[data_id],precision[data_id],recall[data_id],f1_score[data_id]) \n ", "data_name: 1paradox\n(array([-1., 1.]), array([60, 60]))\nbest_hyper_parameters: {'alpha': 0.001, 'l1_ratio': 0.8, 'penalty': 'elasticnet'}\n0 0.7833333333333333 0.8742655699177438 0.65625 0.9130434782608695 0.7636363636363634\ndata_name: 2peptide\n(array([-1., 1.]), array([23, 23]))\nbest_hyper_parameters: {'alpha': 0.001, 'l1_ratio': 0.8, 'penalty': 'elasticnet'}\n1 0.9565217391304348 1.0 1.0 0.9166666666666666 0.9565217391304348\ndata_name: 3stigma\n(array([-1., 1.]), array([2725, 2725]))\nbest_hyper_parameters: {'alpha': 0.001, 'l1_ratio': 0.2, 'penalty': 'elasticnet'}\n2 0.996697247706422 0.9999859864715552 1.0 0.9932330827067669 0.9966050546963411\ndata_name: 4nki\n(array([-1., 1.]), array([77, 77]))\nbest_hyper_parameters: {'alpha': 0.1, 'l1_ratio': 0.2, 'penalty': 'elasticnet'}\n3 0.7142857142857143 0.8418918918918918 0.6829268292682927 0.7567567567567568 0.7179487179487181\ndata_name: 5mental\n(array([-1., 1.]), array([147, 147]))\nbest_hyper_parameters: {'alpha': 1.0, 'l1_ratio': 0.0, 'penalty': 'elasticnet'}\n4 0.6598639455782312 0.6890289103039289 0.6521739130434783 0.6338028169014085 0.6428571428571428\ndata_name: 6smoking\n(array([-1., 1.]), array([722, 722]))\nbest_hyper_parameters: {'alpha': 0.001, 'l1_ratio': 0.0, 'penalty': 'elasticnet'}\n5 1.0 1.0 1.0 1.0 1.0\ndata_name: 7anemia\n(array([-1., 1.]), array([43, 43]))\nbest_hyper_parameters: {'alpha': 0.01, 'l1_ratio': 0.2, 'penalty': 'elasticnet'}\n6 0.8372093023255814 0.8826086956521738 0.8095238095238095 0.85 0.8292682926829269\ndata_name: 8language\n(array([-1., 1.]), array([267, 267]))\nbest_hyper_parameters: {'alpha': 0.01, 'l1_ratio': 0.4, 'penalty': 'elasticnet'}\n7 0.7378277153558053 0.8552912131073953 0.8478260869565217 0.582089552238806 0.6902654867256638\ndata_name: 9coag\n(array([-1., 1.]), array([504, 504]))\nbest_hyper_parameters: {'alpha': 0.1, 'l1_ratio': 0.0, 'penalty': 'elasticnet'}\n8 0.6170634920634921 0.670016381048387 0.5992779783393501 0.6693548387096774 0.6323809523809524\ndata_name: 10tazamia\n(array([-1., 1.]), array([124, 124]))\nbest_hyper_parameters: {'alpha': 0.001, 'l1_ratio': 0.2, 'penalty': 'elasticnet'}\n9 0.7338709677419355 0.8323337679269883 0.7222222222222222 0.8 0.759124087591241\ndata_name: 11hepato\n(array([-1., 1.]), array([63, 63]))\nbest_hyper_parameters: {'alpha': 0.01, 'l1_ratio': 0.4, 'penalty': 'elasticnet'}\n10 0.7142857142857143 0.7358870967741936 0.7096774193548387 
0.7096774193548387 0.7096774193548389\ndata_name: 12heat\n(array([-1., 1.]), array([83, 83]))\nbest_hyper_parameters: {'alpha': 0.1, 'l1_ratio': 0.0, 'penalty': 'elasticnet'}\n11 0.7349397590361446 0.7555555555555554 0.7674418604651163 0.7333333333333333 0.7499999999999999\ndata_name: 13ef\n(array([-1., 1.]), array([93, 93]))\nbest_hyper_parameters: {'alpha': 0.001, 'l1_ratio': 0.6, 'penalty': 'elasticnet'}\n12 1.0 1.0 1.0 1.0 1.0\ndata_name: 14cervix\n(array([-1., 1.]), array([24, 24]))\nbest_hyper_parameters: {'alpha': 0.001, 'l1_ratio': 0.0, 'penalty': 'elasticnet'}\n13 0.9583333333333334 1.0 1.0 0.9333333333333333 0.9655172413793104\ndata_name: 15heart\n(array([-1., 1.]), array([138, 138]))\nbest_hyper_parameters: {'alpha': 0.1, 'l1_ratio': 0.8, 'penalty': 'elasticnet'}\n14 0.7681159420289855 0.8691934121621623 0.8620689655172413 0.6756756756756757 0.7575757575757576\ndata_name: 16liver\n(array([-1., 1.]), array([167, 167]))\nbest_hyper_parameters: {'alpha': 0.001, 'l1_ratio': 0.6, 'penalty': 'elasticnet'}\n15 0.6407185628742516 0.684051724137931 0.5957446808510638 0.9655172413793104 0.7368421052631579\ndata_name: 17nwosu\n(array([-1., 1.]), array([59, 59]))\nbest_hyper_parameters: {'alpha': 0.001, 'l1_ratio': 0.6, 'penalty': 'elasticnet'}\n16 0.9830508474576272 1.0 0.9629629629629629 1.0 0.9811320754716981\ndata_name: 18school\n(array([-1., 1.]), array([68, 68]))\nbest_hyper_parameters: {'alpha': 0.001, 'l1_ratio': 0.2, 'penalty': 'elasticnet'}\n17 0.8088235294117647 0.8576388888888888 0.8275862068965517 0.75 0.7868852459016394\ndata_name: 19ibs\n(array([-1., 1.]), array([33, 33]))\nbest_hyper_parameters: {'alpha': 0.001, 'l1_ratio': 0.2, 'penalty': 'elasticnet'}\n18 0.9090909090909091 0.962962962962963 0.8947368421052632 0.9444444444444444 0.918918918918919\ndata_name: 21survival\n(array([-1., 1.]), array([123, 123]))\nbest_hyper_parameters: {'alpha': 0.01, 'l1_ratio': 1.0, 'penalty': 'elasticnet'}\n19 0.7154471544715447 0.852116402116402 0.6811594202898551 0.7833333333333333 0.7286821705426356\ndata_name: 29parkinson\n(array([-1., 1.]), array([48, 48]))\nbest_hyper_parameters: {'alpha': 0.001, 'l1_ratio': 0.8, 'penalty': 'elasticnet'}\n20 0.75 0.8536155202821869 0.6666666666666666 0.8571428571428571 0.75\ndata_name: 30paradox2\n(array([-1., 1.]), array([52, 52]))\nbest_hyper_parameters: {'alpha': 0.01, 'l1_ratio': 0.8, 'penalty': 'elasticnet'}\n21 0.9807692307692307 0.9791666666666666 1.0 0.9583333333333334 0.9787234042553191\ndata_name: 31renal\n(array([-1., 1.]), array([47, 47]))\nbest_hyper_parameters: {'alpha': 1.0, 'l1_ratio': 0.0, 'penalty': 'elasticnet'}\n22 0.851063829787234 0.9293478260869565 0.8076923076923077 0.9130434782608695 0.8571428571428572\ndata_name: 33svr\n(array([-1., 1.]), array([41, 41]))\nbest_hyper_parameters: {'alpha': 0.001, 'l1_ratio': 0.8, 'penalty': 'elasticnet'}\n23 0.975609756097561 0.9856459330143541 1.0 0.9473684210526315 0.972972972972973\ndata_name: 35pcos\n(array([-1., 1.]), array([177, 177]))\nbest_hyper_parameters: {'alpha': 0.01, 'l1_ratio': 0.6, 'penalty': 'elasticnet'}\n24 0.8813559322033898 0.9692288049029623 0.8526315789473684 0.9204545454545454 0.8852459016393441\ndata_name: 36probiotic\n(array([-1., 1.]), array([10, 10]))\n" ], [ "print('acc_mean:',acc.mean())\nprint('roc_mean:',roc_auc.mean())\nprint('precision:',precision.mean())\nprint('recall:',recall.mean())\nprint('f1_score:',f1_score.mean())", "acc_mean: 0.830577195701667\nroc_mean: 0.888677966005197\nprecision: 0.837447962689654\nrecall: 0.8355001319662938\nf1_score: 
0.8300109080109538\n" ], [ "np.savetxt('result_knn7_LR.dat',(roc_auc,acc,precision,recall,f1_score),fmt='%f')", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ] ]
d014488a5c435ff88bdde0fdc2165052ab7318a6
570,720
ipynb
Jupyter Notebook
artificial_intelligence/01 - ConsumptionRegression/All campus/Fpolis.ipynb
LeonardoSanBenitez/LorisWeb
68c4aecab408c4432d39326ed43899e1dc33f1c5
[ "MIT" ]
null
null
null
artificial_intelligence/01 - ConsumptionRegression/All campus/Fpolis.ipynb
LeonardoSanBenitez/LorisWeb
68c4aecab408c4432d39326ed43899e1dc33f1c5
[ "MIT" ]
2
2019-12-23T22:49:27.000Z
2020-07-19T03:51:09.000Z
artificial_intelligence/01 - ConsumptionRegression/All campus/Fpolis.ipynb
LeonardoSanBenitez/LorisWeb
68c4aecab408c4432d39326ed43899e1dc33f1c5
[ "MIT" ]
null
null
null
72.546079
74,616
0.663329
[ [ [ "import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.model_selection import train_test_split\n%matplotlib inline\n\npd.set_option('display.max_columns', None)\npd.set_option('display.max_rows', 1000)", "_____no_output_____" ] ], [ [ "# Data import\n* One CSV per campus\n* dates: from 2019-02-18 (second week of classes) to 2019-06-28 (second-to-last week of classes)\n* Granularity: 1 h (power aggregated by the mean)\n* Weather data obtained from the yr platform\n* Columns\n * active power of phase A (kW)\n * Temperature (ºC)\n * Pressure (hPa)\n", "_____no_output_____" ] ], [ [ "raw = pd.read_csv ('../../datasets/2019-1 Fpolis.csv', sep=',')", "_____no_output_____" ], [ "raw.describe()", "_____no_output_____" ], [ "(ax1, ax2,ax3) = raw.plot(subplots=True)\nax1.legend(loc='upper left')\nax2.legend(loc='upper left')\nax3.legend(loc='upper left')", "_____no_output_____" ], [ "raw['pa'].plot.kde().set_xlabel(\"Active Power (kW)\")", "_____no_output_____" ], [ "raw['temp_celsius'].plot.kde().set_xlabel(\"Temperature (ºC)\")", "_____no_output_____" ], [ "raw['pressao'].plot.kde().set_xlabel(\"Pressure (hPa)\")", "_____no_output_____" ] ], [ [ "# Data cleaning\n* Filter class hours\n* remove incomplete rows (system offline)\n* remove outliers (failures in data collection).\n* remove non-school days\n* remove days with measurement failures (system offline)", "_____no_output_____" ] ], [ [ "processed = raw.dropna()\nprocessed = processed.set_index(pd.to_datetime (processed['momento'])).drop('momento', axis=1)", "_____no_output_____" ], [ "\n(ax1, ax2, ax3) = processed['2019-05-20 00:00:00' : '2019-05-25 00:00:00'].plot(subplots=True, sharex=True)\nax1.legend(loc='upper left')\nax2.legend(loc='upper left')\nax3.legend(loc='upper left')\n#ax1.legend(loc=\"upper right\")\n\n", "_____no_output_____" ], [ "processed = processed[processed['pa']<500]", "_____no_output_____" ], [ "processed = processed[processed['pa']>10]", "_____no_output_____" ], [ "## Remove weekends\n# Create an index of just the date portion of your index (this is the slow step)\ndfDays = pd.to_datetime(processed.index.date)\n\n# Create a range of business days over that period\ndfBdays = pd.bdate_range(start=processed.index[0].date(), end=processed.index[-1].date())\n\n#Filter the series to just those days contained in the business day range.\nfiltered = processed[dfDays.isin(dfBdays)]", "_____no_output_____" ], [ "## Removing non-school days or days with errors\n# March\n# April 4, 8, 15, 16,17,18,19, 22, 25, 29\n# May 1, 9, 10, 14, 15, 16, 17\n# June 20, 21\nfiltered = filtered[~((filtered.index.month == 4) & (filtered.index.day == 4))]\nfiltered = filtered[~((filtered.index.month == 4) & (filtered.index.day == 8))]\nfiltered = filtered[~((filtered.index.month == 4) & (filtered.index.day == 15))]\nfiltered = filtered[~((filtered.index.month == 4) & (filtered.index.day == 16))]\nfiltered = filtered[~((filtered.index.month == 4) & (filtered.index.day == 17))]\nfiltered = filtered[~((filtered.index.month == 4) & (filtered.index.day == 18))]\nfiltered = filtered[~((filtered.index.month == 4) & (filtered.index.day == 19))]\nfiltered = filtered[~((filtered.index.month == 4) & (filtered.index.day == 22))]\nfiltered = filtered[~((filtered.index.month == 4) & (filtered.index.day == 25))]\nfiltered = filtered[~((filtered.index.month == 4) & (filtered.index.day == 29))]\nfiltered = filtered[~((filtered.index.month == 5) & (filtered.index.day == 1))]\nfiltered = filtered[~((filtered.index.month == 5) & (filtered.index.day == 9))]\nfiltered = filtered[~((filtered.index.month == 5) & (filtered.index.day == 10))]\nfiltered = filtered[~((filtered.index.month == 5) & (filtered.index.day == 14))]\nfiltered = filtered[~((filtered.index.month == 5) & (filtered.index.day == 15))]\nfiltered = filtered[~((filtered.index.month == 5) & (filtered.index.day == 16))]\nfiltered = filtered[~((filtered.index.month == 5) & (filtered.index.day == 17))]\nfiltered = filtered[~((filtered.index.month == 6) & (filtered.index.day == 20))]\nfiltered = filtered[~((filtered.index.month == 6) & (filtered.index.day == 21))]", "_____no_output_____" ], [ "# Selecting class hours\nfiltered1 = filtered.between_time('08:00:00', '11:00:00')\nfiltered2 = filtered.between_time('14:00:00', '17:00:00')\nfiltered = pd.concat([filtered1, filtered2])", "_____no_output_____" ], [ "filtered = filtered[~((filtered['pa']<50) & (filtered['temp_celsius']>27))]", "_____no_output_____" ], [ "f, (ax1, ax2) = plt.subplots(1, 2, sharey=True,figsize=(15,6))\nax1.scatter(filtered['temp_celsius'], filtered['pa'], Alpha=0.5)\nax1.set_xlabel(\"temperature (ºC)\")\nax1.set_ylabel(\"Active power (kW)\")\nax2.scatter(filtered['pressao'], filtered['pa'], Alpha=0.5)\nax2.set_xlabel(\"Pressure (hPa)\")\nax2.set_ylabel(\"Active power (kW)\")", "_____no_output_____" ], [ "filtered.describe()", "_____no_output_____" ], [ "filtered['id']=1\nfiltered", "_____no_output_____" ], [ "# Cross validation split\nfrom sklearn.model_selection import train_test_split \nX = filtered.drop('pa', axis=1)\ny = filtered ['pa']\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5) ", "_____no_output_____" ], [ "X_test", "_____no_output_____" ], [ "y_test", "_____no_output_____" ] ], [ [ "# Linear Regression", "_____no_output_____" ] ], [ [ "model1 = LinearRegression()\nmodel1.fit (X_train, y_train)\npd.DataFrame(model1.coef_,X.columns,columns=['Coefficient'])", "_____no_output_____" ], [ "from sklearn import metrics\ny_hat1 = model1.predict(X_test)\n\nprint (\"MAE: \", metrics.mean_absolute_error(y_test, y_hat1))\nprint (\"RMSE: \", np.sqrt(metrics.mean_squared_error(y_test, y_hat1)))\nprint (\"Percentage: \", metrics.mean_absolute_error(y_test,y_hat1)/y_test.mean()*100, \"%\")\n\n# Predicted vs actual\nline = np.arange(0, 250, 1)\n\nplt.scatter(y_test,y_hat1, Alpha=0.6)\nplt.scatter(line,line)\nplt.grid(True)\nplt.xlabel(\"Actual values\")\nplt.ylabel(\"Predicted values\")", "MAE:  26.97950688636858\nRMSE:  34.29523555329796\nPercentage:  38.47309204975011 %\n" ], [ "plt.scatter(X['temp_celsius'], y,color='g')\nplt.scatter(X['temp_celsius'], model1.predict(X),color='k')\n\nplt.show()", "_____no_output_____" ] ], [ [ "# Random Forest", "_____no_output_____" ] ], [ [ "import sklearn.metrics as metrics\nimport math\nfrom sklearn.ensemble import RandomForestRegressor", "_____no_output_____" ], [ "mae1 = {}\nmae2 = {}\nfor k in range(1,15, 1):\n model2 = RandomForestRegressor(max_depth=k, n_estimators=100, criterion='mae').fit(X_train,y_train)\n y_hat = model2.predict(X_train)\n mae1[k] = metrics.mean_absolute_error(y_train,y_hat)\n y_hat = model2.predict(X_test)\n mae2[k] = metrics.mean_absolute_error(y_test,y_hat)\n\nplt.figure()\nplt.plot(list(mae1.keys()), list(mae1.values()), label='Training set error')\nplt.plot(list(mae2.keys()), list(mae2.values()), label='Test set error')\nplt.legend(loc='lower left')\nplt.xlabel(\"Maximum depth\")\nplt.ylabel(\"MAE\")\nplt.grid(True)", "_____no_output_____" ], [ "# Random Forest\nmodel2 = RandomForestRegressor(max_depth=3, n_estimators=100)\nmodel2.fit(X_train,y_train)\n\n# Model Evaluation\ny_hat2 = model2.predict(X_test)\n\nprint (\"MAE: \", metrics.mean_absolute_error(y_test,y_hat2))\nprint (\"RMSE: \", math.sqrt(metrics.mean_squared_error(y_test,y_hat2)))\nprint (\"Percentage: \", metrics.mean_absolute_error(y_test,y_hat2)/y_test.mean()*100, \"%\")\n\n# Feature analysis\nprint (\"=====================================\")\nprint (\"FEATURE IMPORTANCE:\")\nfor i in range(model2.feature_importances_.size):\n print (X_train.columns[i], \"=\", model2.feature_importances_[i])", "MAE:  22.09663368502784\nRMSE:  29.054007622458546\nPercentage:  31.51005781292511 %\n=====================================\nFEATURE IMPORTANCE:\ntemp_celsius = 0.8680984607970913\npressao = 0.13190153920290898\n" ], [ "# Predicted vs actual\nline = np.arange(0, 250, 1)\n\nplt.scatter(y_test,y_hat, Alpha=0.6)\nplt.scatter(line,line)\nplt.grid(True)", "_____no_output_____" ], [ "plt.scatter(X['temp_celsius'], y,color='g')\nplt.scatter(X['temp_celsius'], model2.predict(X),color='k')\nplt.xlabel(\"Temperature (ºC)\")\nplt.ylabel(\"Active Power (kW)\")\nplt.show()", "_____no_output_____" ], [ "import pickle\nwith open('fpolis_trained_model.pkl', 'wb') as f:\n pickle.dump(model2, f)\nwith open('fpolis_trained_model.pkl', 'rb') as f:\n model2_loaded = pickle.load(f)\nmodel2_loaded", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ] ]
d0144cae376d3ca4c75e43fe7fbfa0255895f971
6,428
ipynb
Jupyter Notebook
examples/AugLy_image.ipynb
AghilesAzzoug/AugLy
6b8eb0efbf18a74cf112b363187ab9057dc60cce
[ "MIT" ]
null
null
null
examples/AugLy_image.ipynb
AghilesAzzoug/AugLy
6b8eb0efbf18a74cf112b363187ab9057dc60cce
[ "MIT" ]
null
null
null
examples/AugLy_image.ipynb
AghilesAzzoug/AugLy
6b8eb0efbf18a74cf112b363187ab9057dc60cce
[ "MIT" ]
null
null
null
28.825112
100
0.480865
[ [ [ "# Note: restart runtime after this import before running the augmentations\n!pip install -U augly\n!sudo apt-get install python3-magic", "_____no_output_____" ], [ "import os\nimport augly.image as imaugs\nimport augly.utils as utils\nfrom IPython.display import display\n\n# Get input image, scale it down to avoid taking up the whole screen\ninput_img_path = os.path.join(\n utils.TEST_URI, \"image\", \"inputs\", \"dfdc_1.jpg\"\n)\n\n# We can use the AugLy scale augmentation\ninput_img = imaugs.scale(input_img_path, factor=0.2)\ndisplay(input_img)", "_____no_output_____" ], [ "# Now we can apply various augmentations to the scaled image!\ndisplay(\n imaugs.meme_format(\n input_img,\n caption_height=75,\n meme_bg_color=(0, 0, 0),\n text_color=(255, 255, 255),\n )\n)", "_____no_output_____" ], [ "\"\"\"\nYou can optionally pass in a metadata list, to which metadata about the\naugmentation will be appended, including kwargs, input & output\ndimensions, and intensity (defined based on the kwargs for each\naugmentation).\n\"\"\"\nmeta = []\ndisplay(imaugs.shuffle_pixels(input_img, factor=0.3, metadata=meta))\nmeta", "_____no_output_____" ], [ "\"\"\"\nYou can also pass in bounding boxes, which will be transformed along with\nthe image & included in the metadata (note: you must provide metadata to\nget the transformed bboxes)\n\"\"\"\nmeta = []\ndisplay(\n imaugs.rotate(\n input_img,\n degrees=15,\n metadata=meta,\n bboxes=[(20, 6, 250, 180)],\n bbox_format=\"pascal_voc\",\n )\n)\nmeta", "_____no_output_____" ], [ "# For all the augmentations, we have class-based definitions as well as\n# functional\nmeta = []\naug = imaugs.PerspectiveTransform(sigma=20.0)\ndisplay(aug(input_img, metadata=meta))\nmeta", "_____no_output_____" ], [ "\"\"\"\nFor some augmentations, we also provide versions that will randomly sample\nfrom a set of parameters (e.g. for ChangeAspectRatio, RandomAspectRatio\nrandomly samples the aspect ratio to use). The metadata will contain the\nactual sampled param values.\n\"\"\"\nmeta = []\naug = imaugs.RandomAspectRatio()\ndisplay(aug(input_img, metadata=meta))\nmeta", "_____no_output_____" ], [ "# You can also compose several transformations together\nmeta = []\naug = imaugs.Compose(\n [\n imaugs.Saturation(factor=2.0),\n imaugs.OverlayOntoScreenshot(\n template_filepath=os.path.join(\n utils.SCREENSHOT_TEMPLATES_DIR, \"mobile.png\"\n ),\n ),\n imaugs.Scale(factor=0.6),\n ]\n)\ndisplay(aug(input_img, metadata=meta))\nmeta", "_____no_output_____" ], [ "# AugLy also integrates seamlessly with PyTorch transforms\n# Note: you must have torchvision installed, which it is by default in colab\nimport torchvision.transforms as transforms\n\naug = transforms.Compose(\n [\n imaugs.Brightness(factor=2.0),\n imaugs.RandomRotation(),\n transforms.ToTensor(),\n ]\n)\ntype(aug(input_img))", "_____no_output_____" ], [ "# We also provide a numpy wrapper in case your data is in np.ndarray format\nimport numpy as np\nfrom augly.image import aug_np_wrapper, overlay_emoji\n\nnp_image = np.zeros((300, 300))\n# pass in function arguments as kwargs\nnp_aug_img = aug_np_wrapper(np_image, overlay_emoji, **{'opacity': 0.5, 'y_pos': 0.45})\ntype(np_aug_img)", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
d0144d6e363d3b0588542dae18b761aff6cdc1e3
4,225
ipynb
Jupyter Notebook
notebooks/Learning Units/Linear Regression/Linear Regression - Chapter 1 - Introduction.ipynb
ValentinCalomme/skratch
f234a9b95adfdb20d231d7f8c761ab1098733cb8
[ "MIT" ]
4
2017-10-27T07:23:34.000Z
2020-02-11T18:02:39.000Z
notebooks/Learning Units/Linear Regression/Linear Regression - Chapter 1 - Introduction.ipynb
ValentinCalomme/skratch
f234a9b95adfdb20d231d7f8c761ab1098733cb8
[ "MIT" ]
null
null
null
notebooks/Learning Units/Linear Regression/Linear Regression - Chapter 1 - Introduction.ipynb
ValentinCalomme/skratch
f234a9b95adfdb20d231d7f8c761ab1098733cb8
[ "MIT" ]
null
null
null
45.923913
483
0.651361
[ [ [ "# Introduction\n\nLinear Regression is one of the most famous and widely used machine learning algorithms out there. It assumes that the target variable can be explained as a linear combination of the input features. What does this mean? It means that the target can be viewed as a weighted sum of each feature. Let’s use a practical example to illustrate that.\n\nLet’s say that we are opening a restaurant, we make great food but we want to know how much to charge for it. We can be very pragmatic and say that the cost of the meal is directly related to what is in it. We can, for instance, have a rule that each ingredient costs a certain amount, and based on how much there is of each ingredient in the dish, we can calculate its price. There may also be a fixed minimum price for each dish. Mathematically, this is called the intercept.", "_____no_output_____" ] ], [ [ "fixed_price = 5\ningredient_costs = {\"meat\": 10,\n \"fish\": 13,\n \"vegetables\": 2,\n \"fries\": 3}\n \n \ndef price(**ingredients):\n \"\"\" returns the price of a dish \"\"\"\n \n cost = 0\n \n for name, quantity in ingredients.items():\n cost += ingredient_costs[name] * quantity\n \n return cost", "_____no_output_____" ] ], [ [ "Linear Regression makes the assumption that the target, in this case, the price, can be explained as a linear combination of the features. The model will know about the quantity of each ingredient, but it will have to infer what the fixed price is, and what the cost of each ingredient is.\n\n>It is important to remember that cost, in this situation, is rather abstract. It represents how much each feature affects the outcome, and in which way. Therefore, features can have negative costs for instance.\n\nIn the univariate case, where there is only one feature, Linear Regression can be thought of as trying to fit a line through points.\n\n![](https://skratch.valentincalomme.com/wp-content/uploads/2018/09/polymial_regression_1.gif)", "_____no_output_____" ], [ "Now, Linear Regression is one of the most popular algorithms because it can do much more than fit straight lines through data. Indeed, with a simple trick, we can make it fit polynomial functions, making it much more powerful.\n\nThe trick is to \"replace\" the original features with a polynomial of a higher degree. In the univariate case, this comes down to not only using the feature itself but also its squared value, cubed value, and so on. For instance, instead of using a single feature $X = 2$, we end up with features $X = 2, 4, 8, 16, 32$, and so on. More features mean that the model is explained by more weights, and these weights can express more complex functions. A minimal sketch of this trick is shown below.\n\n![](https://skratch.valentincalomme.com/wp-content/uploads/2018/09/polymial_regression_4.gif)", "_____no_output_____" ], [ "A Linear Regression model's goal is to find the coefficients, also called weights, which will fit the data best. In order to define what best means, we need to define a loss function. This loss function, as we will see later, can be tweaked to alter how the weights are learned. We will also see that finding the best weights in order to minimize the loss function can be done in different ways.", "_____no_output_____" ],
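 [ "To make the polynomial trick concrete, here is a minimal sketch (an editorial addition, not part of the original lesson) that expands a single feature into its powers with `np.vander` and then finds the weights by ordinary least squares. The sample data and the degree are made up purely for illustration.", "_____no_output_____" ] ], [ [ "import numpy as np\n\n# Made-up univariate data, roughly y = 2x^2 + 1 (illustration only)\nx = np.array([0.0, 1.0, 2.0, 3.0, 4.0])\ny = np.array([1.0, 3.0, 9.0, 19.0, 33.0])\n\ndegree = 2\n\n# Expand the single feature into the columns [1, x, x**2]\nX_poly = np.vander(x, degree + 1, increasing=True)\n\n# Ordinary least squares: the weights that minimize the squared error\nweights, *_ = np.linalg.lstsq(X_poly, y, rcond=None)\nprint(weights) # approximately [1. 0. 2.]", "_____no_output_____" ] ] ]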
[ "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ] ]
d0144dfe626a8394f56488281817630c76577ac4
82,876
ipynb
Jupyter Notebook
Machine Learning Problem-Statements/Iris/Iris_Dataset_Machine_Learning.ipynb
JukMR/Hacktoberfest2020
3407a96e38b70730c38dc177c393ac1f9bb9da3e
[ "MIT" ]
16
2020-10-02T17:47:43.000Z
2021-11-12T12:23:17.000Z
Machine Learning Problem-Statements/Iris/Iris_Dataset_Machine_Learning.ipynb
JukMR/Hacktoberfest2020
3407a96e38b70730c38dc177c393ac1f9bb9da3e
[ "MIT" ]
28
2020-10-02T19:41:22.000Z
2020-10-31T06:15:19.000Z
Machine Learning Problem-Statements/Iris/Iris_Dataset_Machine_Learning.ipynb
JukMR/Hacktoberfest2020
3407a96e38b70730c38dc177c393ac1f9bb9da3e
[ "MIT" ]
71
2020-10-02T17:58:47.000Z
2021-10-02T15:33:32.000Z
136.759076
30,170
0.849836
[ [ [ "###### The Iris flower data set is a multivariate data set introduced by the British statistician and biologist Ronald Fisher in his 1936 paper The use of multiple measurements in taxonomic problems. The dataset consists of 50 samples from each of three species of Iris (Iris Setosa, Iris virginica, and Iris versicolor). Four features were measured from each sample: the length and the width of the sepals and petals, in centimeters.\n\n", "_____no_output_____" ], [ "Import Libraries", "_____no_output_____" ] ], [ [ "import numpy as np\nimport pandas as pd \nfrom pandas import Series, DataFrame\n\nimport seaborn as sns\nimport matplotlib.pyplot as plt", "_____no_output_____" ], [ "iris = pd.read_csv(\"iris.csv\")", "_____no_output_____" ], [ "iris.head()", "_____no_output_____" ] ], [ [ "##### *We can see that we have a column named Id that we do not need, so let's drop it!*", "_____no_output_____" ] ], [ [ "iris.drop(\"Id\", axis=1, inplace = True)", "_____no_output_____" ], [ "iris.info()", "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 150 entries, 0 to 149\nData columns (total 5 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 SepalLengthCm 150 non-null float64\n 1 SepalWidthCm 150 non-null float64\n 2 PetalLengthCm 150 non-null float64\n 3 PetalWidthCm 150 non-null float64\n 4 Species 150 non-null object \ndtypes: float64(4), object(1)\nmemory usage: 6.0+ KB\n" ], [ "figure = iris[iris.Species == 'Iris-setosa'].plot(kind='scatter', x='SepalLengthCm', y='SepalWidthCm', color='red', label='Setosa')\niris[iris.Species == 'Iris-versicolor'].plot(kind='scatter', x='SepalLengthCm', y='SepalWidthCm', color='blue', label='Versicolor', ax=figure)\niris[iris.Species == 'Iris-virginica'].plot(kind='scatter', x='SepalLengthCm', y='SepalWidthCm', color='green', label='Virginica', ax=figure)\n\nfigure.set_xlabel('Sepal Length')\nfigure.set_ylabel('Sepal Width')\nfigure.set_title('Sepal Length Vs Width')\n\nfigure=plt.gcf()\nfigure.set_size_inches(7, 4)\nplt.show()", "_____no_output_____" ], [ "figure = iris[iris.Species == 'Iris-setosa'].plot(kind='scatter', x='PetalLengthCm', y='PetalWidthCm', color='red', label='Setosa')\niris[iris.Species == 'Iris-versicolor'].plot(kind='scatter', x='PetalLengthCm', y='PetalWidthCm', color='blue', label='Versicolor', ax=figure)\niris[iris.Species == 'Iris-virginica'].plot(kind='scatter', x='PetalLengthCm', y='PetalWidthCm', color='green', label='Virginica', ax=figure)\n\nfigure.set_xlabel('Petal Length')\nfigure.set_ylabel('Petal Width')\nfigure.set_title('Petal Length Vs Width')\n\nfigure=plt.gcf()\nfigure.set_size_inches(7, 4)\nplt.show()", "_____no_output_____" ], [ "plt.figure(figsize=(15,10)) \nplt.subplot(2,2,1) \nsns.boxplot(x='Species',y='SepalLengthCm',data=iris) \nplt.subplot(2,2,2) \nsns.boxplot(x='Species',y='SepalWidthCm',data=iris) \nplt.subplot(2,2,3) \nsns.boxplot(x='Species',y='PetalLengthCm',data=iris) \nplt.subplot(2,2,4) \nsns.boxplot(x='Species',y='PetalWidthCm',data=iris) ", "_____no_output_____" ], [ "from sklearn.linear_model import LogisticRegression \nfrom sklearn.model_selection import train_test_split\nfrom sklearn.neighbors import KNeighborsClassifier \nfrom sklearn import svm \nfrom sklearn import metrics \nfrom sklearn.tree import DecisionTreeClassifier\nimport xgboost as xgb", "_____no_output_____" ] ], [ [ "Splitting The Data into Training And Testing Dataset", "_____no_output_____" ] ], [ [ "train, test = train_test_split(iris, test_size=0.2) \nprint(train.shape)\nprint(test.shape)", 
"(120, 5)\n(30, 5)\n" ], [ "train_X = train[['SepalLengthCm','SepalWidthCm','PetalLengthCm','PetalWidthCm']] \ntrain_y = train.Species \n\ntest_X = test[['SepalLengthCm','SepalWidthCm','PetalLengthCm','PetalWidthCm']] \ntest_y = test.Species ", "_____no_output_____" ] ], [ [ "1. Logistic Regression", "_____no_output_____" ] ], [ [ "model1 = LogisticRegression()\nmodel1.fit(train_X, train_y)\nprediction1 = model1.predict(test_X)\nprint('Accuracy of Logistic Regression is: ', metrics.accuracy_score(prediction1, test_y))", "Accuracy of Logistic Regression is: 0.9333333333333333\n" ] ], [ [ "2. SVM Classifier", "_____no_output_____" ] ], [ [ "model2 = svm.SVC() \nmodel2.fit(train_X, train_y)\nprediction2 = model2.predict(test_X)\nprint('Accuracy of SVM is: ', metrics.accuracy_score(prediction2, test_y))", "Accuracy of SVM is: 0.9666666666666667\n" ] ], [ [ "3. K-Nearest Neighbors", "_____no_output_____" ] ], [ [ "model3 = KNeighborsClassifier(n_neighbors=3) # this examines 3 neighbors \nmodel3.fit(train_X, train_y)\nprediction3 = model3.predict(test_X)\nprint('Accuracy of KNN is: ', metrics.accuracy_score(prediction3, test_y))", "Accuracy of KNN is: 0.9666666666666667\n" ] ], [ [ "4. Decision Tree", "_____no_output_____" ] ], [ [ "model4 = DecisionTreeClassifier()\nmodel4.fit(train_X, train_y)\nprediction4 = model4.predict(test_X)\nprint('Accuracy of Decision Tree is: ', metrics.accuracy_score(prediction4, test_y))", "Accuracy of Decision Tree is: 0.9\n" ] ], [ [ "5. XGBoost", "_____no_output_____" ] ], [ [ "model5 = xgb.XGBClassifier()\nmodel5.fit(train_X, train_y)\nprediction5 = model5.predict(test_X)\nprint('Accuracy of xgb classifier is: ', metrics.accuracy_score(prediction5, test_y))", "Accuracy of xgb classifier is: 0.9333333333333333\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
d0145b5e8dfd3a1909920e5669300c29550046be
473,862
ipynb
Jupyter Notebook
Back Test 2021_02-2021_04.ipynb
tonghuang-uw/Project_2
7b57b983d3497cfbf6feaeb49f4e5d992455711b
[ "Unlicense", "MIT" ]
null
null
null
Back Test 2021_02-2021_04.ipynb
tonghuang-uw/Project_2
7b57b983d3497cfbf6feaeb49f4e5d992455711b
[ "Unlicense", "MIT" ]
null
null
null
Back Test 2021_02-2021_04.ipynb
tonghuang-uw/Project_2
7b57b983d3497cfbf6feaeb49f4e5d992455711b
[ "Unlicense", "MIT" ]
null
null
null
105.4198
57,764
0.774394
[ [ [ "import pandas as pd\nfrom pathlib import Path\nimport yfinance as yf\nimport numpy as np\nimport csv\nimport matplotlib.pyplot as plt\nimport warnings\nwarnings.filterwarnings('ignore')", "_____no_output_____" ], [ "df_50 = pd.read_csv(\n Path(\"./Data/QM_50.csv\")\n)", "_____no_output_____" ], [ "tickers = list(df_50[\"Tickers\"])", "_____no_output_____" ], [ "historical = yf.Ticker(\"PWR\").history(period=\"2y\")", "_____no_output_____" ], [ "price_3month = historical.loc[\"2021-02\":\"2021-04\"]\nprice_3month[\"return\"] = price_3month[\"Close\"].pct_change()\nprice_3month.head()", "_____no_output_____" ], [ "lags = 5", "_____no_output_____" ], [ "cols = []\nfor lag in range(1, lags + 1):\n col = f'lag_{lag}'\n price_3month[col] = price_3month['Close'].shift(lag)\n cols.append(col)\nprice_3month.dropna(inplace=True)\nprice_3month", "_____no_output_____" ], [ "sum_ret = np.zeros(63)\nfor ticker in tickers:\n df_price = yf.Ticker(ticker).history(period=\"2y\")\n df_price_3month = df_price.loc[\"2021-02\":\"2021-04\"]\n df_price_3month[\"return\"] = df_price_3month[\"Close\"].pct_change()\n cum_ret = weighting * np.array((1 + df_price_3month[\"return\"]).cumprod())\n sum_ret = sum_ret + cum_ret\n print(ticker, cum_ret[-1])", "PWR 0.026538166216534213\nTRMB 0.02421736518661824\nENPH 0.014945798297376543\nIDXX 0.02229763109800747\nAES 0.022548170744512454\nCPRT 0.02246256565952145\nTT 0.02444604079193229\nFDX 0.024332540344888895\nTTWO 0.017455957275998638\nTEL 0.021676619404143782\nXYL 0.022846181439970473\nHWM 0.02551696635498839\nPTC 0.019115328823562955\nFCX 0.02677579527250971\nNWSA 0.02715614906404981\nWY 0.024324324161116656\nPKI 0.01707343500899863\nTWTR 0.020972275495901586\nTSLA 0.01689525022335563\nNWS 0.025980563663937906\nCZR 0.027638415853277705\nDE 0.02544466612673594\nISRG 0.023186929996451305\nHON 0.02291136113795963\nURI 0.025264530460285522\nMS 0.024501471526833973\nVIAC 0.015721161855254916\nPAYC 0.01939260899613066\nSIVB 0.025051147509203994\nZBRA 0.024550877442806162\nALB 0.02026828728833709\nAAPL 0.01962970950367702\nCAT 0.02480956295508654\nTPR 0.03004709384302057\nMPWR 0.019678626608870577\nAPTV 0.02073791092592818\nEXPE 0.027687351375142874\nTGT 0.022661528620731446\nNTAP 0.022754188517547992\nDXC 0.02581176458620561\nCARR 0.02260671397980962\nUAA 0.027026125375466198\nLRCX 0.024806316983991526\nUPS 0.026257614416632106\nWRK 0.02704951860862\nCE 0.02579103751374104\nQRVO 0.020531368798265896\nDVA 0.019616194120671972\nETSY 0.019511212555144326\nIP 0.0235433823731439\n" ], [ "plt.plot(sum_ret)", "_____no_output_____" ] ], [ [ "# Lagged Price Machine Learning Testing", "_____no_output_____" ] ], [ [ "df1_50 = pd.read_csv(\n Path(\"./Data/QM_50_6month.csv\")\n)", "_____no_output_____" ], [ "tickers = list(df1_50[\"Tickers\"])", "_____no_output_____" ], [ "from sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import classification_report\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.svm import SVC\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.metrics import accuracy_score", "_____no_output_____" ], [ "historical = yf.Ticker(\"idxx\").history(period=\"max\")\nhistorical[\"return\"] = historical[\"Close\"].pct_change()\n\nlags = 5\n\ncols = []\nfor lag in range(1, lags + 1):\n col = f'lag_{lag}'\n historical[col] = historical['Close'].shift(lag)\n cols.append(col)\nhistorical_train = historical.loc[:\"2021-01\"]\nhistorical_train.dropna(inplace=True)\nhistorical_train", "_____no_output_____" ], [ "price_3month = 
historical.loc[\"2021-02\":\"2021-04\"]\ndisplay(price_3month.head())", "_____no_output_____" ], [ "model = LogisticRegression(C=1e6, solver=\"lbfgs\",\n multi_class=\"auto\",\n max_iter=1000)", "_____no_output_____" ], [ "model.fit(historical_train[cols],np.sign(historical_train[\"return\"]))", "_____no_output_____" ], [ "price_3month[\"prediction\"] = model.predict(price_3month[cols])\nprice_3month[\"prediction\"].value_counts()", "_____no_output_____" ], [ "price_3month[\"prediction\"].value_counts()", "_____no_output_____" ], [ "print(classification_report(price_3month[\"prediction\"],\n np.sign(price_3month[\"return\"])))", " precision recall f1-score support\n\n -1.0 0.14 1.00 0.24 4\n 1.0 1.00 0.58 0.73 59\n\n accuracy 0.60 63\n macro avg 0.57 0.79 0.49 63\nweighted avg 0.95 0.60 0.70 63\n\n" ], [ "price_3month[\"strategy\"] = price_3month[\"prediction\"] * price_3month[\"return\"]", "_____no_output_____" ], [ "price_3month[[\"strategy\",\"return\"]].cumsum().apply(np.exp).plot()", "_____no_output_____" ] ], [ [ "## SVC", "_____no_output_____" ] ], [ [ "poly_kernel_svm_clf = Pipeline([\n (\"scaler\", StandardScaler()),\n (\"svm_clf\", SVC())\n ])", "_____no_output_____" ], [ "poly_kernel_svm_clf.fit(historical_train[cols],np.sign(historical_train[\"return\"]))", "_____no_output_____" ], [ "price_3month[\"prediction\"] = model.predict(price_3month[cols])\nprice_3month[\"prediction\"].value_counts()", "_____no_output_____" ], [ "print(classification_report(price_3month[\"prediction\"],\n np.sign(price_3month[\"return\"])))", " precision recall f1-score support\n\n -1.0 0.14 1.00 0.24 4\n 1.0 1.00 0.58 0.73 59\n\n accuracy 0.60 63\n macro avg 0.57 0.79 0.49 63\nweighted avg 0.95 0.60 0.70 63\n\n" ], [ "price_3month[\"strategy\"] = price_3month[\"prediction\"] * price_3month[\"return\"]\nprice_3month[[\"strategy\",\"return\"]].cumsum().apply(np.exp).plot()", "_____no_output_____" ], [ "lags = 21\n\nweighting = 1/50\n\nstrat = np.zeros(63)\n\nactual = np.zeros(63)\n\nfor ticker in tickers:\n # Pull the historical data\n df_price = yf.Ticker(ticker).history(period=\"max\")\n df_price[\"return\"] = df_price[\"Close\"].pct_change()\n \n # Create lags price\n cols = []\n for lag in range(1, lags + 1):\n col = f'lag_{lag}'\n df_price[col] = df_price['Close'].shift(lag)\n cols.append(col)\n \n df_price.dropna(inplace=True)\n \n # Create train and test data\n df_price_train = df_price.loc[:\"2020-12\"]\n \n df_price_test = df_price.loc[\"2021-02\":\"2021-04\"]\n \n model = LogisticRegression(C=1e-2, solver=\"lbfgs\",\n multi_class=\"auto\",\n max_iter=1000)\n \n model.fit(df_price_train[cols], np.sign(df_price_train[\"return\"]))\n \n df_price_test[\"prediction\"] = model.predict(df_price_test[cols])\n \n df_price_test[\"strategy\"] = df_price_test[\"prediction\"] * df_price_test[\"return\"]\n \n cum_ret = df_price_test[[\"strategy\",\"return\"]].cumsum().apply(np.exp)\n \n strat = strat + np.array(cum_ret[\"strategy\"]) * weighting\n \n actual = actual + np.array(cum_ret[\"return\"]) * weighting\n \n print(ticker, cum_ret)\n ", "PWR strategy return\nDate \n2021-02-01 1.034790 1.034790\n2021-02-02 1.077388 1.077388\n2021-02-03 1.053500 1.053500\n2021-02-04 1.070550 1.070550\n2021-02-05 1.079687 1.079687\n... ... 
...\n2021-04-26 1.176796 1.402189\n2021-04-27 1.182349 1.408805\n2021-04-28 1.186329 1.404078\n2021-04-29 1.197169 1.416907\n2021-04-30 1.171976 1.387090\n\n[63 rows x 2 columns]\nTRMB strategy return\nDate \n2021-02-01 1.027842 1.027842\n2021-02-02 1.075828 1.075828\n2021-02-03 1.072642 1.072642\n2021-02-04 1.060106 1.085327\n2021-02-05 1.047712 1.098166\n... ... ...\n2021-04-26 1.547791 1.295104\n2021-04-27 1.548895 1.294180\n2021-04-28 1.559990 1.284976\n2021-04-29 1.556257 1.288059\n2021-04-30 1.525681 1.262753\n\n[63 rows x 2 columns]\nENPH strategy return\nDate \n2021-02-01 0.978357 1.022122\n2021-02-02 0.945883 1.057213\n2021-02-03 0.964277 1.077772\n2021-02-04 0.967770 1.073882\n2021-02-05 0.955823 1.060625\n... ... ...\n2021-04-26 1.305521 1.022528\n2021-04-27 1.293853 1.013389\n2021-04-28 1.123271 0.879784\n2021-04-29 1.096752 0.859013\n2021-04-30 1.065467 0.834509\n\n[63 rows x 2 columns]\nIDXX strategy return\nDate \n2021-02-01 1.029120 1.029120\n2021-02-02 1.059293 1.059293\n2021-02-03 1.015703 1.015703\n2021-02-04 1.033946 1.033946\n2021-02-05 1.052188 1.052188\n... ... ...\n2021-04-26 1.150761 1.182061\n2021-04-27 1.148949 1.180200\n2021-04-28 1.149505 1.180771\n2021-04-29 1.138880 1.169857\n2021-04-30 1.131403 1.162177\n\n[63 rows x 2 columns]\nAES strategy return\nDate \n2021-02-01 1.017369 1.017369\n2021-02-02 1.056655 1.056655\n2021-02-03 1.060766 1.060766\n2021-02-04 1.073979 1.073979\n2021-02-05 1.046033 1.102672\n... ... ...\n2021-04-26 1.045180 1.170908\n2021-04-27 1.030099 1.154013\n2021-04-28 1.036418 1.161092\n2021-04-29 1.033404 1.164478\n2021-04-30 1.030446 1.161143\n\n[63 rows x 2 columns]\n" ], [ "spy = np.array(yf.Ticker(\"spy\").history(period=\"2y\").loc[\"2021-02\":\"2021-04\"][\"Close\"].pct_change().cumsum().apply(np.exp))\n\nplt.figure(figsize=(10,8))\nplt.plot(strat,'b-', label=\"Strategy\")\nplt.plot(actual,'r--', label=\"Actual\")\nplt.plot(spy,'g:', label=\"Spy\")\nplt.grid()\nplt.legend()", "_____no_output_____" ], [ "spy", "_____no_output_____" ] ], [ [ "## SMA", "_____no_output_____" ] ], [ [ "%%time\nshort_win = 5\nlong_win = 15\n\nweighting = 1/50\n\nstrat = np.zeros(63)\n\nactual = np.zeros(63)\n\nfor ticker in tickers:\n \n historical = yf.Ticker(ticker).history(period=\"max\")\n historical[\"return\"] = historical[\"Close\"].pct_change()\n historical[\"SMA_short\"] = historical[\"Close\"].rolling(window=short_win).mean().shift()\n historical[\"SMA_long\"] = historical[\"Close\"].rolling(window=long_win).mean().shift()\n historical[\"distance1\"] = (historical[\"Close\"] - historical[\"SMA_short\"]).shift()\n historical[\"distance2\"] = (historical[\"SMA_short\"] - historical[\"SMA_long\"]).shift()\n historical[\"distance3\"] = (historical[\"Close\"] - historical[\"SMA_long\"]).shift()\n \n historical.dropna(inplace=True)\n \n historical_train = historical.loc[\"2020-06\":\"2020\"].copy()\n historical_test = historical.loc[\"2021-02\":\"2021-04\"].copy()\n \n scaler = StandardScaler()\n X_scaler = scaler.fit(historical_train[[\"SMA_short\",\"SMA_long\",\"distance1\",\"distance2\",\"distance3\"]])\n X_train_scaled = X_scaler.transform(historical_train[[\"SMA_short\",\"SMA_long\",\"distance1\",\"distance2\",\"distance3\"]])\n X_test_scaled = X_scaler.transform(historical_test[[\"SMA_short\",\"SMA_long\",\"distance1\",\"distance2\",\"distance3\"]])\n \n svm_model = SVC()\n svm_model = svm_model.fit(X_train_scaled, np.sign(historical_train[[\"return\"]]))\n \n historical_test[\"prediction\"] = svm_model.predict(X_test_scaled)\n \n 
historical_test[\"strategy\"] = historical_test[\"prediction\"] * historical_test[\"return\"]\n \n cum_ret = historical_test[[\"strategy\",\"return\"]].cumsum().apply(np.exp)\n \n strat = strat + np.array(cum_ret[\"strategy\"]) * weighting\n \n actual = actual + np.array(cum_ret[\"return\"]) * weighting\n \n print(ticker, cum_ret)", "PWR strategy return\nDate \n2021-02-01 1.034790 1.034790\n2021-02-02 1.077388 1.077388\n2021-02-03 1.053500 1.053500\n2021-02-04 1.070550 1.070550\n2021-02-05 1.079687 1.079687\n... ... ...\n2021-04-26 1.341661 1.402189\n2021-04-27 1.347991 1.408805\n2021-04-28 1.343468 1.404078\n2021-04-29 1.355743 1.416907\n2021-04-30 1.327214 1.387090\n\n[63 rows x 2 columns]\nTRMB strategy return\nDate \n2021-02-01 1.027842 1.027842\n2021-02-02 1.075828 1.075828\n2021-02-03 1.072642 1.072642\n2021-02-04 1.085327 1.085327\n2021-02-05 1.098166 1.098166\n... ... ...\n2021-04-26 1.295104 1.295104\n2021-04-27 1.294180 1.294180\n2021-04-28 1.284976 1.284976\n2021-04-29 1.288059 1.288059\n2021-04-30 1.262753 1.262753\n\n[63 rows x 2 columns]\nENPH strategy return\nDate \n2021-02-01 1.022122 1.022122\n2021-02-02 1.057213 1.057213\n2021-02-03 1.077772 1.077772\n2021-02-04 1.073882 1.073882\n2021-02-05 1.060625 1.060625\n... ... ...\n2021-04-26 1.022528 1.022528\n2021-04-27 1.013389 1.013389\n2021-04-28 0.879784 0.879784\n2021-04-29 0.859013 0.859013\n2021-04-30 0.834509 0.834509\n\n[63 rows x 2 columns]\nIDXX strategy return\nDate \n2021-02-01 1.029120 1.029120\n2021-02-02 1.059293 1.059293\n2021-02-03 1.015703 1.015703\n2021-02-04 1.033946 1.033946\n2021-02-05 1.052188 1.052188\n... ... ...\n2021-04-26 1.208067 1.182061\n2021-04-27 1.206164 1.180200\n2021-04-28 1.206749 1.180771\n2021-04-29 1.195594 1.169857\n2021-04-30 1.187745 1.162177\n\n[63 rows x 2 columns]\nAES strategy return\nDate \n2021-02-01 1.017369 1.017369\n2021-02-02 1.056655 1.056655\n2021-02-03 1.060766 1.060766\n2021-02-04 1.073979 1.073979\n2021-02-05 1.102672 1.102672\n... ... ...\n2021-04-26 1.170908 1.170908\n2021-04-27 1.154013 1.154013\n2021-04-28 1.161092 1.161092\n2021-04-29 1.164478 1.164478\n2021-04-30 1.161143 1.161143\n\n[63 rows x 2 columns]\nCPRT strategy return\nDate \n2021-02-01 1.010165 1.010165\n2021-02-02 1.038811 1.038811\n2021-02-03 1.021994 1.021994\n2021-02-04 1.042528 1.042528\n2021-02-05 1.089704 1.089704\n... ... ...\n2021-04-26 1.134332 1.136293\n2021-04-27 1.139120 1.141089\n2021-04-28 1.132798 1.134756\n2021-04-29 1.154973 1.156970\n2021-04-30 1.144361 1.146339\n\n[63 rows x 2 columns]\nTT strategy return\nDate \n2021-02-01 0.995823 0.995823\n2021-02-02 1.011573 1.011573\n2021-02-03 1.021317 1.021317\n2021-02-04 1.038626 1.038626\n2021-02-05 1.042191 1.042191\n... ... ...\n2021-04-26 1.218317 1.218317\n2021-04-27 1.225174 1.225174\n2021-04-28 1.229128 1.229128\n2021-04-29 1.237829 1.237829\n2021-04-30 1.225484 1.225484\n\n[63 rows x 2 columns]\nFDX strategy return\nDate \n2021-02-01 1.016710 1.016710\n2021-02-02 1.030659 1.030659\n2021-02-03 1.025529 1.025529\n2021-02-04 1.054972 1.054972\n2021-02-05 1.084678 1.084678\n... ... ...\n2021-04-26 1.186251 1.186251\n2021-04-27 1.237889 1.237889\n2021-04-28 1.237373 1.237373\n2021-04-29 1.275271 1.275271\n2021-04-30 1.250742 1.250742\n\n[63 rows x 2 columns]\nTTWO strategy return\nDate \n2021-02-01 1.002448 1.002448\n2021-02-02 1.033706 1.033706\n2021-02-03 1.010463 1.010463\n2021-02-04 1.005930 1.005930\n2021-02-05 1.036335 1.036335\n... ... 
...\n2021-04-26 0.989355 0.890787\n2021-04-27 0.977372 0.879997\n2021-04-28 0.977933 0.880503\n2021-04-29 0.989725 0.891119\n2021-04-30 0.983630 0.885632\n\n[63 rows x 2 columns]\nTEL strategy return\nDate \n2021-02-01 1.034897 1.034897\n2021-02-02 1.032159 1.032159\n2021-02-03 1.070155 1.070155\n2021-02-04 1.071820 1.071820\n2021-02-05 1.064940 1.064940\n... ... ...\n2021-04-26 1.137468 1.137468\n2021-04-27 1.137300 1.137300\n2021-04-28 1.143798 1.143798\n2021-04-29 1.162975 1.162975\n2021-04-30 1.132166 1.132166\n\n[63 rows x 2 columns]\nXYL strategy return\nDate \n2021-02-01 1.005710 1.005710\n2021-02-02 1.006125 1.006125\n2021-02-03 0.985021 0.985021\n2021-02-04 1.006481 1.006481\n2021-02-05 1.017102 1.017102\n... ... ...\n2021-04-26 1.210516 1.152570\n2021-04-27 1.220642 1.162212\n2021-04-28 1.219547 1.161169\n2021-04-29 1.230668 1.171757\n2021-04-30 1.213038 1.154971\n\n[63 rows x 2 columns]\nHWM strategy return\nDate \n2021-02-01 1.019305 1.019305\n2021-02-02 1.063363 1.063363\n2021-02-03 1.094351 1.094351\n2021-02-04 1.114083 1.114083\n2021-02-05 1.105558 1.105558\n... ... ...\n2021-04-26 1.330612 1.330612\n2021-04-27 1.322788 1.322788\n2021-04-28 1.326510 1.326510\n2021-04-29 1.327336 1.327336\n2021-04-30 1.319924 1.319924\n\n[63 rows x 2 columns]\nPTC strategy return\nDate \n2021-02-01 1.031251 1.031251\n2021-02-02 1.023676 1.038882\n2021-02-03 1.022787 1.039786\n2021-02-04 1.031412 1.048554\n2021-02-05 1.057202 1.074772\n... ... ...\n2021-04-26 1.124616 1.143307\n2021-04-27 1.116710 1.135270\n2021-04-28 1.120495 1.139117\n2021-04-29 1.013585 1.030431\n2021-04-30 0.994919 1.011454\n\n[63 rows x 2 columns]\nFCX strategy return\nDate \n2021-02-01 1.050275 1.050275\n2021-02-02 1.046932 1.046932\n2021-02-03 1.087121 1.087121\n2021-02-04 1.109690 1.109690\n2021-02-05 1.183938 1.183938\n... ... ...\n2021-04-26 1.517864 1.517864\n2021-04-27 1.503935 1.503935\n2021-04-28 1.529810 1.529810\n2021-04-29 1.493319 1.493319\n2021-04-30 1.466726 1.466726\n\n[63 rows x 2 columns]\nNWSA strategy return\nDate \n2021-02-01 1.001548 0.998455\n2021-02-02 0.980065 1.020341\n2021-02-03 0.977592 1.022922\n2021-02-04 0.955187 1.046916\n2021-02-05 0.912864 1.095454\n... ... ...\n2021-04-26 1.163186 1.395845\n2021-04-27 1.158396 1.390098\n2021-04-28 1.141934 1.370343\n2021-04-29 1.157309 1.388794\n2021-04-30 1.143429 1.372137\n\n[63 rows x 2 columns]\nWY strategy return\nDate \n2021-02-01 1.027627 1.027627\n2021-02-02 1.014561 1.040862\n2021-02-03 0.967537 1.091449\n2021-02-04 0.966683 1.090486\n2021-02-05 0.979289 1.104707\n... ... ...\n2021-04-26 1.043750 1.265764\n2021-04-27 1.058363 1.283485\n2021-04-28 1.068897 1.296259\n2021-04-29 1.065408 1.292029\n2021-04-30 1.042280 1.263981\n\n[63 rows x 2 columns]\nPKI strategy return\nDate \n2021-02-01 1.033597 1.033597\n2021-02-02 1.028169 1.028169\n2021-02-03 0.953403 0.953403\n2021-02-04 0.971658 0.971658\n2021-02-05 0.989983 0.989983\n... ... ...\n2021-04-26 0.982283 0.931144\n2021-04-27 0.982501 0.931351\n2021-04-28 0.974384 0.923657\n2021-04-29 0.950023 0.900564\n2021-04-30 0.943711 0.894580\n\n[63 rows x 2 columns]\nTWTR strategy return\nDate \n2021-02-01 1.043054 1.043054\n2021-02-02 1.069937 1.069937\n2021-02-03 1.081491 1.081491\n2021-02-04 1.120417 1.120417\n2021-02-05 1.125783 1.125783\n... ... 
...\n2021-04-26 1.367003 1.367003\n2021-04-27 1.352533 1.352533\n2021-04-28 1.346196 1.346196\n2021-04-29 1.333755 1.333755\n2021-04-30 1.146097 1.146097\n\n[63 rows x 2 columns]\nTSLA strategy return\nDate \n2021-02-01 1.060056 1.060056\n2021-02-02 1.102513 1.102513\n2021-02-03 1.079885 1.079885\n2021-02-04 1.073963 1.073963\n2021-02-05 1.076797 1.076797\n... ... ...\n2021-04-26 0.985481 0.985481\n2021-04-27 0.941809 0.941809\n2021-04-28 0.928092 0.928092\n2021-04-29 0.905125 0.905125\n2021-04-30 0.949552 0.949552\n\n[63 rows x 2 columns]\nNWS strategy return\nDate \n2021-02-01 1.004778 0.995244\n2021-02-02 0.981523 1.018825\n2021-02-03 0.979993 1.020415\n2021-02-04 0.961845 1.039668\n2021-02-05 0.904780 1.105241\n... ... ...\n2021-04-26 1.092680 1.334772\n2021-04-27 1.083038 1.322993\n2021-04-28 1.070345 1.307489\n2021-04-29 1.076970 1.315581\n2021-04-30 1.070385 1.307538\n\n[63 rows x 2 columns]\nCZR strategy return\nDate \n2021-02-01 1.005842 1.005842\n2021-02-02 1.082498 1.082498\n2021-02-03 1.093233 1.093233\n2021-02-04 1.132819 1.132819\n2021-02-05 1.182331 1.182331\n... ... ...\n2021-04-26 1.421526 1.421526\n2021-04-27 1.493500 1.493500\n2021-04-28 1.505730 1.505730\n2021-04-29 1.460489 1.460489\n2021-04-30 1.436751 1.436751\n\n[63 rows x 2 columns]\n" ], [ "spy = np.array(yf.Ticker(\"spy\").history(period=\"2y\").loc[\"2021-02\":\"2021-04\"][\"Close\"].pct_change().cumsum().apply(np.exp))\n\nplt.figure(figsize=(10,8))\nplt.plot(strat,'b-', label=\"Strategy\")\nplt.plot(actual,'r--', label=\"Actual\")\nplt.plot(spy,'g:', label=\"Spy\")\nplt.grid()\nplt.legend()", "_____no_output_____" ] ], [ [ "## EMA", "_____no_output_____" ] ], [ [ "short_win = 12\nlong_win = 26\n\nstrat = np.zeros(63)\n\nactual = np.zeros(63)\n\nfor ticker in tickers:\n historical = yf.Ticker(ticker).history(period=\"2y\")\n historical[\"return\"] = historical[\"Close\"].pct_change()\n historical[\"exp1\"] = historical[\"Close\"].ewm(span=short_win, adjust=False).mean().shift()\n historical[\"exp2\"] = historical[\"Close\"].ewm(span=long_win, adjust=False).mean().shift()\n historical[\"distance1\"] = (historical[\"Close\"] - historical[\"exp1\"]).shift()\n historical[\"distance2\"] = (historical[\"Close\"] - historical[\"exp2\"]).shift()\n #historical[\"distance3\"] = (historical[\"exp1\"] - historical[\"exp2\"]).shift()\n historical.dropna(inplace=True)\n \n historical_train = historical.loc[\"2020-07\":\"2020\"].copy()\n historical_test = historical.loc[\"2021-02\":\"2021-04\"].copy()\n \n scaler = StandardScaler()\n X_scaler = scaler.fit(historical_train[[\"exp1\",\"exp2\",\"distance1\",\"distance2\"]])\n X_train_scaled = X_scaler.transform(historical_train[[\"exp1\",\"exp2\",\"distance1\",\"distance2\"]])\n X_test_scaled = X_scaler.transform(historical_test[[\"exp1\",\"exp2\",\"distance1\",\"distance2\"]])\n \n svm_model = SVC(C=0.5)\n svm_model = svm_model.fit(X_train_scaled, np.sign(historical_train[[\"return\"]]))\n \n historical_test[\"prediction\"] = svm_model.predict(X_test_scaled)\n \n historical_test[\"strategy\"] = historical_test[\"prediction\"] * historical_test[\"return\"]\n \n cum_ret = historical_test[[\"strategy\",\"return\"]].cumsum().apply(np.exp)\n \n strat = strat + np.array(cum_ret[\"strategy\"]) * weighting\n \n actual = actual + np.array(cum_ret[\"return\"]) * weighting\n \n print(ticker, cum_ret)", "PWR strategy return\nDate \n2021-02-01 1.034790 1.034790\n2021-02-02 1.077388 1.077388\n2021-02-03 1.053500 1.053500\n2021-02-04 1.070550 1.070550\n2021-02-05 1.079687 1.079687\n... ... 
...\n2021-04-26 1.402189 1.402189\n2021-04-27 1.408805 1.408805\n2021-04-28 1.404078 1.404078\n2021-04-29 1.416907 1.416907\n2021-04-30 1.387090 1.387090\n\n[63 rows x 2 columns]\nTRMB strategy return\nDate \n2021-02-01 1.027842 1.027842\n2021-02-02 1.075828 1.075828\n2021-02-03 1.072642 1.072642\n2021-02-04 1.085327 1.085327\n2021-02-05 1.098166 1.098166\n... ... ...\n2021-04-26 1.295104 1.295104\n2021-04-27 1.294180 1.294180\n2021-04-28 1.284976 1.284976\n2021-04-29 1.288059 1.288059\n2021-04-30 1.262753 1.262753\n\n[63 rows x 2 columns]\nENPH strategy return\nDate \n2021-02-01 1.022122 1.022122\n2021-02-02 1.057213 1.057213\n2021-02-03 1.077772 1.077772\n2021-02-04 1.073882 1.073882\n2021-02-05 1.060625 1.060625\n... ... ...\n2021-04-26 1.022528 1.022528\n2021-04-27 1.013389 1.013389\n2021-04-28 0.879784 0.879784\n2021-04-29 0.859013 0.859013\n2021-04-30 0.834509 0.834509\n\n[63 rows x 2 columns]\nIDXX strategy return\nDate \n2021-02-01 1.029120 1.029120\n2021-02-02 1.059293 1.059293\n2021-02-03 1.015703 1.015703\n2021-02-04 1.033946 1.033946\n2021-02-05 1.052188 1.052188\n... ... ...\n2021-04-26 1.182061 1.182061\n2021-04-27 1.180200 1.180200\n2021-04-28 1.180771 1.180771\n2021-04-29 1.169857 1.169857\n2021-04-30 1.162177 1.162177\n\n[63 rows x 2 columns]\nAES strategy return\nDate \n2021-02-01 1.017369 1.017369\n2021-02-02 1.056655 1.056655\n2021-02-03 1.060766 1.060766\n2021-02-04 1.073979 1.073979\n2021-02-05 1.102672 1.102672\n... ... ...\n2021-04-26 1.170908 1.170908\n2021-04-27 1.154013 1.154013\n2021-04-28 1.161092 1.161092\n2021-04-29 1.164478 1.164478\n2021-04-30 1.161143 1.161143\n\n[63 rows x 2 columns]\nCPRT strategy return\nDate \n2021-02-01 1.010165 1.010165\n2021-02-02 1.038811 1.038811\n2021-02-03 1.021994 1.021994\n2021-02-04 1.042528 1.042528\n2021-02-05 1.089704 1.089704\n... ... ...\n2021-04-26 1.168394 1.136293\n2021-04-27 1.173326 1.141089\n2021-04-28 1.166813 1.134756\n2021-04-29 1.189655 1.156970\n2021-04-30 1.178724 1.146339\n\n[63 rows x 2 columns]\nTT strategy return\nDate \n2021-02-01 0.995823 0.995823\n2021-02-02 1.011573 1.011573\n2021-02-03 1.021317 1.021317\n2021-02-04 1.038626 1.038626\n2021-02-05 1.042191 1.042191\n... ... ...\n2021-04-26 1.218317 1.218317\n2021-04-27 1.225174 1.225174\n2021-04-28 1.229128 1.229128\n2021-04-29 1.237829 1.237829\n2021-04-30 1.225484 1.225484\n\n[63 rows x 2 columns]\nFDX strategy return\nDate \n2021-02-01 0.983565 1.016710\n2021-02-02 0.970253 1.030659\n2021-02-03 0.965424 1.025529\n2021-02-04 0.993142 1.054972\n2021-02-05 1.021107 1.084678\n... ... ...\n2021-04-26 1.116727 1.186251\n2021-04-27 1.165338 1.237889\n2021-04-28 1.164852 1.237373\n2021-04-29 1.200529 1.275271\n2021-04-30 1.177437 1.250742\n\n[63 rows x 2 columns]\nTTWO strategy return\nDate \n2021-02-01 1.002448 1.002448\n2021-02-02 1.033706 1.033706\n2021-02-03 1.010463 1.010463\n2021-02-04 1.005930 1.005930\n2021-02-05 1.036335 1.036335\n... ... ...\n2021-04-26 0.890787 0.890787\n2021-04-27 0.879997 0.879997\n2021-04-28 0.880503 0.880503\n2021-04-29 0.891119 0.891119\n2021-04-30 0.885632 0.885632\n\n[63 rows x 2 columns]\nTEL strategy return\nDate \n2021-02-01 1.034897 1.034897\n2021-02-02 1.032159 1.032159\n2021-02-03 1.070155 1.070155\n2021-02-04 1.071820 1.071820\n2021-02-05 1.064940 1.064940\n... ... 
...\n2021-04-26 1.137468 1.137468\n2021-04-27 1.137300 1.137300\n2021-04-28 1.143798 1.143798\n2021-04-29 1.162975 1.162975\n2021-04-30 1.132166 1.132166\n\n[63 rows x 2 columns]\nXYL strategy return\nDate \n2021-02-01 1.005710 1.005710\n2021-02-02 1.006125 1.006125\n2021-02-03 0.985021 0.985021\n2021-02-04 1.006481 1.006481\n2021-02-05 1.017102 1.017102\n... ... ...\n2021-04-26 1.203848 1.152570\n2021-04-27 1.213918 1.162212\n2021-04-28 1.215009 1.161169\n2021-04-29 1.226089 1.171757\n2021-04-30 1.243909 1.154971\n\n[63 rows x 2 columns]\nHWM strategy return\nDate \n2021-02-01 1.019305 1.019305\n2021-02-02 1.063363 1.063363\n2021-02-03 1.094351 1.094351\n2021-02-04 1.114083 1.114083\n2021-02-05 1.105558 1.105558\n... ... ...\n2021-04-26 1.330612 1.330612\n2021-04-27 1.322788 1.322788\n2021-04-28 1.326510 1.326510\n2021-04-29 1.327336 1.327336\n2021-04-30 1.319924 1.319924\n\n[63 rows x 2 columns]\nPTC strategy return\nDate \n2021-02-01 1.031251 1.031251\n2021-02-02 1.038882 1.038882\n2021-02-03 1.039786 1.039786\n2021-02-04 1.048554 1.048554\n2021-02-05 1.074772 1.074772\n... ... ...\n2021-04-26 1.143307 1.143307\n2021-04-27 1.135270 1.135270\n2021-04-28 1.139117 1.139117\n2021-04-29 1.030431 1.030431\n2021-04-30 1.011454 1.011454\n\n[63 rows x 2 columns]\nFCX strategy return\nDate \n2021-02-01 1.050275 1.050275\n2021-02-02 1.046932 1.046932\n2021-02-03 1.087121 1.087121\n2021-02-04 1.109690 1.109690\n2021-02-05 1.183938 1.183938\n... ... ...\n2021-04-26 1.517864 1.517864\n2021-04-27 1.503935 1.503935\n2021-04-28 1.529810 1.529810\n2021-04-29 1.493319 1.493319\n2021-04-30 1.466726 1.466726\n\n[63 rows x 2 columns]\nNWSA strategy return\nDate \n2021-02-01 1.001548 0.998455\n2021-02-02 0.980065 1.020341\n2021-02-03 0.977592 1.022922\n2021-02-04 0.955187 1.046916\n2021-02-05 0.999472 1.095454\n... ... ...\n2021-04-26 1.273543 1.395845\n2021-04-27 1.268299 1.390098\n2021-04-28 1.250276 1.370343\n2021-04-29 1.267109 1.388794\n2021-04-30 1.251912 1.372137\n\n[63 rows x 2 columns]\nWY strategy return\nDate \n2021-02-01 1.027627 1.027627\n2021-02-02 1.040862 1.040862\n2021-02-03 1.091449 1.091449\n2021-02-04 1.090486 1.090486\n2021-02-05 1.104707 1.104707\n... ... ...\n2021-04-26 1.265764 1.265764\n2021-04-27 1.283485 1.283485\n2021-04-28 1.296259 1.296259\n2021-04-29 1.292029 1.292029\n2021-04-30 1.263981 1.263981\n\n[63 rows x 2 columns]\nPKI strategy return\nDate \n2021-02-01 1.033597 1.033597\n2021-02-02 1.028169 1.028169\n2021-02-03 0.953403 0.953403\n2021-02-04 0.971658 0.971658\n2021-02-05 0.989983 0.989983\n... ... ...\n2021-04-26 0.931144 0.931144\n2021-04-27 0.931351 0.931351\n2021-04-28 0.923657 0.923657\n2021-04-29 0.900564 0.900564\n2021-04-30 0.894580 0.894580\n\n[63 rows x 2 columns]\nTWTR strategy return\nDate \n2021-02-01 1.043054 1.043054\n2021-02-02 1.069937 1.069937\n2021-02-03 1.081491 1.081491\n2021-02-04 1.120417 1.120417\n2021-02-05 1.125783 1.125783\n... ... ...\n2021-04-26 1.367003 1.367003\n2021-04-27 1.352533 1.352533\n2021-04-28 1.346196 1.346196\n2021-04-29 1.333755 1.333755\n2021-04-30 1.146097 1.146097\n\n[63 rows x 2 columns]\nTSLA strategy return\nDate \n2021-02-01 1.060056 1.060056\n2021-02-02 1.102513 1.102513\n2021-02-03 1.079885 1.079885\n2021-02-04 1.073963 1.073963\n2021-02-05 1.076797 1.076797\n... ... 
...\n2021-04-26 0.985481 0.985481\n2021-04-27 0.941809 0.941809\n2021-04-28 0.928092 0.928092\n2021-04-29 0.905125 0.905125\n2021-04-30 0.949552 0.949552\n\n[63 rows x 2 columns]\nNWS strategy return\nDate \n2021-02-01 1.004778 0.995244\n2021-02-02 0.981523 1.018825\n2021-02-03 0.979993 1.020415\n2021-02-04 0.961845 1.039668\n2021-02-05 0.904780 1.105241\n... ... ...\n2021-04-26 1.092680 1.334772\n2021-04-27 1.083038 1.322993\n2021-04-28 1.070345 1.307489\n2021-04-29 1.076970 1.315581\n2021-04-30 1.070385 1.307538\n\n[63 rows x 2 columns]\nCZR strategy return\nDate \n2021-02-01 1.005842 1.005842\n2021-02-02 1.082498 1.082498\n2021-02-03 1.093233 1.093233\n2021-02-04 1.132819 1.132819\n2021-02-05 1.182331 1.182331\n... ... ...\n2021-04-26 1.421526 1.421526\n2021-04-27 1.493500 1.493500\n2021-04-28 1.505730 1.505730\n2021-04-29 1.460489 1.460489\n2021-04-30 1.436751 1.436751\n\n[63 rows x 2 columns]\n" ], [ "spy = np.array(yf.Ticker(\"spy\").history(period=\"2y\").loc[\"2021-02\":\"2021-04\"][\"Close\"].pct_change().cumsum().apply(np.exp))\n\nplt.figure(figsize=(10,8))\nplt.plot(strat,'b-', label=\"Strategy\")\nplt.plot(actual,'r--', label=\"Actual\")\nplt.plot(spy,'g:', label=\"Spy\")\nplt.grid()\nplt.legend()", "_____no_output_____" ] ], [ [ "# MACD", "_____no_output_____" ] ], [ [ "short_win = 12\nlong_win = 26\nsignal_line = 9\nstrat = np.zeros(63)\n\nactual = np.zeros(63)\n\nfor ticker in tickers:\n historical = yf.Ticker(ticker).history(period=\"2y\")\n historical[\"return\"] = historical[\"Close\"].pct_change()\n historical[\"exp1\"] = historical[\"Close\"].ewm(span=short_win, adjust=False).mean().shift()\n historical[\"exp2\"] = historical[\"Close\"].ewm(span=long_win, adjust=False).mean().shift()\n historical[\"macd\"] = historical[\"exp1\"] - historical[\"exp2\"]\n historical[\"exp3\"] = historical[\"Close\"].ewm(span=signal_line, adjust=False).mean().shift()\n historical[\"macd_histogram\"] = historical[\"macd\"] - historical[\"exp3\"]\n historical[\"lag_1\"] = historical[\"Close\"].shift()\n historical[\"roc\"] = ((historical[\"Close\"] - historical[\"lag_1\"])/ historical[\"lag_1\"]).shift()\n historical[\"macd_histogram_lag1\"] = historical[\"macd_histogram\"].shift()\n historical[\"roc_macd\"] = ((historical[\"macd_histogram\"]-historical[\"macd_histogram_lag1\"])/historical[\"macd_histogram_lag1\"])\n historical.dropna(inplace=True)\n \n historical_train = historical.loc[:\"2020\"].copy()\n historical_test = historical.loc[\"2021-02\":\"2021-04\"].copy()\n \n scaler = StandardScaler()\n X_scaler = scaler.fit(historical_train[[\"roc\",\"roc_macd\"]])\n X_train_scaled = X_scaler.transform(historical_train[[\"roc\",\"roc_macd\"]])\n X_test_scaled = X_scaler.transform(historical_test[[\"roc\",\"roc_macd\"]])\n \n svm_model = SVC(C=0.5)\n svm_model = svm_model.fit(X_train_scaled, np.sign(historical_train[[\"return\"]]))\n \n historical_test[\"prediction\"] = svm_model.predict(X_test_scaled)\n \n historical_test[\"strategy\"] = historical_test[\"prediction\"] * historical_test[\"return\"]\n \n cum_ret = historical_test[[\"strategy\",\"return\"]].cumsum().apply(np.exp)\n \n strat = strat + np.array(cum_ret[\"strategy\"]) * weighting\n \n actual = actual + np.array(cum_ret[\"return\"]) * weighting\n \n print(ticker, accuracy_score(historical_test[\"prediction\"],\n np.sign(historical_test[\"return\"])))", "PWR 0.6507936507936508\nTRMB 0.6349206349206349\nENPH 0.5238095238095238\nIDXX 0.5396825396825397\nAES 0.6190476190476191\nCPRT 0.5079365079365079\nTT 0.6349206349206349\nFDX 
0.47619047619047616\nTTWO 0.4603174603174603\nTEL 0.4603174603174603\nXYL 0.5238095238095238\nHWM 0.6031746031746031\nPTC 0.5555555555555556\nFCX 0.5238095238095238\nNWSA 0.5873015873015873\nWY 0.5555555555555556\nPKI 0.5238095238095238\nTWTR 0.4603174603174603\nTSLA 0.47619047619047616\nNWS 0.5079365079365079\nCZR 0.6190476190476191\nDE 0.5396825396825397\nISRG 0.5555555555555556\nHON 0.6031746031746031\nURI 0.5873015873015873\nMS 0.6190476190476191\nVIAC 0.6507936507936508\nPAYC 0.5396825396825397\nSIVB 0.47619047619047616\nZBRA 0.4444444444444444\nALB 0.6031746031746031\nAAPL 0.5079365079365079\nCAT 0.6666666666666666\nTPR 0.6031746031746031\nMPWR 0.5079365079365079\nAPTV 0.5396825396825397\nEXPE 0.5873015873015873\nTGT 0.5555555555555556\nNTAP 0.6031746031746031\nDXC 0.5238095238095238\nCARR 0.5555555555555556\nUAA 0.5714285714285714\nLRCX 0.6031746031746031\nUPS 0.5873015873015873\nWRK 0.5714285714285714\nCE 0.47619047619047616\nQRVO 0.5396825396825397\nDVA 0.6031746031746031\nETSY 0.4444444444444444\nIP 0.5714285714285714\n" ], [ "spy = np.array(yf.Ticker(\"spy\").history(period=\"2y\").loc[\"2021-02\":\"2021-04\"][\"Close\"].pct_change().cumsum().apply(np.exp))\n\nplt.figure(figsize=(10,8))\nplt.plot(strat,'b-', label=\"Strategy\")\nplt.plot(actual,'r--', label=\"Actual\")\nplt.plot(spy,'g:', label=\"Spy\")\nplt.grid()\nplt.legend()", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ] ]
d0146f022c2a0e5aaec740225ce78ea17311a7ec
33,976
ipynb
Jupyter Notebook
ejemplo_grafica.ipynb
jorgemauricio/generar_boletin
619e433453928d839c7cd0ee03551cd5f45dbede
[ "MIT" ]
null
null
null
ejemplo_grafica.ipynb
jorgemauricio/generar_boletin
619e433453928d839c7cd0ee03551cd5f45dbede
[ "MIT" ]
null
null
null
ejemplo_grafica.ipynb
jorgemauricio/generar_boletin
619e433453928d839c7cd0ee03551cd5f45dbede
[ "MIT" ]
null
null
null
76.69526
10,416
0.774517
[ [ [ "import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom time import strftime\nimport os\n%matplotlib inline", "_____no_output_____" ], [ "\"\"\"\nFunción que genera los mapas de temperatura mínima\n\"\"\"\n#%% fecha del pronostico\nfechaPronostico = strftime(\"%Y-%m-%d\")\nvariables = [\"Rain\",\"Tmax\",\"Tmin\",\"Tpro\",\"Hr\",\"Hrmin\",\"Hrmax\"]\n\nLAT_MAX = 33.5791\nLAT_MIN = 12.3782\n\nLONG_MAX = -86.101\nLONG_MIN = -118.236\n\n#%% generate arrayFechas\n# Generate Days\narrayFechas = []\ntanio, tmes, tdia = fechaPronostico.split('-')\nanio = int(tanio)\nmes = int(tmes)\ndia = int(tdia)\n\ndirAnio = anio\ndirMes = mes\ndirDia = dia\n\n#%% generate arrayFechas\n\nfor i in range(0,5,1):\n if i == 0:\n newDiaString = '{}'.format(dia)\n if len(newDiaString) == 1:\n newDiaString = '0' + newDiaString\n newMesString = '{}'.format(mes)\n if len(newMesString) == 1:\n newMesString = '0' + newMesString\n fecha = '{}'.format(anio)+\"-\"+newMesString+\"-\"+newDiaString\n arrayFechas.append(fecha)\n if i > 0:\n dia = dia + 1\n if mes == 2 and anio % 4 == 0:\n diaEnElMes = 29\n elif mes == 2 and anio % 4 != 0:\n diaEnElMes = 28\n elif mes == 1 or mes == 3 or mes == 5 or mes == 7 or mes == 8 or mes == 10 or mes == 12:\n diaEnElMes = 31\n elif mes == 4 or mes == 6 or mes == 9 or mes == 11:\n diaEnElMes = 30\n if dia > diaEnElMes:\n mes = mes + 1\n dia = 1\n if mes > 12:\n anio = anio + 1\n mes = 1\n newDiaString = '{}'.format(dia)\n if len(newDiaString) == 1:\n newDiaString = '0' + newDiaString\n newMesString = '{}'.format(mes)\n if len(newMesString) == 1:\n newMesString = '0' + newMesString\n fecha = '{}'.format(anio)+\"-\"+newMesString+\"-\"+newDiaString\n arrayFechas.append(fecha)\n\n# path server\npath = \"/home/jorge/Documents/Research/generar_boletin\"\n# os.chdir(\"/home/jorge/Documents/work/autoPronosticoSonora\")\nos.chdir(path)\n\n#%% read csvs\npathFile1 = '{}/data/{}/d1.txt'.format(path, fechaPronostico)\npathFile2 = '{}/data/{}/d2.txt'.format(path, fechaPronostico)\npathFile3 = '{}/data/{}/d3.txt'.format(path, fechaPronostico)\npathFile4 = '{}/data/{}/d4.txt'.format(path, fechaPronostico)\npathFile5 = '{}/data/{}/d5.txt'.format(path, fechaPronostico)\n\ndata1 = pd.read_table(pathFile1, sep=',')\ndata2 = pd.read_table(pathFile2, sep=',')\ndata3 = pd.read_table(pathFile3, sep=',')\ndata4 = pd.read_table(pathFile4, sep=',')\ndata5 = pd.read_table(pathFile5, sep=',')\n\ncols = [\"Long\",\"Lat\",\"Graupel\",\"Hail\",\"Rain\",\"Tmax\",\"Tmin\",\"Tpro\",\"Dpoint\",\"Hr\",\"Windpro\",\"WindDir\",\"Hrmin\",\"Hrmax\",\"TprSoil0_10\",\"TprSoil10_40\",\"WprSoil0_10\",\"WprSoil10_40\"]\n\ndata1.columns = cols\ndata2.columns = cols\ndata3.columns = cols\ndata4.columns = cols\ndata5.columns = cols", "_____no_output_____" ], [ "#%% make one dataFrame\nvariable = \"Rain\"\ndata = data1.filter(items=['Long', 'Lat'])\ndata['{}1'.format(variable)] = data1[variable]\ndata['{}2'.format(variable)] = data2[variable]\ndata['{}3'.format(variable)] = data3[variable]\ndata['{}4'.format(variable)] = data4[variable]\ndata['{}5'.format(variable)] = data5[variable]\n\n#%% get values from Ags\ndata = data.loc[data['Lat'] >= LAT_MIN]\ndata = data.loc[data['Lat'] <= LAT_MAX]\ndata = data.loc[data['Long'] >= LONG_MIN]\ndata = data.loc[data['Long'] <= LONG_MAX]\n\ny1 = []\ny2 = []\n\nfor k in range(1,6):\n y1.append(data[\"Rain{}\".format(k)].max())\n y2.append(data[\"Rain{}\".format(k)].min())\n print()\n \nind = np.arange(5)\nfig, ax = plt.subplots()\nwidth = 0.35\nrects1 = ax.bar(ind, y1, 
width, color='r')\nrects2 = ax.bar(ind + width, y2, width, color='b')\n\n# add some text for labels, title and axes ticks\nax.set_ylabel('mm')\nax.set_title('Precipitación')\nax.set_xticks(ind + width / 2)\nax.set_xticklabels(arrayFechas)\n\nax.legend((rects1[0], rects2[0]), ('Máximo', 'Mínimo'))\n\ndef autolabel(rects):\n \"\"\"\n Attach a text label above each bar displaying its height\n \"\"\"\n for rect in rects:\n height = rect.get_height()\n ax.text(rect.get_x() + rect.get_width()/2., .90*height,\n '%d' % int(height),\n ha='center', va='bottom')\n\nautolabel(rects1)\nautolabel(rects2)\n", "\n\n\n\n\n" ], [ "data.head()", "_____no_output_____" ], [ "data.columns", "_____no_output_____" ], [ "plt.plot(1, data[\"Rain1\"].max(), 2, \"r\", data[\"Rain2\"].max(), \"b\")\nplt.title(\"Gráfica\")", "_____no_output_____" ], [ "data[\"Rain1\"].max()", "_____no_output_____" ], [ "pd.Series(data.max()).plot.bar()", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code" ] ]
d01470826d6102ed06aef71444fc4a86bf9d08b8
11,686
ipynb
Jupyter Notebook
examples/site-specific/cancer-care-associates/production/Winston Lutz/prototyping/refactor/018_run_all_iViewDB.ipynb
lipteck/pymedphys
6e8e2b5db8173eafa6006481ceeca4f4341789e0
[ "Apache-2.0" ]
2
2020-02-04T03:21:20.000Z
2020-04-11T14:17:53.000Z
site-specific/cancer-care-associates/Winston Lutz/prototyping/refactor/018_run_all_iViewDB.ipynb
SimonBiggs/pymedphys
83f02eac6549ac155c6963e0a8d1f9284359b652
[ "Apache-2.0" ]
6
2020-10-06T15:36:46.000Z
2022-02-27T05:15:17.000Z
site-specific/cancer-care-associates/Winston Lutz/prototyping/refactor/018_run_all_iViewDB.ipynb
SimonBiggs/pymedphys
83f02eac6549ac155c6963e0a8d1f9284359b652
[ "Apache-2.0" ]
1
2020-12-20T14:14:00.000Z
2020-12-20T14:14:00.000Z
24.970085
132
0.524645
[ [ [ "%load_ext autoreload\n%autoreload 2", "_____no_output_____" ], [ "import pathlib\n\nimport IPython.display\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nimport scipy.interpolate\nimport scipy.signal\n\nimport pymedphys\nimport pymedphys._wlutz.iview", "_____no_output_____" ], [ "indexed_dir = pathlib.Path(r'S:\\DataExchange\\iViewDB_decoded\\indexed')\nmovie_dirs = list(indexed_dir.glob('*/*/*/*/*'))\nmovie_dirs", "_____no_output_____" ], [ "wlutz_results = {}", "_____no_output_____" ], [ "edge_lengths = [20, 24]\npd.set_option(\"display.max_rows\", 101)", "_____no_output_____" ], [ "for directory in movie_dirs:\n image_paths = list(directory.glob('*.png'))\n print(image_paths)\n wlutz_results[directory] = pymedphys._wlutz.iview.batch_process(image_paths, edge_lengths, display_figure=True)\n \n IPython.display.display(wlutz_results[directory])", "_____no_output_____" ], [ "for directory in movie_dirs:\n try:\n wlutz_results[directory]\n except KeyError:\n image_paths = list(directory.glob('*.png'))\n print(image_paths)\n \n try:\n wlutz_results[directory] = pymedphys._wlutz.iview.batch_process(image_paths, edge_lengths, display_figure=True)\n IPython.display.display(wlutz_results[directory])\n except ValueError:\n continue", "_____no_output_____" ], [ "for directory in movie_dirs:\n try:\n wlutz_results[directory]\n except KeyError:\n image_paths = list(directory.glob('*.png'))\n print(image_paths)\n \n try:\n wlutz_results[directory] = pymedphys._wlutz.iview.batch_process(image_paths, edge_lengths, display_figure=True)\n IPython.display.display(wlutz_results[directory])\n except ValueError:\n continue", "_____no_output_____" ], [ "for directory in movie_dirs:\n try:\n wlutz_results[directory]\n except KeyError:\n image_paths = list(directory.glob('*.png'))\n print(image_paths)\n \n try:\n wlutz_results[directory] = pymedphys._wlutz.iview.batch_process(image_paths, edge_lengths, display_figure=True)\n IPython.display.display(wlutz_results[directory])\n except ValueError:\n continue", "_____no_output_____" ], [ "for key, table in wlutz_results.items():\n print(key)\n IPython.display.display(table)", "_____no_output_____" ], [ "keys = list(wlutz_results.keys())\nkeys", "_____no_output_____" ], [ "direction_keys = [\n key.parent.stem for key in keys\n]\n\ndirection_keys", "_____no_output_____" ], [ "rotations = [\n wlutz_results[key]['Rotation']\n for key in keys\n]\n\n\n\n\nlt_zero = rotations[0] < 0\n\ngantry = np.empty_like(rotations[0])\ngantry[lt_zero] = -180 - rotations[0][lt_zero]\n\ngte_zero = np.invert(lt_zero)\n\ngantry[gte_zero] = 180 - rotations[0][gte_zero]\n\ngantry", "_____no_output_____" ], [ "gantry = []\n\nfor i, direction_key in enumerate(direction_keys):\n\n if direction_keys[i] == '00_CW':\n diff = np.diff(np.concatenate([[-180], rotations[i]]))\n diff[diff > 0] = diff[diff > 0] - 180\n\n gantry.append(-180 - np.cumsum(diff * 2))\n elif direction_keys[i] == '01_CC':\n diff = np.diff(np.concatenate([[0], rotations[i]]))\n diff[diff < 0] = diff[diff < 0] + 180\n\n gantry.append(180 - np.cumsum(diff * 2))\n else:\n raise ValueError(\"Expected one of '00_CW' or '01_CC'\")\n\n\n \ngantry", "_____no_output_____" ], [ "bb_x = [\n wlutz_results[key]['BB x'] for key in keys\n]\nbb_y = [\n wlutz_results[key]['BB y'] for key in keys\n]", "_____no_output_____" ], [ "gantry", "_____no_output_____" ], [ "bb_x", "_____no_output_____" ], [ "scipy.interpolate.interp1d?", "_____no_output_____" ], [ "interp_bb_x = [\n scipy.interpolate.interp1d(g, x, 
bounds_error=False, fill_value='extrapolate')\n for g, x in zip(gantry, bb_x)\n]\n\ndef get_avg_bb_x(gantry):\n results = []\n \n for interp in interp_bb_x:\n results.append(interp(gantry))\n \n return (np.min(results, axis=0) + np.max(results, axis=0))/2\n\n\ninterp_bb_y = [\n scipy.interpolate.interp1d(g, y, bounds_error=False, fill_value='extrapolate')\n for g, y in zip(gantry, bb_y)\n]\n\ndef get_avg_bb_y(gantry):\n results = []\n \n for interp in interp_bb_y:\n results.append(interp(gantry))\n \n return (np.min(results, axis=0) + np.max(results, axis=0))/2\n\nget_avg_bb_y([0, 2])", "_____no_output_____" ], [ "# gantry_all = np.concatenate(gantry)\n# ind = np.argsort(gantry_all)\n# sorted_gantry = gantry_all[ind]\n\n# within_bounds = np.logical_and(sorted_gantry <= 180, sorted_gantry >= -180)\n# sorted_gantry = sorted_gantry[within_bounds]\n\n# sorted_bb_x = np.concatenate(bb_x)[ind][within_bounds]\n# sorted_bb_y = np.concatenate(bb_y)[ind][within_bounds]", "_____no_output_____" ], [ "# b, a = scipy.signal.butter(3, 0.05)\n# filtered_bb_x = scipy.signal.filtfilt(b, a, sorted_bb_x)\n# filtered_bb_y = scipy.signal.filtfilt(b, a, sorted_bb_y)", "_____no_output_____" ], [ "# plt.plot(sorted_gantry, filtered_bb_x)", "_____no_output_____" ], [ "# unique_gantry, unique_inverse = np.unique(sorted_gantry, return_inverse=True)\n# inc = np.arange(len(unique_inverse))\n\n# make_unique = np.ones((len(unique_gantry), len(unique_inverse))) * np.nan\n# make_unique[unique_inverse, inc] = sorted_bb_x\n# striclty_increasing_bb_x = np.nanmean(make_unique, axis=1)\n\n# make_unique[unique_inverse, inc] = sorted_bb_y\n# striclty_increasing_bb_y = np.nanmean(make_unique, axis=1)", "_____no_output_____" ], [ "# def predict_bb_pos(gantry, gantry_range=10):\n# gantry = np.array(gantry)\n \n# lte = gantry[:,None] - gantry_range <= gantry_all[None,:]\n# gte = gantry[:,None] + gantry_range >= gantry_all[None,:]\n# in_range = np.logical_and(lte, gte)\n \n# sorted_bb_x\n \n# return in_range\n \n \n# predict_bb_pos([0, 1], gantry_range=10)", "_____no_output_____" ], [ "# unique_gantry = np.unique(sorted_gantry)\n# bb_x_interp = scipy.interpolate.interp1d(sorted_gantry, filtered_bb_x, bounds_error=False)\n# bb_y_interp = scipy.interpolate.interp1d(sorted_gantry, filtered_bb_y, bounds_error=False)", "_____no_output_____" ], [ "# bb_x_interp = scipy.interpolate.UnivariateSpline(unique_gantry, strictly_increasing_bb_x, s=0.1)\n# bb_y_interp = scipy.interpolate.UnivariateSpline(unique_gantry, strictly_increasing_bb_y, s=1)\n\ngantry_i = np.linspace(-180, 180, 91)", "_____no_output_____" ], [ "for i, key in enumerate(keys): \n plt.plot(gantry[i], bb_x[i], '.')\n \nplt.plot(gantry_i, get_avg_bb_x(gantry_i))\nplt.xlim([-180, 180])", "_____no_output_____" ], [ "for i, key in enumerate(keys):\n plt.plot(gantry[i], bb_y[i], '.')\n\nplt.plot(gantry_i, get_avg_bb_y(gantry_i))\nplt.xlim([-180, 180])", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
d014802ecf61c6733ae00042b294d758018637ab
42,702
ipynb
Jupyter Notebook
LogisticRegression/LogisticRegression.ipynb
BossLogic314/Machine-Learning
34bb460bb6f2789c0010d43b8738fe01e6c3b62c
[ "Apache-2.0" ]
null
null
null
LogisticRegression/LogisticRegression.ipynb
BossLogic314/Machine-Learning
34bb460bb6f2789c0010d43b8738fe01e6c3b62c
[ "Apache-2.0" ]
null
null
null
LogisticRegression/LogisticRegression.ipynb
BossLogic314/Machine-Learning
34bb460bb6f2789c0010d43b8738fe01e6c3b62c
[ "Apache-2.0" ]
null
null
null
163.609195
19,748
0.885345
[ [ [ "import numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.datasets import make_blobs\nfrom sklearn.model_selection import train_test_split", "_____no_output_____" ], [ "X, y = make_blobs(n_samples = 100, centers = 2, random_state = 42)\n\n# Splitting the data for training and testing\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5, random_state=42)", "_____no_output_____" ], [ "# Plotting the available data\nplt.scatter(X[:, 0], X[:, 1], c = y)", "_____no_output_____" ], [ "class ImplementLogisticRegression:\n \n # Constructor\n def __init__(self, learning_rate = 0.01):\n self.learning_rate = learning_rate\n \n # Training the model\n def fit(self, X, y):\n \n # Creating a column of ones for multiplication of vectors\n ones = np.ones((X.shape[0], 1))\n \n # Storing x coordinates\n self.X = np.hstack([ones, X])\n \n # Storing the values of 'yi's\n self.y = y.reshape(-1, 1)\n \n # Storing the variables of theta\n self.theta = np.array(np.zeros((self.X.shape[1], 1)))\n \n # Carrying out gradient descent a fixed number of times\n for i in range(200):\n self.gradient_ascent()\n \n def predict(self, X_test, y_test):\n \n # Storing the number of correct and the total number of predictions\n correct, total = 0, 0\n predicted_classes = []\n \n # Predicting for all the points\n for x1, x2, y in zip(X_test[:, 0], X_test[:, 1], y_test[:]):\n \n # Noting that a prediction has been made\n total += 1\n \n # Obtaining the attributes of theta\n theta = self.theta\n theta_0 = theta[0]\n theta_1 = theta[1]\n theta_2 = theta[2]\n \n # Computing the expression by substituting the values in the equation\n val = theta_0 + theta_1 * x1 + theta_2 * x2\n \n # Predicting the class to which the point belongs to\n if(val >= 0):\n predicted_classes.append(1)\n val = 1\n else:\n predicted_classes.append(0)\n val = 0\n \n # Noting for a correct prediction\n if val == y:\n correct += 1\n # Returning the set of predictions and the percentage accuracy\n return np.array(predicted_classes), correct / total * 100\n \n # Returns the outputs from the hypothesis\n def get_hypothesis(self):\n dot_product = np.dot(self.X, self.theta)\n \n # Calculating the outputs from the hypothesis for all the values of X\n hypothesis = 1 / (1 + np.exp(-dot_product))\n return hypothesis\n \n # Performing the gradient descent\n def gradient_ascent(self):\n \n # Getting the hypothesis output for all the values of X\n hypothesis = self.get_hypothesis()\n \n # Calculating the sigma term in the gradient ascent formula\n sigma = np.dot(self.X.T, self.y - hypothesis)\n \n # Updating the values of theta\n self.theta += self.learning_rate * sigma", "_____no_output_____" ], [ "# Creating a model for logistic regression\nmodel = ImplementLogisticRegression()", "_____no_output_____" ], [ "# Training the model with training data\nmodel.fit(X_train, y_train)", "_____no_output_____" ], [ "# Getting the attributes of the line\ntheta = model.theta\ntheta_0 = theta[0]\ntheta_1 = theta[1]\ntheta_2 = theta[2]\n\n# Plotting the points\nplt.scatter(X_train[:, 0], X_train[:, 1], c = y_train)\n\n# Plotting the line dividing the two classes\nx_plot = np.linspace(-2, 3)\ny_plot = -theta_1 / theta_2 * x_plot - theta_0 / theta_2\nplt.plot(x_plot, y_plot, color = 'red')", "_____no_output_____" ], [ "# Obtaining the set of predictions and their accuracy\npredictions, accuracy = model.predict(X_test, y_test)", "_____no_output_____" ], [ "print(accuracy)", "100.0\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
d01482cd6b436abf266f4a3ce37124cf8ebdb88c
10,388
ipynb
Jupyter Notebook
tutorial/t05_trace_debug_training.ipynb
AriChow/fastestimator
d381d9acc1d42c6cf88a4424e083375cf98140bf
[ "Apache-2.0" ]
null
null
null
tutorial/t05_trace_debug_training.ipynb
AriChow/fastestimator
d381d9acc1d42c6cf88a4424e083375cf98140bf
[ "Apache-2.0" ]
null
null
null
tutorial/t05_trace_debug_training.ipynb
AriChow/fastestimator
d381d9acc1d42c6cf88a4424e083375cf98140bf
[ "Apache-2.0" ]
null
null
null
44.775862
276
0.585387
[ [ [ "# Tutorial 5: Trace - training control and debugging\n\nIn this tutorial, we will talk about another important concept in FastEstimator - Trace.\n\n`Trace` is a class contains has 6 event functions below, each event function will be executed on different events of training loop when putting `Trace` inside `Estimator`. If you are a Keras user, you would see that `Trace` is a combination of callbacks and metrics. \n* on_begin\n* on_epoch_begin\n* on_batch_begin\n* on_batch_end\n* on_epoch_end\n* on_end\n\n`Trace` differs from keras's callback in the following places:\n1. Trace has full access to the preprocessing data and prediction data\n2. Trace can pass data among each other\n3. Trace is simpler and has fewer event functions than keras callbacks\n\n`Trace` can be used for anything that involves training loop, such as changing learning rate, calculating metrics, writing checkpoints and so on.", "_____no_output_____" ], [ "## debugging training loop with Trace\n\nSince `Trace` can have full access to data used in training loop, one natural usage of `Trace` is debugging training loop, for example, printing network prediction for each batch.\n\nRemember in tutorial 3, we customized an operation that scales the prediction score by 10 and write to a new key, let's see whether the operation is working correctly using `Trace`.", "_____no_output_____" ] ], [ [ "import tempfile\n\nimport numpy as np\nimport tensorflow as tf\n\nimport fastestimator as fe\nfrom fastestimator.architecture import LeNet\nfrom fastestimator.estimator.trace import Accuracy, ModelSaver\nfrom fastestimator.network.loss import SparseCategoricalCrossentropy\nfrom fastestimator.network.model import FEModel, ModelOp\nfrom fastestimator.pipeline.processing import Minmax\nfrom fastestimator.util.op import TensorOp\n\nclass Scale(TensorOp):\n def forward(self, data, state):\n data = data * 10\n return data\n\n(x_train, y_train), (x_eval, y_eval) = tf.keras.datasets.mnist.load_data()\ntrain_data = {\"x\": np.expand_dims(x_train, -1), \"y\": y_train}\neval_data = {\"x\": np.expand_dims(x_eval, -1), \"y\": y_eval}\ndata = {\"train\": train_data, \"eval\": eval_data}\npipeline = fe.Pipeline(batch_size=32, data=data, ops=Minmax(inputs=\"x\", outputs=\"x\"))\n\n# step 2. 
prepare model\nmodel = FEModel(model_def=LeNet, model_name=\"lenet\", optimizer=\"adam\")\nnetwork = fe.Network(\n ops=[ModelOp(inputs=\"x\", model=model, outputs=\"y_pred\"), \n SparseCategoricalCrossentropy(inputs=(\"y\", \"y_pred\")), \n Scale(inputs=\"y_pred\", outputs=\"y_pred_scaled\")])", "_____no_output_____" ] ], [ [ "## define trace", "_____no_output_____" ] ], [ [ "from fastestimator.estimator.trace import Trace\n\nclass ShowPred(Trace):\n def on_batch_end(self, state):\n if state[\"mode\"] == \"train\":\n batch_data = state[\"batch\"]\n print(\"step: {}\".format(state[\"batch_idx\"]))\n print(\"batch data has following keys: {}\".format(list(batch_data.keys())))\n print(\"scaled_prediction is:\")\n print(batch_data[\"y_pred_scaled\"])\n\n# step 3.prepare estimator\nestimator = fe.Estimator(network=network, pipeline=pipeline, epochs=1, traces=ShowPred(), steps_per_epoch=1)", "_____no_output_____" ], [ "estimator.fit()", " ______ __ ______ __ _ __ \n / ____/___ ______/ /_/ ____/____/ /_(_)___ ___ ____ _/ /_____ _____\n / /_ / __ `/ ___/ __/ __/ / ___/ __/ / __ `__ \\/ __ `/ __/ __ \\/ ___/\n / __/ / /_/ (__ ) /_/ /___(__ ) /_/ / / / / / / /_/ / /_/ /_/ / / \n/_/ \\__,_/____/\\__/_____/____/\\__/_/_/ /_/ /_/\\__,_/\\__/\\____/_/ \n \n\nFastEstimator-Warn: No ModelSaver Trace detected. Models will not be saved.\nFastEstimator-Start: step: 0; lenet_lr: 0.001; \nstep: 0\nbatch data has following keys: ['y_pred', 'y', 'x', 'loss', 'y_pred_scaled']\nscaled_prediction is:\ntf.Tensor(\n[[1.0597024 0.88230646 0.9054666 1.0526242 1.0112537 1.1514847\n 0.9731587 0.9711996 0.84732836 1.1454759 ]\n [1.0177196 0.96111745 0.8916435 1.0738678 0.9751328 1.2481465\n 0.9405147 0.87076896 0.8726471 1.148442 ]\n [1.0760062 0.94326234 0.9008551 1.0322686 1.0499443 1.1253775\n 0.93624175 0.9271722 0.90360963 1.1052628 ]\n [1.0469304 0.89323467 0.91441756 1.0751362 0.9745273 1.1652466\n 0.96247584 0.9406713 0.8315316 1.1958287 ]\n [1.0219785 0.929411 0.89820254 1.0585518 0.93793464 1.2132744\n 0.9584836 0.951019 0.8594369 1.1717079 ]\n [1.0567241 0.9066122 0.9052205 1.0659181 1.0157421 1.2072058\n 0.96398747 0.8855149 0.8579869 1.1350882 ]\n [1.0661185 0.91435105 0.89010346 1.0575683 0.9922614 1.2262878\n 0.93575335 0.91625047 0.86531997 1.135985 ]\n [1.0357784 0.8888004 0.8541077 1.0948972 0.98482585 1.283034\n 0.90922797 0.9051948 0.9000034 1.1441307 ]\n [1.0599277 0.90635175 0.89042604 1.0980016 1.0003179 1.2005775\n 0.97344226 0.904382 0.81406707 1.152506 ]\n [1.0498649 0.95371425 0.9321244 1.0166047 1.0222087 1.1368012\n 0.9753012 0.91623485 0.8738795 1.123267 ]\n [1.103452 0.903526 0.9064317 1.0117977 1.0413742 1.1384664\n 0.96658295 0.93786097 0.8479606 1.1425483 ]\n [1.029507 0.92203546 0.9414134 1.023415 1.0161355 1.1061418\n 0.98370135 0.97101694 0.90548897 1.1011443 ]\n [1.0279974 0.95044667 0.93619615 1.0110079 1.0024072 1.1394106\n 0.9575084 0.8984376 0.89733607 1.1792525 ]\n [1.0699053 0.87303096 0.9200075 1.0734357 1.0142893 1.1181858\n 0.9856108 0.93070036 0.8564811 1.1583531 ]\n [1.0348419 0.9044772 0.8707888 1.0723933 1.0153837 1.1527358\n 0.9473658 0.93430406 0.8998435 1.1678661 ]\n [1.0630001 0.8815649 0.8781316 1.080618 0.99296457 1.2163352\n 0.95687056 0.9228797 0.8936867 1.1139493 ]\n [1.0232941 0.8857512 0.8840588 1.092468 0.99615574 1.2249657\n 0.92473567 0.9100239 0.8655537 1.1929938 ]\n [1.0537924 0.88076466 0.8679014 1.1071997 1.006206 1.1429375\n 0.93528 0.9362229 0.8875452 1.1821507 ]\n [1.0308622 0.93516076 0.9209412 1.0852494 1.0089574 1.1817933\n 0.94350743 0.896239 
0.8588871 1.138402 ]\n [1.0389919 0.91212773 0.9013858 1.038586 1.0234965 1.1859746\n 0.95688295 0.9387725 0.84085584 1.1629258 ]\n [1.0600939 0.94089186 0.9131027 1.0013218 1.0147965 1.1764416\n 0.965766 0.95196784 0.870939 1.1046789 ]\n [1.1057894 0.8504439 0.83679646 1.1040735 0.9999001 1.2389936\n 0.9062878 0.9403291 0.8776086 1.1397778 ]\n [1.0217856 0.9747643 0.9006238 1.0764693 0.9715878 1.2085975\n 0.9288042 0.89752984 0.8574368 1.1624014 ]\n [1.0469611 0.9568805 0.92177266 1.0700536 0.993606 1.2035027\n 0.9525442 0.9015994 0.8851406 1.067939 ]\n [0.9877974 0.901551 0.93022996 1.0543675 1.0002809 1.1707911\n 0.94319403 0.971319 0.94477963 1.09569 ]\n [0.9924806 0.92723554 0.9150472 1.0373987 1.000831 1.1852853\n 0.9879187 0.9019555 0.8348947 1.216953 ]\n [1.0991246 0.8782563 0.8438319 1.1016914 0.9863124 1.2292806\n 0.9132333 0.9342602 0.892106 1.1219026 ]\n [0.9851291 0.9535258 0.8752247 1.1077297 1.0111363 1.166092\n 0.969571 0.91310537 0.89379835 1.1246873 ]\n [1.0290915 0.88374877 0.84945655 1.0189545 1.0234096 1.2094458\n 0.88590777 0.9749155 0.9239709 1.2010993 ]\n [1.0008084 0.9482253 0.8974297 1.0725788 0.99595183 1.1546551\n 0.9506333 0.9104537 0.90859526 1.1606691 ]\n [1.0367537 0.9001863 0.8841595 1.0721065 0.9803247 1.2551355\n 0.9427656 0.92319757 0.87253726 1.1328338 ]\n [0.9999633 0.9283558 0.8862161 1.0871539 1.0199494 1.1970563\n 0.9454409 0.9472147 0.92662996 1.0620204 ]], shape=(32, 10), dtype=float32)\nFastEstimator-Train: step: 0; loss: 2.3327756; \nFastEstimator-Eval: step: 1; epoch: 0; loss: 2.280537; min_loss: 2.280537; since_best_loss: 0; \nFastEstimator-Finish: step: 1; total_time: 2.58 sec; lenet_lr: 0.001; \n" ] ] ]
[ "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ] ]
d014905065b1be6eded87b4425900b15d5213cae
23,303
ipynb
Jupyter Notebook
examples/Notebooks/flopy3_modflow_boundaries.ipynb
briochh/flopy
51d4442cb0ff96024be0bc81c554a4e1d2d9ed78
[ "CC0-1.0", "BSD-3-Clause" ]
1
2019-11-03T15:06:10.000Z
2019-11-03T15:06:10.000Z
examples/Notebooks/flopy3_modflow_boundaries.ipynb
anguanping/flopy
4a9ea1a8f47467239f40c67b6f6823a424a845df
[ "CC0-1.0", "BSD-3-Clause" ]
null
null
null
examples/Notebooks/flopy3_modflow_boundaries.ipynb
anguanping/flopy
4a9ea1a8f47467239f40c67b6f6823a424a845df
[ "CC0-1.0", "BSD-3-Clause" ]
null
null
null
33.007082
491
0.49131
[ [ [ "# Flopy MODFLOW Boundary Conditions\n\nFlopy has a new way to enter boundary conditions for some MODFLOW packages. These changes are substantial. Boundary conditions can now be entered as a list of boundaries, as a numpy recarray, or as a dictionary. These different styles are described in this notebook.\n\nFlopy also now requires zero-based input. This means that **all boundaries are entered in zero-based layer, row, and column indices**. This means that older Flopy scripts will need to be modified to account for this change. If you are familiar with Python, this should be natural, but if not, then it may take some time to get used to zero-based numbering. Flopy users submit all information in zero-based form, and Flopy converts this to the one-based form required by MODFLOW.\n\nThe following MODFLOW packages are affected by this change:\n\n * Well\n * Drain\n * River\n * General-Head Boundary\n * Time-Variant Constant Head\n \nThis notebook explains the different ways to enter these types of boundary conditions.\n", "_____no_output_____" ] ], [ [ "#begin by importing flopy\nimport os\nimport sys\nimport numpy as np\n\n# run installed version of flopy or add local path\ntry:\n import flopy\nexcept:\n fpth = os.path.abspath(os.path.join('..', '..'))\n sys.path.append(fpth)\n import flopy\n\nworkspace = os.path.join('data')\n#make sure workspace directory exists\nif not os.path.exists(workspace):\n os.makedirs(workspace)\n \nprint(sys.version)\nprint('numpy version: {}'.format(np.__version__))\nprint('flopy version: {}'.format(flopy.__version__))", "flopy is installed in /Users/jdhughes/Documents/Development/flopy_git/flopy_us/flopy\n3.7.3 (default, Mar 27 2019, 16:54:48) \n[Clang 4.0.1 (tags/RELEASE_401/final)]\nnumpy version: 1.16.2\nflopy version: 3.2.12\n" ] ], [ [ "## List of Boundaries", "_____no_output_____" ], [ "Boundary condition information is passed to a package constructor as stress_period_data. In its simplest form, stress_period_data can be a list of individual boundaries, which themselves are lists. 
The following shows a simple example for a MODFLOW River Package boundary:", "_____no_output_____" ] ], [ [ "stress_period_data = [\n [2, 3, 4, 10.7, 5000., -5.7], #layer, row, column, stage, conductance, river bottom\n [2, 3, 5, 10.7, 5000., -5.7], #layer, row, column, stage, conductance, river bottom\n [2, 3, 6, 10.7, 5000., -5.7], #layer, row, column, stage, conductance, river bottom\n ]\nm = flopy.modflow.Modflow(modelname='test', model_ws=workspace)\nriv = flopy.modflow.ModflowRiv(m, stress_period_data=stress_period_data)\nm.write_input()", "_____no_output_____" ] ], [ [ "If we look at the River Package created here, you see that the layer, row, and column numbers have been increased by one.", "_____no_output_____" ] ], [ [ "!head -n 10 'data/test.riv'", "# RIV package for MODFLOW-2005, generated by Flopy.\r\n 3 0\r\n 3 0 # stress period 1\r\n 3 4 5 10.7 5000.0 -5.7\r\n 3 4 6 10.7 5000.0 -5.7\r\n 3 4 7 10.7 5000.0 -5.7\r\n" ] ], [ [ "If this model had more than one stress period, then Flopy will assume that this boundary condition information applies until the end of the simulation", "_____no_output_____" ] ], [ [ "m = flopy.modflow.Modflow(modelname='test', model_ws=workspace)\ndis = flopy.modflow.ModflowDis(m, nper=3)\nriv = flopy.modflow.ModflowRiv(m, stress_period_data=stress_period_data)\nm.write_input()\n!head -n 10 'data/test.riv'", "# RIV package for MODFLOW-2005, generated by Flopy.\r\n 3 0\r\n 3 0 # stress period 1\r\n 3 4 5 10.7 5000.0 -5.7\r\n 3 4 6 10.7 5000.0 -5.7\r\n 3 4 7 10.7 5000.0 -5.7\r\n -1 0 # stress period 2\r\n -1 0 # stress period 3\r\n" ] ], [ [ "## Recarray of Boundaries\n\nNumpy allows the use of recarrays, which are numpy arrays in which each column of the array may be given a different type. Boundary conditions can be entered as recarrays. Information on the structure of the recarray for a boundary condition package can be obtained from that particular package. The structure of the recarray is contained in the dtype. ", "_____no_output_____" ] ], [ [ "riv_dtype = flopy.modflow.ModflowRiv.get_default_dtype()\nprint(riv_dtype)", "[('k', '<i8'), ('i', '<i8'), ('j', '<i8'), ('stage', '<f4'), ('cond', '<f4'), ('rbot', '<f4')]\n" ] ], [ [ "Now that we know the structure of the recarray that we want to create, we can create a new one as follows.", "_____no_output_____" ] ], [ [ "stress_period_data = np.zeros((3), dtype=riv_dtype)\nstress_period_data = stress_period_data.view(np.recarray)\nprint('stress_period_data: ', stress_period_data)\nprint('type is: ', type(stress_period_data))", "stress_period_data: [(0, 0, 0, 0., 0., 0.) (0, 0, 0, 0., 0., 0.) 
(0, 0, 0, 0., 0., 0.)]\ntype is: <class 'numpy.recarray'>\n" ] ], [ [ "We can then fill the recarray with our boundary conditions.", "_____no_output_____" ] ], [ [ "stress_period_data[0] = (2, 3, 4, 10.7, 5000., -5.7)\nstress_period_data[1] = (2, 3, 5, 10.7, 5000., -5.7)\nstress_period_data[2] = (2, 3, 6, 10.7, 5000., -5.7)\nprint(stress_period_data)", "[(2, 3, 4, 10.7, 5000., -5.7) (2, 3, 5, 10.7, 5000., -5.7)\n (2, 3, 6, 10.7, 5000., -5.7)]\n" ], [ "m = flopy.modflow.Modflow(modelname='test', model_ws=workspace)\nriv = flopy.modflow.ModflowRiv(m, stress_period_data=stress_period_data)\nm.write_input()\n!head -n 10 'data/test.riv'", "# RIV package for MODFLOW-2005, generated by Flopy.\r\n 3 0\r\n 3 0 # stress period 1\r\n 3 4 5 10.7 5000.0 -5.7\r\n 3 4 6 10.7 5000.0 -5.7\r\n 3 4 7 10.7 5000.0 -5.7\r\n" ] ], [ [ "As before, if we have multiple stress periods, then this recarray will apply to all of them.", "_____no_output_____" ] ], [ [ "m = flopy.modflow.Modflow(modelname='test', model_ws=workspace)\ndis = flopy.modflow.ModflowDis(m, nper=3)\nriv = flopy.modflow.ModflowRiv(m, stress_period_data=stress_period_data)\nm.write_input()\n!head -n 10 'data/test.riv'", "# RIV package for MODFLOW-2005, generated by Flopy.\r\n 3 0\r\n 3 0 # stress period 1\r\n 3 4 5 10.7 5000.0 -5.7\r\n 3 4 6 10.7 5000.0 -5.7\r\n 3 4 7 10.7 5000.0 -5.7\r\n -1 0 # stress period 2\r\n -1 0 # stress period 3\r\n" ] ], [ [ "## Dictionary of Boundaries\n\nThe power of the new functionality in Flopy3 is the ability to specify a dictionary for stress_period_data. If specified as a dictionary, the key is the stress period number (**as a zero-based number**), and the value is either a nested list, an integer value of 0 or -1, or a recarray for that stress period.\n\nLet's say that we want to use the following schedule for our rivers:\n 0. No rivers in stress period zero\n 1. Rivers specified by a list in stress period 1\n 2. No rivers\n 3. No rivers\n 4. No rivers\n 5. Rivers specified by a recarray\n 6. Same recarray rivers\n 7. Same recarray rivers\n 8. Same recarray rivers\n", "_____no_output_____" ] ], [ [ "sp1 = [\n [2, 3, 4, 10.7, 5000., -5.7], #layer, row, column, stage, conductance, river bottom\n [2, 3, 5, 10.7, 5000., -5.7], #layer, row, column, stage, conductance, river bottom\n [2, 3, 6, 10.7, 5000., -5.7], #layer, row, column, stage, conductance, river bottom\n ]\nprint(sp1)", "[[2, 3, 4, 10.7, 5000.0, -5.7], [2, 3, 5, 10.7, 5000.0, -5.7], [2, 3, 6, 10.7, 5000.0, -5.7]]\n" ], [ "riv_dtype = flopy.modflow.ModflowRiv.get_default_dtype()\nsp5 = np.zeros((3), dtype=riv_dtype)\nsp5 = sp5.view(np.recarray)\nsp5[0] = (2, 3, 4, 20.7, 5000., -5.7)\nsp5[1] = (2, 3, 5, 20.7, 5000., -5.7)\nsp5[2] = (2, 3, 6, 20.7, 5000., -5.7)\nprint(sp5)", "[(2, 3, 4, 20.7, 5000., -5.7) (2, 3, 5, 20.7, 5000., -5.7)\n (2, 3, 6, 20.7, 5000., -5.7)]\n" ], [ "sp_dict = {0:0, 1:sp1, 2:0, 5:sp5}\nm = flopy.modflow.Modflow(modelname='test', model_ws=workspace)\ndis = flopy.modflow.ModflowDis(m, nper=8)\nriv = flopy.modflow.ModflowRiv(m, stress_period_data=sp_dict)\nm.write_input()\n!head -n 10 'data/test.riv'", "# RIV package for MODFLOW-2005, generated by Flopy.\r\n 3 0\r\n 0 0 # stress period 1\r\n 3 0 # stress period 2\r\n 3 4 5 10.7 5000.0 -5.7\r\n 3 4 6 10.7 5000.0 -5.7\r\n 3 4 7 10.7 5000.0 -5.7\r\n 0 0 # stress period 3\r\n -1 0 # stress period 4\r\n -1 0 # stress period 5\r\n" ] ], [ [ "## MODFLOW Auxiliary Variables\n\nFlopy works with MODFLOW auxiliary variables by allowing the recarray to contain additional columns of information. 
The auxiliary variables must be specified as package options as shown in the example below.\n\nIn this example, we also add a string in the last column of the list in order to name each boundary condition. In this case, however, we do not include boundname as an auxiliary variable as MODFLOW would try to read it as a floating point number.", "_____no_output_____" ] ], [ [ "#create an empty array with an iface auxiliary variable at the end\nriva_dtype = [('k', '<i8'), ('i', '<i8'), ('j', '<i8'), \n ('stage', '<f4'), ('cond', '<f4'), ('rbot', '<f4'), \n ('iface', '<i4'), ('boundname', object)]\nriva_dtype = np.dtype(riva_dtype)\nstress_period_data = np.zeros((3), dtype=riva_dtype)\nstress_period_data = stress_period_data.view(np.recarray)\nprint('stress_period_data: ', stress_period_data)\nprint('type is: ', type(stress_period_data))", "stress_period_data: [(0, 0, 0, 0., 0., 0., 0, 0) (0, 0, 0, 0., 0., 0., 0, 0)\n (0, 0, 0, 0., 0., 0., 0, 0)]\ntype is: <class 'numpy.recarray'>\n" ], [ "stress_period_data[0] = (2, 3, 4, 10.7, 5000., -5.7, 1, 'riv1')\nstress_period_data[1] = (2, 3, 5, 10.7, 5000., -5.7, 2, 'riv2')\nstress_period_data[2] = (2, 3, 6, 10.7, 5000., -5.7, 3, 'riv3')\nprint(stress_period_data)", "[(2, 3, 4, 10.7, 5000., -5.7, 1, 'riv1')\n (2, 3, 5, 10.7, 5000., -5.7, 2, 'riv2')\n (2, 3, 6, 10.7, 5000., -5.7, 3, 'riv3')]\n" ], [ "m = flopy.modflow.Modflow(modelname='test', model_ws=workspace)\nriv = flopy.modflow.ModflowRiv(m, stress_period_data=stress_period_data, dtype=riva_dtype, options=['aux iface'])\nm.write_input()\n!head -n 10 'data/test.riv'", "# RIV package for MODFLOW-2005, generated by Flopy.\r\n 3 0 aux iface\r\n 3 0 # stress period 1\r\n 3 4 5 10.7 5000.0 -5.7 1 riv1\r\n 3 4 6 10.7 5000.0 -5.7 2 riv2\r\n 3 4 7 10.7 5000.0 -5.7 3 riv3\r\n" ] ], [ [ "## Working with Unstructured Grids\n\nFlopy can create an unstructured grid boundary condition package for MODFLOW-USG. This can be done by specifying a custom dtype for the recarray. The following shows an example of how that can be done.", "_____no_output_____" ] ], [ [ "#create an empty array based on nodenumber instead of layer, row, and column\nrivu_dtype = [('nodenumber', '<i8'), ('stage', '<f4'), ('cond', '<f4'), ('rbot', '<f4')]\nrivu_dtype = np.dtype(rivu_dtype)\nstress_period_data = np.zeros((3), dtype=rivu_dtype)\nstress_period_data = stress_period_data.view(np.recarray)\nprint('stress_period_data: ', stress_period_data)\nprint('type is: ', type(stress_period_data))", "stress_period_data: [(0, 0., 0., 0.) (0, 0., 0., 0.) 
(0, 0., 0., 0.)]\ntype is: <class 'numpy.recarray'>\n" ], [ "stress_period_data[0] = (77, 10.7, 5000., -5.7)\nstress_period_data[1] = (245, 10.7, 5000., -5.7)\nstress_period_data[2] = (450034, 10.7, 5000., -5.7)\nprint(stress_period_data)", "[( 77, 10.7, 5000., -5.7) ( 245, 10.7, 5000., -5.7)\n (450034, 10.7, 5000., -5.7)]\n" ], [ "m = flopy.modflow.Modflow(modelname='test', model_ws=workspace)\nriv = flopy.modflow.ModflowRiv(m, stress_period_data=stress_period_data, dtype=rivu_dtype)\nm.write_input()\nprint(workspace)\n!head -n 10 'data/test.riv'", "data\n# RIV package for MODFLOW-2005, generated by Flopy.\r\n 3 0\r\n 3 0 # stress period 1\r\n 77 10.7 5000.0 -5.7\r\n 245 10.7 5000.0 -5.7\r\n 450034 10.7 5000.0 -5.7\r\n" ] ], [ [ "## Combining two boundary condition packages", "_____no_output_____" ] ], [ [ "ml = flopy.modflow.Modflow(modelname=\"test\",model_ws=workspace)\ndis = flopy.modflow.ModflowDis(ml,10,10,10,10)\nsp_data1 = {3: [1, 1, 1, 1.0],5:[1,2,4,4.0]}\nwel1 = flopy.modflow.ModflowWel(ml, stress_period_data=sp_data1)\nml.write_input()\n!head -n 10 'data/test.wel'", "# WEL package for MODFLOW-2005, generated by Flopy.\r\n 1 0 \r\n 0 0 # stress period 1\r\n 0 0 # stress period 2\r\n 0 0 # stress period 3\r\n 1 0 # stress period 4\r\n 2 2 2 1.0\r\n -1 0 # stress period 5\r\n 1 0 # stress period 6\r\n 2 3 5 4.0\r\n" ], [ "sp_data2 = {0: [1, 1, 3, 3.0],8:[9,2,4,4.0]}\nwel2 = flopy.modflow.ModflowWel(ml, stress_period_data=sp_data2)\nml.write_input()\n!head -n 10 'data/test.wel'", "WARNING: unit 20 of package WEL already in use\n****Warning -- two packages of the same type: <class 'flopy.modflow.mfwel.ModflowWel'> <class 'flopy.modflow.mfwel.ModflowWel'>\nreplacing existing Package...\n# WEL package for MODFLOW-2005, generated by Flopy.\r\n 1 0 \r\n 1 0 # stress period 1\r\n 2 2 4 3.0\r\n -1 0 # stress period 2\r\n -1 0 # stress period 3\r\n -1 0 # stress period 4\r\n -1 0 # stress period 5\r\n -1 0 # stress period 6\r\n -1 0 # stress period 7\r\n" ] ], [ [ "Now we create a third wel package, using the ```MfList.append()``` method:", "_____no_output_____" ] ], [ [ "wel3 = flopy.modflow.ModflowWel(ml,stress_period_data=\\\n wel2.stress_period_data.append(\n wel1.stress_period_data))\nml.write_input()\n!head -n 10 'data/test.wel'", "WARNING: unit 20 of package WEL already in use\n****Warning -- two packages of the same type: <class 'flopy.modflow.mfwel.ModflowWel'> <class 'flopy.modflow.mfwel.ModflowWel'>\nreplacing existing Package...\n# WEL package for MODFLOW-2005, generated by Flopy.\r\n 2 0 \r\n 1 0 # stress period 1\r\n 2 2 4 3.0\r\n -1 0 # stress period 2\r\n -1 0 # stress period 3\r\n 2 0 # stress period 4\r\n 2 2 4 3.0\r\n 2 2 2 1.0\r\n -1 0 # stress period 5\r\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ] ]
d014999bcc4d4b5336fca5a16827d745f4f584b7
236,999
ipynb
Jupyter Notebook
src/test/resources/Baseline_QA/Baseline_QA_ELECTRA.ipynb
jenka2014/aigents-java-nlp
7b54f76162921389f2e7743b5cc11f16499c1fd5
[ "MIT" ]
null
null
null
src/test/resources/Baseline_QA/Baseline_QA_ELECTRA.ipynb
jenka2014/aigents-java-nlp
7b54f76162921389f2e7743b5cc11f16499c1fd5
[ "MIT" ]
13
2020-06-14T20:04:09.000Z
2021-11-15T11:30:16.000Z
src/test/resources/Baseline_QA/Baseline_QA_ELECTRA.ipynb
jenka2014/aigents-java-nlp
7b54f76162921389f2e7743b5cc11f16499c1fd5
[ "MIT" ]
30
2020-06-06T06:58:04.000Z
2021-07-06T10:24:23.000Z
37.446516
822
0.483846
[ [ [ "<a href=\"https://colab.research.google.com/github/rvignav/aigents-java-nlp/blob/master/src/test/resources/Baseline_QA/Baseline_QA_ELECTRA.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ] ], [ [ "!pip install --quiet transformers sentence-transformers nltk pyter3", "\u001b[K |████████████████████████████████| 2.5MB 7.7MB/s \n\u001b[K |████████████████████████████████| 92kB 11.1MB/s \n\u001b[K |████████████████████████████████| 3.3MB 41.8MB/s \n\u001b[K |████████████████████████████████| 901kB 42.0MB/s \n\u001b[K |████████████████████████████████| 1.2MB 40.3MB/s \n\u001b[?25h Building wheel for sentence-transformers (setup.py) ... \u001b[?25l\u001b[?25hdone\n" ], [ "import json\nfrom pathlib import Path\n\ndef read_squad(path):\n path = Path(path)\n with open(path, 'rb') as f:\n squad_dict = json.load(f)\n\n contexts = []\n questions = []\n answers = []\n for group in squad_dict['data']:\n for passage in group['paragraphs']:\n context = passage['context']\n for qa in passage['qas']:\n question = qa['question']\n for answer in qa['answers']:\n contexts.append(context)\n questions.append(question)\n answers.append(answer)\n\n return contexts, questions, answers\n\ntrain_contexts, train_questions, train_answers = read_squad('/content/drive/MyDrive/squad/train-v2.0.json')\nval_contexts, val_questions, val_answers = read_squad('/content/drive/MyDrive/squad/dev-v2.0.json')", "_____no_output_____" ], [ "def add_end_idx(answers, contexts):\n for answer, context in zip(answers, contexts):\n gold_text = answer['text']\n start_idx = answer['answer_start']\n end_idx = start_idx + len(gold_text)\n answer['answer_end'] = end_idx\n\nadd_end_idx(train_answers, train_contexts)\nadd_end_idx(val_answers, val_contexts)", "_____no_output_____" ], [ "from transformers import AutoTokenizer\ntokenizer = AutoTokenizer.from_pretrained('deepset/electra-base-squad2')\n\ntrain_encodings = tokenizer(train_contexts, train_questions, truncation=True, padding=True)\nval_encodings = tokenizer(val_contexts, val_questions, truncation=True, padding=True)", "_____no_output_____" ], [ "def add_token_positions(encodings, answers):\n start_positions = []\n end_positions = []\n for i in range(len(answers)):\n start_positions.append(encodings.char_to_token(i, answers[i]['answer_start']))\n end_positions.append(encodings.char_to_token(i, answers[i]['answer_end'] - 1))\n\n # if start position is None, the answer passage has been truncated\n if start_positions[-1] is None:\n start_positions[-1] = tokenizer.model_max_length\n if end_positions[-1] is None:\n end_positions[-1] = tokenizer.model_max_length\n\n encodings.update({'start_positions': start_positions, 'end_positions': end_positions})\n\nadd_token_positions(train_encodings, train_answers)\nadd_token_positions(val_encodings, val_answers)", "_____no_output_____" ], [ "import torch\n\nclass SquadDataset(torch.utils.data.Dataset):\n def __init__(self, encodings):\n self.encodings = encodings\n\n def __getitem__(self, idx):\n return {key: torch.tensor(val[idx]) for key, val in self.encodings.items()}\n\n def __len__(self):\n return len(self.encodings.input_ids)\n\ntrain_dataset = SquadDataset(train_encodings)\nval_dataset = SquadDataset(val_encodings)", "_____no_output_____" ], [ "from transformers import AutoModelForQuestionAnswering\nmodel = AutoModelForQuestionAnswering.from_pretrained(\"deepset/electra-base-squad2\")", "_____no_output_____" ], [ "from torch.utils.data 
import DataLoader\nfrom transformers import AdamW\n\ndevice = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')\n\nmodel.to(device)\nmodel.train()\n\ntrain_loader = DataLoader(train_dataset, batch_size=16, shuffle=True)\n\noptim = AdamW(model.parameters(), lr=5e-5)\n\nfor epoch in range(3):\n print(\"Epoch: \", epoch+1)\n for batch in train_loader:\n optim.zero_grad()\n input_ids = batch['input_ids'].to(device)\n attention_mask = batch['attention_mask'].to(device)\n start_positions = batch['start_positions'].to(device)\n end_positions = batch['end_positions'].to(device)\n outputs = model(input_ids, attention_mask=attention_mask, start_positions=start_positions, end_positions=end_positions)\n loss = outputs[0]\n loss.backward()\n optim.step()", "Epoch: 1\nEpoch: 2\nEpoch: 3\n" ], [ "model.eval()", "_____no_output_____" ], [ "def wer_score(hyp, ref, print_matrix=False):\n import numpy as np\n N = len(hyp)\n M = len(ref)\n L = np.zeros((N,M))\n for i in range(0, N):\n for j in range(0, M):\n if min(i,j) == 0:\n L[i,j] = max(i,j)\n else:\n deletion = L[i-1,j] + 1\n insertion = L[i,j-1] + 1\n sub = 1 if hyp[i] != ref[j] else 0\n substitution = L[i-1,j-1] + sub\n L[i,j] = min(deletion, min(insertion, substitution))\n if print_matrix:\n print(\"WER matrix ({}x{}): \".format(N, M))\n print(L)\n return int(L[N-1, M-1])\n\ndef metrics(fname):\n # BLEU\n from nltk.translate.bleu_score import sentence_bleu, corpus_bleu\n scores = []\n f = open(\"/content/drive/MyDrive/squad/poc_english.txt\", \"r\")\n f2 = open(fname, \"r\")\n lines = f.readlines()\n cand = f2.readlines()\n for i in range(len(cand)):\n line = lines[i]\n candidate = []\n l = cand[i].lower().strip('\\n')[1:len(cand[i])-2].split(\", \")\n for item in l:\n item = item.strip('.').split(\" \")\n candidate.append(item)\n arr = line.strip('.\\n').split(\" \")\n for i in range(len(arr)):\n arr[i] = arr[i].lower()\n reference = [arr]\n for c in candidate:\n # print(reference, c, ': ', sentence_bleu(reference, c, weights=(1,0)))\n scores.append(sentence_bleu(reference, c, weights=(1,0)))\n\n print(\"BLEU: \" + str(sum(scores)/(1.0*len(scores))))\n\n # Word2Vec Cosine Similarity\n import torch\n import torch.nn.functional as F\n from sentence_transformers import SentenceTransformer\n import nltk\n nltk.download('punkt')\n from nltk import tokenize\n def similarity(par1, par2):\n transformer = SentenceTransformer('roberta-base-nli-stsb-mean-tokens')\n transformer.eval()\n par1 = tokenize.sent_tokenize(par1)\n vec1 = torch.Tensor(transformer.encode(par1))\n vec1 = vec1.mean(0)\n par2 = tokenize.sent_tokenize(par2)\n vec2 = torch.Tensor(transformer.encode(par2))\n vec2 = vec2.mean(0)\n cos_sim = F.cosine_similarity(vec1, vec2, dim=0)\n return cos_sim.item()\n\n scores = []\n f = open(\"/content/drive/MyDrive/squad/poc_english.txt\", \"r\")\n f2 = open(fname, \"r\")\n lines = f.readlines()\n cand = f2.readlines()\n for i in range(len(cand)):\n line = lines[i]\n candidate = []\n l = cand[i].lower().strip('\\n')[1:len(cand[i])-2].split(\", \")\n for item in l:\n item = item.strip('.').split(\" \")\n candidate.append(item)\n arr = line.strip('.\\n').split(\" \")\n if (len(arr) == 1):\n continue\n for i in range(len(arr)):\n arr[i] = arr[i].lower()\n reference = arr\n for c in candidate:\n scores.append(similarity(\" \".join(reference), \" \".join(c)))\n print(\"Word2Vec Cosine Similarity: \" + str(sum(scores)/(1.0*len(scores))))\n\n # WER\n scores = []\n f = open(\"/content/drive/MyDrive/squad/poc_english.txt\", \"r\")\n f2 = 
open(fname, \"r\")\n lines = f.readlines()\n cand = f2.readlines()\n for i in range(len(cand)):\n line = lines[i]\n candidate = []\n l = cand[i].lower().strip('\\n')[1:len(cand[i])-2].split(\", \")\n for item in l:\n item = item.strip('.').split(\" \")\n candidate.append(item)\n arr = line.strip('.\\n').split(\" \")\n if (len(arr) == 1):\n continue\n for i in range(len(arr)):\n arr[i] = arr[i].lower()\n reference = arr\n for c in candidate:\n scores.append(wer_score(c, reference))\n print(\"WER: \" + str(sum(scores)/(1.0*len(scores))))\n\n # TER\n import pyter\n\n scores = []\n f = open(\"/content/drive/MyDrive/squad/poc_english.txt\", \"r\")\n f2 = open(fname, \"r\")\n lines = f.readlines()\n cand = f2.readlines()\n for i in range(len(cand)):\n line = lines[i]\n candidate = []\n l = cand[i].lower().strip('\\n')[1:len(cand[i])-2].split(\", \")\n for item in l:\n item = item.strip('.').split(\" \")\n candidate.append(item)\n arr = line.strip('.\\n').split(\" \")\n if (len(arr) == 1):\n continue\n for i in range(len(arr)):\n arr[i] = arr[i].lower()\n reference = arr\n for c in candidate:\n scores.append(pyter.ter(reference, c))\n print(\"TER: \" + str(sum(scores)/(1.0*len(scores))))\n\ndef run(modelname, model, tokenizer):\n # model = AutoModelForQuestionAnswering.from_pretrained(modelname)\n # tokenizer = AutoTokenizer.from_pretrained(modelname)\n\n from transformers import pipeline\n nlp = pipeline('question-answering', model=model, tokenizer=tokenizer)\n\n rel_and_food = \"A mom is a human. A dad is a human. A mom is a parent. A dad is a parent. A son is a child. A daughter is a child. A son is a human. A daughter is a human. A mom likes cake. A daughter likes cake. A son likes sausage. A dad likes sausage. Cake is a food. Sausage is a food. Mom is a human now. Dad is a human now. Mom is a parent now. Dad is a parent now. Son is a child now. Daughter is a child now. Son is a human now. Daughter is a human now. Mom likes cake now. Daughter likes cake now. Son likes sausage now. Dad likes sausage now. Cake is a food now. Sausage is a food now. Mom was a daughter before. Dad was a son before. Mom was not a parent before. Dad was not a parent before. Mom liked cake before. Dad liked sausage before. Cake was a food before. Sausage was a food before.\"\n prof = \"Mom is on the board of directors. Dad is on the board of directors. Son is on the board of directors. Daughter is on the board of directors. Mom writes with chalk on the board. Dad writes with chalk on the board. Son writes with chalk on the board. Daughter writes with chalk on the board. Dad wants Mom to be on the board of directors. Mom wants Dad to be on the board of directors. Dad wants his son to be on the board of directors. Mom wants her daughter to be on the board of directors. Mom writes to Dad with chalk on the board. Dad writes to Mom with chalk on the board. Son writes to Dad with chalk on the board. Daughter writes to Mom with chalk on the board.\"\n tools_and_pos = \"Mom has a hammer. Mom has a saw. Dad has a hammer. Dad has a saw. Mom has a telescope. Mom has binoculars. Dad has a telescope. Dad has binoculars. Mom saw Dad with a hammer. Mom saw Dad with a saw. Dad saw Mom with a hammer. Dad saw Mom with a saw. Saw is a tool. Hammer is a tool. Binoculars are a tool. A telescope is a tool. Mom sawed the wood with a saw. Dad sawed the wood with a saw. Son sawed the wood with a saw. Daughter sawed the wood with a saw. Mom knocked the wood with a hammer. Dad knocked the wood with a hammer. Son knocked the wood with a hammer. 
Daughter knocked the wood with a hammer. Mom saw Dad with binoculars. Mom saw Dad with a telescope. Dad saw Mom with binoculars. Dad saw Mom with a telescope.\"\n\n f = open(\"/content/drive/MyDrive/squad/poc_english_queries.txt\", \"r\")\n f2name = modelname.split(\"/\")[1] + \".txt\"\n f2 = open(f2name, \"w\")\n \n for line in f:\n parts = line.split(\" \")\n context = \"\"\n if \"relationships\" in parts[0]:\n context = rel_and_food\n elif \"tools\" in parts[0]:\n context = tools_and_pos\n else:\n context = prof\n question = \"\"\n for i in range(len(parts)-1):\n question = question + parts[i+1].rstrip() + \" \"\n question = question[0:len(question)-1] + \"?\"\n f2.write(nlp({'question': question, 'context': context })['answer'].replace(\".\",\",\") + \"\\n\")\n \n f2.close()\n\n print(f2name)\n metrics(f2name)\n print('\\n')", "_____no_output_____" ], [ "run('deepset/electra-base-squad2', model, tokenizer)", "electra-base-squad2.txt\n" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
d014b22f86ffc0becedbaa300876abc00ac1e88b
8,554
ipynb
Jupyter Notebook
docs/notebook_tutorial/wrap_MT_inference_pipeline.ipynb
Xuezhi-Liang/forte
5ab4c2bb11011a9f05e3c9d427106d02f372b99f
[ "Apache-2.0" ]
null
null
null
docs/notebook_tutorial/wrap_MT_inference_pipeline.ipynb
Xuezhi-Liang/forte
5ab4c2bb11011a9f05e3c9d427106d02f372b99f
[ "Apache-2.0" ]
null
null
null
docs/notebook_tutorial/wrap_MT_inference_pipeline.ipynb
Xuezhi-Liang/forte
5ab4c2bb11011a9f05e3c9d427106d02f372b99f
[ "Apache-2.0" ]
null
null
null
34.079681
336
0.599135
[ [ [ "# Machine Translation Inference Pipeline\n## Packages", "_____no_output_____" ] ], [ [ "import os\nimport shutil\nfrom typing import Dict\nfrom transformers import T5Tokenizer, T5ForConditionalGeneration\nfrom forte import Pipeline\nfrom forte.data import DataPack\nfrom forte.common import Resources, Config\nfrom forte.processors.base import PackProcessor\nfrom forte.data.readers import PlainTextReader", "_____no_output_____" ] ], [ [ "## Background\n\nAfter a Data Scientist is satisfied with the results of a training model, they will have their notebook over to an MLE who has to convert their model into an inference model. \n\n## Inference Workflow\n\n### Pipeline\nWe consider `t5-small` as a trained MT model to simplify the example. We should always consider pipeline first when it comes to an inference workflow. As the [glossary](https://asyml-forte.readthedocs.io/en/latest/index_appendices.html#glossary) suggests, it's an inference system that contains a set of processing components. \n\nTherefore, we initialize a `pipeline` below.\n\n", "_____no_output_____" ] ], [ [ "pipeline: Pipeline = Pipeline[DataPack]()", "_____no_output_____" ] ], [ [ "### Reader\nAfter observing the dataset, it's a plain `txt` file. Therefore, we can use `PlainTextReader` directly.", "_____no_output_____" ] ], [ [ "pipeline.set_reader(PlainTextReader())", "_____no_output_____" ] ], [ [ "However, it's still beneficial to take a deeper look at how to design this class so that users can customize a reader when needed.\n", "_____no_output_____" ], [ "\n### Processor\nWe already have an inference model, `t5-small`, and we need a component to make an inference. Therefore, besides the model itself, there are several behaviors needed.\n1. tokenization that transforms input text into sequences of tokens.\n2. since T5 has a better performance given a task prompt, we also want to include the prompt in our data.\n\nIn forte, we have a generic class `PackProcessor` that wraps model and inference-related components and behaviors to process `DataPack`. We need to create a class that inherits the generic method and customizes the behaviors.\n\nThe generic method to process `DataPack` is `_process(self, input_pack: DataPack)`. 
It should tokenize the input text, use the model class to make an inference, decode the output token ids, and finally write the output to a target file.\n\nGiven what we discussed, we have a processor class below, and we need to add it to the pipeline after defining it.", "_____no_output_____" ] ], [ [ "\nclass MachineTranslationProcessor(PackProcessor):\n    \"\"\"\n    Translate the input text and output to a file.\n    \"\"\"\n    def initialize(self, resources: Resources, configs: Config):\n        super().initialize(resources, configs)\n\n        # Initialize the tokenizer and model\n        model_name: str = self.configs.pretrained_model\n        self.tokenizer = T5Tokenizer.from_pretrained(model_name)\n        self.model = T5ForConditionalGeneration.from_pretrained(model_name)\n        self.task_prefix = \"translate English to German: \"\n        self.tokenizer.padding_side = \"left\"\n        self.tokenizer.pad_token = self.tokenizer.eos_token\n\n        if not os.path.isdir(self.configs.output_folder):\n            os.mkdir(self.configs.output_folder)\n\n    def _process(self, input_pack: DataPack):\n        file_name: str = os.path.join(\n            self.configs.output_folder, os.path.basename(input_pack.pack_name)\n        )\n\n        # en2de machine translation \n        inputs = self.tokenizer([\n            self.task_prefix + sentence\n            for sentence in input_pack.text.split('\\n')\n        ], return_tensors=\"pt\", padding=True)\n\n        output_sequences = self.model.generate(\n            input_ids=inputs[\"input_ids\"],\n            attention_mask=inputs[\"attention_mask\"],\n            do_sample=False,\n        )\n\n        outputs = self.tokenizer.batch_decode(\n            output_sequences, skip_special_tokens=True\n        )\n\n        # Write output to the specified file\n        with open(file=file_name, mode='w') as f:\n            f.write('\\n'.join(outputs))\n\n    @classmethod\n    def default_configs(cls) -> Dict:\n        return {\n            \"pretrained_model\": \"t5-small\",\n            \"output_folder\": \"mt_test_output\"\n        }\n\npipeline.add(MachineTranslationProcessor(), config={\n    \"pretrained_model\": \"t5-small\"\n})", "_____no_output_____" ] ], [ [ "\n### Examples\n\n\nWe have a working [MT translation pipeline example](https://github.com/asyml/forte/blob/master/docs/notebook_tutorial/wrap_MT_inference_pipeline.ipynb).\n\nThere are several basic functions of the processor and internal functions defined in this example.\n\n* ``initialize()``: Pipeline will call it at the start of processing. The processor will be initialized with\n    ``configs``, and registers global resources into ``resources``. 
The\n implementation should set up the states of the component.\n - initialize a pre-trained model\n - initialize tokenizer\n - initialize model-specific attributes such as task prefix\n* ``process()``: using the loaded model to make predictions and write the prediction results out.\n - we first tokenize the input text\n - then, we use model to generate output sequence ids\n - then, we decode output sequence ids into tokens and write the output into a file", "_____no_output_____" ], [ "After setting up the pipeline's components, we can run the pipeline on the input directory as below.", "_____no_output_____" ] ], [ [ "dir_path = os.path.abspath(\n os.path.join(\"data_samples\", \"machine_translation\")\n ) # notebook should be running from project root folder\n\npipeline.run(dir_path)\nprint(\"Done successfully\")", "_____no_output_____" ] ], [ [ "One can investigate the machine translation output in folder `mt_test_output` located under the script's directory.\nThen we remove the output folder below.", "_____no_output_____" ] ], [ [ "shutil.rmtree(MachineTranslationProcessor.default_configs()[\"output_folder\"])", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
d014b9205070944d446b6d7e8c8af7e419ed1d96
13,302
ipynb
Jupyter Notebook
assignments/assignment_yourname_class3.ipynb
Chuyi1202/T81-558-Application-of-Deep-Neural-Networks
55e2a272fb006237f776db4b63c980eff5a1d295
[ "Apache-2.0" ]
1
2022-03-15T07:00:37.000Z
2022-03-15T07:00:37.000Z
assignments/assignment_yourname_class3.ipynb
frankalcantara/t81_558_deep_learning
0eeac399398a52a211a1ecdc0f65d6863aa8a9ae
[ "Apache-2.0" ]
null
null
null
assignments/assignment_yourname_class3.ipynb
frankalcantara/t81_558_deep_learning
0eeac399398a52a211a1ecdc0f65d6863aa8a9ae
[ "Apache-2.0" ]
1
2019-09-01T11:11:09.000Z
2019-09-01T11:11:09.000Z
39.47181
415
0.579462
[ [ [ "# T81-558: Applications of Deep Neural Networks\n* Instructor: [Jeff Heaton](https://sites.wustl.edu/jeffheaton/), School of Engineering and Applied Science, [Washington University in St. Louis](https://engineering.wustl.edu/Programs/Pages/default.aspx)\n* For more information visit the [class website](https://sites.wustl.edu/jeffheaton/t81-558/).\n\n**Module 3 Assignment: Creating Columns in Pandas**\n\n**Student Name: Your Name**", "_____no_output_____" ], [ "# Assignment Instructions\n\nFor this assignment you will use the **reg-30-spring-2018.csv** dataset. This is a dataset that I generated specifically for this semester. You can find the CSV file in the **data** directory of the class GitHub repository here: [reg-30-spring-2018.csv](https://github.com/jeffheaton/t81_558_deep_learning/blob/master/data/reg-30-spring-2018.csv).\n\nFor this assignment, load and modify the data set. You will submit this modified dataset to the **submit** function. See [Assignment #1](https://github.com/jeffheaton/t81_558_deep_learning/blob/master/assignments/assignment_yourname_class1.ipynb) for details on how to submit an assignment or check that one was submitted.\n\nModify the dataset as follows:\n\n* Add a column named *density* that is *weight* divided by *volume*.\n* Replace the *region* column with dummy variables. \n* Replace the *item* column with an index encoding value (for example 0 for the first class, 1 for the next, etc. see function *encode_text_index*)\n* Your submitted dataframe will have these columns: id, distance, height, landings, number, pack, age, usage, weight, item, volume, width, max, power, size, target, density, region-RE-0, region-RE-1, region-RE-10, region-RE-11, region-RE-2, region-RE-3, region-RE-4, region-RE-5, region-RE-6, region-RE-7, region-RE-8, region-RE-9, region-RE-A, region-RE-B, region-RE-C, region-RE-D, region-RE-E, region-RE-F.", "_____no_output_____" ], [ "# Helpful Functions\n\nYou will see these at the top of every module and assignment. These are simply a set of reusable functions that we will make use of. Each of them will be explained as the semester progresses. They are explained in greater detail as the course progresses. Class 4 contains a complete overview of these functions.", "_____no_output_____" ] ], [ [ "from sklearn import preprocessing\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport shutil\nimport os\nimport requests\nimport base64\n\n\n# Encode text values to dummy variables(i.e. [1,0,0],[0,1,0],[0,0,1] for red,green,blue)\ndef encode_text_dummy(df, name):\n dummies = pd.get_dummies(df[name])\n for x in dummies.columns:\n dummy_name = \"{}-{}\".format(name, x)\n df[dummy_name] = dummies[x]\n df.drop(name, axis=1, inplace=True)\n\n\n# Encode text values to a single dummy variable. The new columns (which do not replace the old) will have a 1\n# at every location where the original column (name) matches each of the target_values. One column is added for\n# each target value.\ndef encode_text_single_dummy(df, name, target_values):\n for tv in target_values:\n l = list(df[name].astype(str))\n l = [1 if str(x) == str(tv) else 0 for x in l]\n name2 = \"{}-{}\".format(name, tv)\n df[name2] = l\n\n\n# Encode text values to indexes(i.e. 
[1],[2],[3] for red,green,blue).\ndef encode_text_index(df, name):\n    le = preprocessing.LabelEncoder()\n    df[name] = le.fit_transform(df[name])\n    return le.classes_\n\n\n# Encode a numeric column as zscores\ndef encode_numeric_zscore(df, name, mean=None, sd=None):\n    if mean is None:\n        mean = df[name].mean()\n\n    if sd is None:\n        sd = df[name].std()\n\n    df[name] = (df[name] - mean) / sd\n\n\n# Convert all missing values in the specified column to the median\ndef missing_median(df, name):\n    med = df[name].median()\n    df[name] = df[name].fillna(med)\n\n\n# Convert all missing values in the specified column to the default\ndef missing_default(df, name, default_value):\n    df[name] = df[name].fillna(default_value)\n\n\n# Convert a Pandas dataframe to the x,y inputs that TensorFlow needs\ndef to_xy(df, target):\n    result = []\n    for x in df.columns:\n        if x != target:\n            result.append(x)\n    # find out the type of the target column.  Is it really this hard? :(\n    target_type = df[target].dtypes\n    target_type = target_type[0] if hasattr(target_type, '__iter__') else target_type\n    # Encode to int for classification, float otherwise. TensorFlow likes 32 bits.\n    if target_type in (np.int64, np.int32):\n        # Classification\n        dummies = pd.get_dummies(df[target])\n        return df.as_matrix(result).astype(np.float32), dummies.as_matrix().astype(np.float32)\n    else:\n        # Regression\n        return df.as_matrix(result).astype(np.float32), df.as_matrix([target]).astype(np.float32)\n\n# Nicely formatted time string\ndef hms_string(sec_elapsed):\n    h = int(sec_elapsed / (60 * 60))\n    m = int((sec_elapsed % (60 * 60)) / 60)\n    s = sec_elapsed % 60\n    return \"{}:{:>02}:{:>05.2f}\".format(h, m, s)\n\n\n# Regression chart.\ndef chart_regression(pred,y,sort=True):\n    t = pd.DataFrame({'pred' : pred, 'y' : y.flatten()})\n    if sort:\n        t.sort_values(by=['y'],inplace=True)\n    a = plt.plot(t['y'].tolist(),label='expected')\n    b = plt.plot(t['pred'].tolist(),label='prediction')\n    plt.ylabel('output')\n    plt.legend()\n    plt.show()\n\n# Remove all rows where the specified column is +/- sd standard deviations\ndef remove_outliers(df, name, sd):\n    drop_rows = df.index[(np.abs(df[name] - df[name].mean()) >= (sd * df[name].std()))]\n    df.drop(drop_rows, axis=0, inplace=True)\n\n\n# Encode a column to a range between normalized_low and normalized_high.\ndef encode_numeric_range(df, name, normalized_low=-1, normalized_high=1,\n                         data_low=None, data_high=None):\n    if data_low is None:\n        data_low = min(df[name])\n        data_high = max(df[name])\n\n    df[name] = ((df[name] - data_low) / (data_high - data_low)) \\\n        * (normalized_high - normalized_low) + normalized_low\n    \n# This function submits an assignment.  You can submit an assignment as much as you like, only the final\n# submission counts.  The parameters are as follows:\n# data - Pandas dataframe output.\n# key - Your student key that was emailed to you.\n# no - The assignment class number, should be 1 through 1.\n# source_file - The full path to your Python or IPYNB file.  This must have \"_class1\" as part of its name.  \n# .             The number must match your assignment number.  
For example \"_class2\" for class assignment #2.\ndef submit(data,key,no,source_file=None):\n if source_file is None and '__file__' not in globals(): raise Exception('Must specify a filename when a Jupyter notebook.')\n if source_file is None: source_file = __file__\n suffix = '_class{}'.format(no)\n if suffix not in source_file: raise Exception('{} must be part of the filename.'.format(suffix))\n with open(source_file, \"rb\") as image_file:\n encoded_python = base64.b64encode(image_file.read()).decode('ascii')\n ext = os.path.splitext(source_file)[-1].lower()\n if ext not in ['.ipynb','.py']: raise Exception(\"Source file is {} must be .py or .ipynb\".format(ext))\n r = requests.post(\"https://api.heatonresearch.com/assignment-submit\",\n headers={'x-api-key':key}, json={'csv':base64.b64encode(data.to_csv(index=False).encode('ascii')).decode(\"ascii\"),\n 'assignment': no, 'ext':ext, 'py':encoded_python})\n if r.status_code == 200:\n print(\"Success: {}\".format(r.text))\n else: print(\"Failure: {}\".format(r.text))", "_____no_output_____" ] ], [ [ "# Assignment #3 Sample Code\n\nThe following code provides a starting point for this assignment.", "_____no_output_____" ] ], [ [ "import os\nimport pandas as pd\nfrom scipy.stats import zscore\n\n# This is your student key that I emailed to you at the beginnning of the semester.\nkey = \"qgABjW9GKV1vvFSQNxZW9akByENTpTAo2T9qOjmh\" # This is an example key and will not work.\n\n# You must also identify your source file. (modify for your local setup)\n# file='/resources/t81_558_deep_learning/assignment_yourname_class1.ipynb' # IBM Data Science Workbench\n# file='C:\\\\Users\\\\jeffh\\\\projects\\\\t81_558_deep_learning\\\\t81_558_class1_intro_python.ipynb' # Windows\n#file='/Users/jeff/projects/t81_558_deep_learning/assignment_yourname_class1.ipynb' # Mac/Linux\nfile = '...location of your source file...'\n\n# Begin assignment\npath = \"./data/\"\n\nfilename_read = os.path.join(path,\"reg-30-spring-2018.csv\")\ndf = pd.read_csv(filename_read)\n\n# Calculate density\n# Encode dummies\n\n# Save a copy to examine, if you like\ndf.to_csv('3.csv',index=False)\n\n# Submit\nsubmit(source_file=file,data=df,key=key,no=3)", "_____no_output_____" ] ], [ [ "# Checking Your Submission\n\nYou can always double check to make sure your submission actually happened. The following utility code will help with that.", "_____no_output_____" ] ], [ [ "import requests\nimport pandas as pd\nimport base64\nimport os\n\ndef list_submits(key):\n r = requests.post(\"https://api.heatonresearch.com/assignment-submit\",\n headers={'x-api-key': key},\n json={})\n if r.status_code == 200:\n print(\"Success: \\n{}\".format(r.text))\n else:\n print(\"Failure: {}\".format(r.text))\n\ndef display_submit(key,no):\n r = requests.post(\"https://api.heatonresearch.com/assignment-submit\",\n headers={'x-api-key': key},\n json={'assignment':no})\n if r.status_code == 200:\n print(\"Success: \\n{}\".format(r.text))\n else:\n print(\"Failure: {}\".format(r.text))\n", "_____no_output_____" ], [ "# Show a listing of all submitted assignments.\n\nkey = \"qgABjW9GKV1vvFSQNxZW9akByENTpTAo2T9qOjmh\"\n\nlist_submits(key)", "_____no_output_____" ], [ "# Show one assignment, by number.\n\ndisplay_submit(key,3)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
d014c218a1fc629dd37796fe7d670a30bbf7922e
1,805
ipynb
Jupyter Notebook
VAE/VAE.ipynb
DarrenZhang01/Machine-Learning
f111b430330ac38d5dd711033cf068eceaf3600c
[ "MIT" ]
2
2019-01-25T19:39:36.000Z
2019-01-25T19:39:44.000Z
VAE/VAE.ipynb
DarrenZhang01/Machine-Learning
f111b430330ac38d5dd711033cf068eceaf3600c
[ "MIT" ]
3
2018-08-14T14:18:28.000Z
2018-08-18T20:59:01.000Z
VAE/VAE.ipynb
DarrenZhang01/Machine-Learning
f111b430330ac38d5dd711033cf068eceaf3600c
[ "MIT" ]
null
null
null
25.422535
108
0.590582
[ [ [ "## Variational Autoencoder \n\n### From book - \"Hands-On Machine Learning with Scikit-Learn and TensorFlow\"", "_____no_output_____" ], [ "### $\\bullet$ Perform PCA with an undercomplete linear autoencoder\n\n(Undercomplete Autoencode: The internal representation has a lower dimensionality than the input data)", "_____no_output_____" ] ], [ [ "import tensorflow as tf\nfrom tensorflow.contrib.layers import fully_connected\n\nn_inputs = 3 # 3 D input dimension\nn_hidden = 2 # 2 D internal representation\nn_outputs = n_inputs\n\nlearning_rate = 0.01\n\nX = tf.placeholder(tf.float32, shape=[None, n_inputs])\nhidden = fully_connected(X, n_hidden, activation_fn = None)\noutputs = fully_connected(hidden, n_outputs, activation_fn = None)\n\nreconstruction_loss = tf.reduce_mean(tf.square(outputs - X)) # MSE\n\noptimizer = tf.train.AdamOptimizer(learning_rate)\ntraining_op = optimizer.minimize(reconstruction_loss)\n\ninit = tf.global_variables_initializer()", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ] ]
d014d1b13f2220daff5e26d07689bfc884c19111
88,327
ipynb
Jupyter Notebook
GeneradorDeCurvaDeCarga.ipynb
DanielFranco-NEUenergy/probability_distribution_fitting_power_load_profile
d334d84df06b7f97794af9f1fe271833b3eb1784
[ "CC0-1.0" ]
1
2022-02-05T08:52:46.000Z
2022-02-05T08:52:46.000Z
GeneradorDeCurvaDeCarga.ipynb
DanielFranco-NEUenergy/probability_distribution_fitting_power_load_profile
d334d84df06b7f97794af9f1fe271833b3eb1784
[ "CC0-1.0" ]
null
null
null
GeneradorDeCurvaDeCarga.ipynb
DanielFranco-NEUenergy/probability_distribution_fitting_power_load_profile
d334d84df06b7f97794af9f1fe271833b3eb1784
[ "CC0-1.0" ]
null
null
null
68.736965
26,324
0.698665
[ [ [ "import statistics\nimport pprint\nimport pandas as pd\nimport numpy as np\nfrom random import uniform\nfrom tslearn.utils import to_time_series_dataset\nfrom tslearn.metrics import dtw#, gak\nimport plotly.express as px\nimport scipy.stats as st\nimport matplotlib.pyplot as plt \nfrom scipy.optimize import curve_fit\nimport seaborn as sns; sns.set()\n#ToDo: Threading", "_____no_output_____" ], [ "def get_best_distribution(data):\n \n dist_names = [\"gamma\", \"gumbel_l\", \"cauchy\", \"dgamma\", \"beta\", \"betaprime\", \"exponweib\", \"rayleigh\", \"fisk\",\n \"gausshyper\", \"invweibull\", \"pareto\", \"alpha\", \"expon\", \"hypsecant\", \"mielke\", \"loggamma\",\n \"rdist\", \"rice\"] ## Agregar más a voluntad\n dist_results = []\n params = {}\n for dist_name in dist_names:\n dist = getattr(st, dist_name)\n param = dist.fit(data)\n params[dist_name] = param\n # Applying the Kolmogorov-Smirnov test\n D, p = st.kstest(data, dist_name, args=param)\n print(\"p value for \"+dist_name+\" = \"+str(p))\n dist_results.append((dist_name, p))\n\n # select the best fitted distribution\n best_dist, best_p = (max(dist_results, key=lambda item: item[1]))\n # store the name of the best fit and its p value\n print(\"Best fitting distribution: \"+str(best_dist))\n print(\"Best p value: \"+ str(best_p))\n \n parms = params[best_dist]\n #print(\"Parameters for the best fit: \"+ str(parms))\n \n map_parms = {}\n dist = getattr(st, best_dist)\n try:\n counter_wrong_chars = 0 #To solve a bug\n for position, shape_parameter in enumerate(dist.shapes):\n #print(position, shape_parameter)\n if shape_parameter not in [' ', ',']:\n map_parms[shape_parameter] = parms[position-counter_wrong_chars]\n else:\n counter_wrong_chars += 1\n except:\n pass\n finally:\n map_parms[\"loc\"] = parms[-2]\n map_parms[\"scale\"] = parms[-1]\n print(\"Parameters for the best fit: \"+ str(map_parms))\n\n return best_dist, best_p, parms, map_parms", "_____no_output_____" ], [ "def get_optimal_curves(df_curves, example_curves, ts_example_curves, dict_probability_distrs, prob_distrs,\n min_count_generated_curves, a, b, E_min, min_f_load, roof_dtw_distance, min_corr):\n\n I = 5000 #5000\n acum_generated_curves = 0\n\n while acum_generated_curves < min_count_generated_curves:\n for i in range(1,I+1):\n C_i = [None] * 24\n h_max = int(round(uniform(19, 21),0))\n C_i[h_max] = 1\n\n for h, none in enumerate(C_i):\n if h != h_max:\n function = dict_probability_distrs[prob_distrs[h][0]]\n parms = prob_distrs[h][1]\n was_random_number_found = False\n while was_random_number_found is False:\n E = function.rvs(**parms, size=1)[0]\n if (E>=E_min and E<1):\n was_random_number_found = True\n C_i[h] = E\n E_acum = sum(C_i)\n if (E_acum>=a and E_acum<=b):\n #print(C_i, type(C_i))\n f_load = statistics.mean(C_i) / max(C_i)\n if f_load >= min_f_load:\n ts_C_i = to_time_series_dataset(C_i)[0]\n dtw_distances = []\n\n for k, curve in enumerate(ts_example_curves):\n dtw_distance = dtw(ts_C_i, curve)\n dtw_distances.append(dtw_distance)\n average_dtw = statistics.mean(dtw_distances)\n if average_dtw < roof_dtw_distance:\n corrs = []\n\n for example_curve in example_curves:\n corr = np.corrcoef(C_i, example_curve)\n corrs.append(corr[0][1])\n average_corr = statistics.mean(corrs)\n if average_corr>=min_corr:\n print(i, f_load, E_acum, average_dtw, average_corr)\n df_curves = df_curves.append(\n { '0': C_i[0], '1': C_i[1], '2': C_i[2],\n '3': C_i[3], '4': C_i[4], '5': C_i[5],\n '6': C_i[6], '7': C_i[7], '8': C_i[8],\n '9': C_i[9], '10': C_i[10], '11': 
C_i[11],\n '12': C_i[12], '13': C_i[13], '14': C_i[14],\n '15': C_i[15], '16': C_i[16], '17': C_i[17], \n '18': C_i[18], '19': C_i[19], '20': C_i[20],\n '21': C_i[21], '22': C_i[22], '23': C_i[23],\n 'FC': f_load, 'Sum': E_acum,\n 'DTW_avg_distance': average_dtw, 'Avg_correlation': average_corr },\n ignore_index=True\n )\n acum_generated_curves += 1\n if acum_generated_curves>=min_count_generated_curves:\n \n return (df_curves)", "_____no_output_____" ], [ "df_example_curves = pd.read_excel (r'Curvas.xlsx')\ndf_example_curves.drop(\n df_example_curves.columns[\n df_example_curves.columns.str.contains('unnamed', case = False, na=False)\n ],\n axis = 1,\n inplace = True\n)\n\na = df_example_curves['Sum'].min()\nb = df_example_curves['Sum'].max()\n\ndf_example_curves = df_example_curves.drop(['FC', 'Sum', 'Comentario'], axis=1)\n\nprint(\"a: \", a, \" b: \", b)\nprint(df_example_curves)", "a: 15.161736140999999 b: 19.249227906976746\n 0 1 2 3 4 5 6 \\\n0 0.465685 0.397058 0.367646 0.372548 0.382352 0.421568 0.568626 \n1 0.637209 0.506977 0.469767 0.469767 0.488372 0.548837 0.725581 \n2 0.637209 0.506977 0.469767 0.469767 0.488372 0.548837 0.705581 \n3 0.617209 0.486977 0.449767 0.449767 0.468372 0.528837 0.685581 \n4 0.589328 0.474497 0.439237 0.440463 0.456867 0.512020 0.671343 \n5 0.539052 0.436891 0.403702 0.403582 0.420730 0.470823 0.621096 \n\n 7 8 9 ... 14 15 16 17 \\\n0 0.622548 0.784315 0.779408 ... 0.647058 0.504901 0.495097 0.460784 \n1 0.800000 0.990698 0.981395 ... 0.818605 0.865116 0.893023 0.734884 \n2 0.800000 0.901320 0.891395 ... 0.818605 0.865116 0.893023 0.767884 \n3 0.800000 0.901320 0.891395 ... 0.818605 0.865116 0.873023 0.747884 \n4 0.755637 0.894413 0.885899 ... 0.775718 0.775062 0.788542 0.677859 \n5 0.702376 0.844583 0.829497 ... 
0.728547 0.718564 0.731319 0.641747 \n\n 18 19 20 21 22 23 \n0 0.593136 0.931371 1.000000 0.936270 0.838232 0.622548 \n1 0.758140 0.995349 1.000000 0.999256 0.989507 0.809302 \n2 0.758140 0.965742 0.994320 1.000000 0.932314 0.809302 \n3 0.758140 0.925742 1.000000 0.956440 0.932314 0.759302 \n4 0.716889 0.954551 0.999999 1.000000 0.923092 0.750114 \n5 0.694820 1.000000 0.975000 0.978047 0.900141 0.709570 \n\n[6 rows x 24 columns]\n" ], [ "prob_distrs = []\nplots = []\nfor (columnName, columnData) in df_example_curves.iteritems():\n ## Maximizar el p-value ##\n print('Colunm Name : ', columnName)\n #print('Column Contents : ', columnData.values, type(columnData.values), columnData.values.shape)\n best_dist, best_p, parms, map_parms = get_best_distribution(columnData.values)\n prob_distrs.append([best_dist, map_parms])\n #if columnName == 12:\n # ax = sns.distplot(columnData.values, kde=False)\n #ax = sns.distplot(columnData.values, kde=False)\nprint(\"prob_distrs: \")\npprint.pprint(prob_distrs)", "Colunm Name : 0\np value for gamma = 0.9153858662603467\np value for gumbel_l = 0.9611650805916115\np value for cauchy = 0.5828338337557992\np value for dgamma = 0.7103295891934879\np value for beta = 0.4258226481275949\np value for betaprime = 0.9240764030877295\n" ], [ "dict_probability_distrs = { \"gamma\": st.gamma, \"gumbel_l\": st.gumbel_l, \"cauchy\": st.cauchy, \"dgamma\": st.dgamma,\n \"beta\": st.beta, \"betaprime\": st.betaprime, \"exponweib\": st.exponweib, \"rayleigh\": st.rayleigh,\n \"fisk\": st.fisk, \"gausshyper\": st.gausshyper, \"invweibull\": st.invweibull, \"pareto\": st.pareto,\n \"alpha\": st.alpha, \"expon\": st.expon, \"hypsecant\": st.hypsecant, \"mielke\": st.mielke,\n \"loggamma\": st.loggamma, \"rdist\": st.rdist, \"rice\": st.rice }", "_____no_output_____" ], [ "example_curves = df_example_curves.values.tolist()\nts_example_curves = to_time_series_dataset(example_curves)\n#pprint.pprint(ts_example_curves)", "_____no_output_____" ], [ "df_curves = pd.DataFrame(\n columns=[\n '0','1','2','3','4','5','6','7','8','9','10','11','12','13','14','15','16','17','18','19','20','21','22','23',\n 'FC','Sum','DTW_avg_distance','Avg_correlation'\n ]\n)\nprint(df_curves)", "Empty DataFrame\nColumns: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, FC, Sum, DTW_avg_distance, Avg_correlation]\nIndex: []\n\n[0 rows x 28 columns]\n" ], [ "E_min = 0.375\nmin_f_load = 0.7625\nmin_count_generated_curves = 25\nroof_dtw_distance = 0.25 #0.25\nmin_corr = 0.95 #0.95\n\ndf_curves = get_optimal_curves(df_curves, example_curves, ts_example_curves, dict_probability_distrs, prob_distrs,\n min_count_generated_curves, a, b, E_min, min_f_load, roof_dtw_distance, min_corr)", "171 0.7647251331973163 18.35340319673559 0.24503839754008055 0.961931004890237\n3323 0.7643839578333489 18.34521498800037 0.24547113782050675 0.9546063977452295\n3563 0.7688067637255614 18.451362329413477 0.24813106456230546 0.965695644938727\n4503 0.765315690300572 18.367576567213728 0.24941423267433088 0.9575925135551389\n4656 0.765117687890543 18.362824509373034 0.2465874505082063 0.950532602590968\n303 0.7638784961744066 18.33308390818576 0.23952572194898983 0.959238945217764\n1338 0.7636822787411582 18.328374689787797 0.24931849291311164 0.9608439128899408\n2178 0.7644288616935668 18.346292680645604 0.24485351528679425 0.958688497511137\n930 0.7625874552448444 18.302098925876262 0.2495956201062087 0.9525297815128697\n1528 0.7665429814088554 18.397031553812532 0.24951533093937722 
0.959438828470824\n1975 0.7627562723310513 18.30615053594523 0.246796913882674 0.9597233601560198\n2287 0.7635390850453465 18.324938041088316 0.24536118350853603 0.9683438606387573\n3971 0.7641261700839537 18.339028082014885 0.23196229888748496 0.9662829522957982\n562 0.7679573461710194 18.430976308104466 0.24090494728437123 0.9559613347979116\n4225 0.7645447944666002 18.349075067198406 0.24741318068746676 0.9530021427690418\n2596 0.766774974340985 18.40259938418364 0.23757411804463513 0.9594543074079265\n4091 0.7687534210134895 18.450082104323748 0.24806832111244398 0.9574828187183869\n1743 0.7646185735020942 18.350845764050263 0.2492214675928359 0.9562940913260015\n671 0.7629166230037233 18.30999895208936 0.2456570931394796 0.9611989573514303\n1232 0.7627742357374949 18.306581657699876 0.24759312590266563 0.9612802669225594\n3992 0.7645138801278767 18.34833312306904 0.23946324325089557 0.9667706943315902\n683 0.7724547856448182 18.538914855475635 0.24375775634485272 0.9625265340767684\n2204 0.7630014296179811 18.312034310831546 0.2498586534397405 0.9716069227668427\n3421 0.7626335124546738 18.303204298912167 0.24985713485508393 0.9640309536101712\n4669 0.7657127801281483 18.377106723075556 0.2358868906109151 0.9700543312036126\n" ], [ "print(df_curves)", " 0 1 2 3 4 5 6 \\\n0 0.610331 0.434236 0.449072 0.401362 0.469460 0.527173 0.721520 \n1 0.584646 0.446702 0.425810 0.459555 0.451049 0.591894 0.708186 \n2 0.601119 0.486388 0.416625 0.486396 0.474938 0.542921 0.613405 \n3 0.631015 0.481942 0.445273 0.430063 0.459355 0.515438 0.690438 \n4 0.597480 0.468423 0.495378 0.478195 0.481154 0.464275 0.686589 \n5 0.569603 0.464617 0.444821 0.425164 0.471543 0.528657 0.700729 \n6 0.582364 0.488758 0.444639 0.411429 0.432149 0.587136 0.750045 \n7 0.617605 0.498322 0.432600 0.459844 0.511347 0.508999 0.726855 \n8 0.565926 0.485179 0.439380 0.405086 0.515203 0.525995 0.716504 \n9 0.551098 0.507204 0.489339 0.437384 0.461882 0.560855 0.686293 \n10 0.575701 0.505688 0.479513 0.435272 0.493991 0.500583 0.668671 \n11 0.601050 0.461377 0.428182 0.426886 0.446890 0.503203 0.701188 \n12 0.567889 0.475363 0.469553 0.459329 0.464381 0.507080 0.706435 \n13 0.606832 0.558500 0.446838 0.425723 0.452666 0.489967 0.677048 \n14 0.608089 0.456735 0.426914 0.469663 0.463503 0.484841 0.681581 \n15 0.593597 0.512707 0.452895 0.431816 0.472401 0.549037 0.692828 \n16 0.579149 0.479544 0.462354 0.448638 0.529496 0.525509 0.710774 \n17 0.621202 0.517476 0.395390 0.483414 0.523765 0.511839 0.704509 \n18 0.576396 0.454978 0.464117 0.483288 0.501596 0.562533 0.667869 \n19 0.550749 0.442094 0.408348 0.464953 0.438253 0.587730 0.719126 \n20 0.583954 0.487172 0.466046 0.449141 0.512082 0.586618 0.700041 \n21 0.582889 0.519084 0.445525 0.446502 0.472892 0.480493 0.693889 \n22 0.595415 0.468229 0.486110 0.407924 0.476947 0.528138 0.736204 \n23 0.573570 0.499195 0.437844 0.448753 0.476909 0.569435 0.716191 \n24 0.590578 0.495606 0.457482 0.511778 0.466572 0.523478 0.719049 \n\n 7 8 9 ... 18 19 20 21 \\\n0 0.805128 0.922059 0.854495 ... 0.730580 0.929617 0.995966 1.000000 \n1 0.868968 0.872311 0.898259 ... 0.680803 0.993300 1.000000 0.973175 \n2 0.787211 0.957635 0.878689 ... 0.735049 1.000000 0.993491 0.979745 \n3 0.696887 0.843409 0.861639 ... 0.771561 0.940681 1.000000 0.973746 \n4 0.812116 0.775915 0.888854 ... 0.679053 0.961869 1.000000 0.952062 \n5 0.712695 0.808513 0.869409 ... 0.755403 0.962396 1.000000 0.990879 \n6 0.810974 0.934806 0.891414 ... 0.719578 0.974938 1.000000 0.993948 \n7 0.852907 0.904960 0.872153 ... 
0.786658 1.000000 0.998248 0.994051 \n8 0.807198 0.865226 0.855095 ... 0.642927 0.966234 0.998750 1.000000 \n9 0.818304 0.890324 0.872878 ... 0.719487 1.000000 0.991797 0.994524 \n10 0.865534 0.897513 0.931538 ... 0.753769 0.981755 1.000000 0.955048 \n11 0.828686 0.926019 0.892286 ... 0.713970 1.000000 0.995353 0.971315 \n12 0.810038 0.852903 0.908589 ... 0.703210 1.000000 0.995361 0.976136 \n13 0.784099 0.937545 0.824590 ... 0.690498 0.997580 0.994436 1.000000 \n14 0.757003 0.903793 0.885075 ... 0.711228 0.932500 0.993643 1.000000 \n15 0.798460 0.865597 0.932701 ... 0.786419 1.000000 0.985692 0.992387 \n16 0.800766 0.870810 0.885892 ... 0.749812 0.964640 0.990651 1.000000 \n17 0.847926 0.925868 0.932277 ... 0.708537 0.976736 1.000000 0.972218 \n18 0.726720 0.951390 0.863404 ... 0.722370 0.963382 0.995184 1.000000 \n19 0.781916 0.848816 0.857951 ... 0.729142 0.989391 1.000000 0.956537 \n20 0.795496 0.909213 0.879520 ... 0.698039 0.954411 0.998974 1.000000 \n21 0.806863 0.911280 0.902454 ... 0.782325 0.976283 1.000000 0.948558 \n22 0.752517 0.907268 0.865346 ... 0.719541 0.991781 1.000000 0.949757 \n23 0.682673 0.890427 0.878648 ... 0.769428 1.000000 0.991502 0.986843 \n24 0.783704 0.925827 0.887782 ... 0.677566 0.958990 0.990497 1.000000 \n\n 22 23 FC Sum DTW_avg_distance Avg_correlation \n0 0.917261 0.760741 0.764725 18.353403 0.245038 0.961931 \n1 0.944064 0.787006 0.764384 18.345215 0.245471 0.954606 \n2 0.957858 0.770301 0.768807 18.451362 0.248131 0.965696 \n3 0.894379 0.815266 0.765316 18.367577 0.249414 0.957593 \n4 0.944624 0.781170 0.765118 18.362825 0.246587 0.950533 \n5 0.955520 0.744181 0.763878 18.333084 0.239526 0.959239 \n6 0.909428 0.758223 0.763682 18.328375 0.249318 0.960844 \n7 0.955644 0.772123 0.764429 18.346293 0.244854 0.958688 \n8 0.932136 0.795833 0.762587 18.302099 0.249596 0.952530 \n9 0.972552 0.746250 0.766543 18.397032 0.249515 0.959439 \n10 0.993480 0.732835 0.762756 18.306151 0.246797 0.959723 \n11 0.904928 0.775960 0.763539 18.324938 0.245361 0.968344 \n12 0.941367 0.775846 0.764126 18.339028 0.231962 0.966283 \n13 0.925390 0.759007 0.767957 18.430976 0.240905 0.955961 \n14 0.896448 0.757157 0.764545 18.349075 0.247413 0.953002 \n15 0.936814 0.777048 0.766775 18.402599 0.237574 0.959454 \n16 0.946515 0.743536 0.768753 18.450082 0.248068 0.957483 \n17 0.946179 0.744502 0.764619 18.350846 0.249221 0.956294 \n18 0.932184 0.769242 0.762917 18.309999 0.245657 0.961199 \n19 0.937225 0.769230 0.762774 18.306582 0.247593 0.961280 \n20 0.987470 0.758447 0.764514 18.348333 0.239463 0.966771 \n21 0.921090 0.773489 0.772455 18.538915 0.243758 0.962527 \n22 0.923676 0.782096 0.763001 18.312034 0.249859 0.971607 \n23 0.903781 0.750081 0.762634 18.303204 0.249857 0.964031 \n24 0.941060 0.770900 0.765713 18.377107 0.235887 0.970054 \n\n[25 rows x 28 columns]\n" ], [ "for index, row in df_curves.loc[:, \"0\":\"23\"].iterrows():\n fig = px.line(row, width=600, height=300, xlabel='Hora')\n fig.show()", "_____no_output_____" ], [ "average_optimal_curve = df_curves.loc[:, \"0\":\"23\"].mean(axis=0)\n\nprint(average_optimal_curve, type(average_optimal_curve))", "0 0.588730\n1 0.483821\n2 0.448402\n3 0.447502\n4 0.476817\n5 0.530553\n6 0.699839\n7 0.791792\n8 0.891977\n9 0.882838\n10 0.892188\n11 0.902291\n12 0.885800\n13 0.872095\n14 0.822360\n15 0.818517\n16 0.811809\n17 0.729538\n18 0.725478\n19 0.976659\n20 0.996382\n21 0.982437\n22 0.936843\n23 0.766819\ndtype: float64 <class 'pandas.core.series.Series'>\nLoad Factor: \n" ], [ "average_optimal_curve.plot(linewidth=3.0, 
marker='x', ms=6.5)\nplt.axis((None,None,0,1))\nplt.grid(b=True, which='major', color='k', linestyle='--')\nplt.minorticks_on()\nplt.grid(b=True, which='minor', color='grey', linestyle=':')\nplt.show()\n\nfinal_load_factor = average_optimal_curve.mean() / average_optimal_curve.max()\nprint(\"final_load_factor: \", final_load_factor)\n\nfinal_energy_sum = average_optimal_curve.sum()\nprint(\"final_energy_sum: \", final_energy_sum)", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
d014d722a3faad4f60c81695c92be284439202ff
56,842
ipynb
Jupyter Notebook
docs/field-read-write.ipynb
ubermag/discretisedfield
fec016c85fcc091006e678845bca999b993b987c
[ "BSD-3-Clause" ]
9
2019-08-30T14:00:43.000Z
2022-01-16T15:01:44.000Z
docs/field-read-write.ipynb
ubermag/discretisedfield
fec016c85fcc091006e678845bca999b993b987c
[ "BSD-3-Clause" ]
50
2019-06-13T13:41:57.000Z
2022-03-28T09:14:33.000Z
docs/field-read-write.ipynb
ubermag/discretisedfield
fec016c85fcc091006e678845bca999b993b987c
[ "BSD-3-Clause" ]
7
2019-08-28T14:16:10.000Z
2021-12-13T21:06:06.000Z
67.03066
5,075
0.803737
[ [ [ "# Reading and writing fields\n\nThere are two main file formats to which a `discretisedfield.Field` object can be saved:\n\n- [VTK](https://vtk.org/) for visualisation using e.g., [ParaView](https://www.paraview.org/) or [Mayavi](https://docs.enthought.com/mayavi/mayavi/)\n- OOMMF [Vector Field File Format (OVF)](https://math.nist.gov/oommf/doc/userguide12a5/userguide/Vector_Field_File_Format_OV.html) for exchanging fields with micromagnetic simulators.\n\nLet us say we have a nanosphere sample:\n\n$$x^2 + y^2 + z^2 <= r^2$$\n\nwith $r=5\\,\\text{nm}$. The space is discretised into cells with dimensions $(0.5\\,\\text{nm}, 0.5\\,\\text{nm}, 0.5\\,\\text{nm})$. The value of the field at $(x, y, z)$ point is $(-cy, cx, cz)$, with $c=10^{9}$. The norm of the field inside the cylinder is $10^{6}$.\n\nLet us first build that field.", "_____no_output_____" ] ], [ [ "import discretisedfield as df\n\nr = 5e-9\ncell = (0.5e-9, 0.5e-9, 0.5e-9)\n\nmesh = df.Mesh(p1=(-r, -r, -r), p2=(r, r, r), cell=cell)\n\ndef norm_fun(pos):\n x, y, z = pos\n if x**2 + y**2 + z**2 <= r**2:\n return 1e6\n else:\n return 0\n \ndef value_fun(pos):\n x, y, z = pos\n c = 1e9\n return (-c*y, c*x, c*z)\n\nfield = df.Field(mesh, dim=3, value=value_fun, norm=norm_fun)", "_____no_output_____" ] ], [ [ "Let us have a quick view of the field we created", "_____no_output_____" ] ], [ [ "# NBVAL_IGNORE_OUTPUT\nfield.plane('z').k3d.vector(color_field=field.z)", "_____no_output_____" ] ], [ [ "## Writing the field to a file\n\nThe main method used for saving field in different files is `discretisedfield.Field.write()`. It takes `filename` as an argument, which is a string with one of the following extensions:\n- `'.vtk'` for saving in the VTK format\n- `'.ovf'`, `'.omf'`, `'.ohf'` for saving in the OVF format\n\nLet us firstly save the field in the VTK file.", "_____no_output_____" ] ], [ [ "vtkfilename = 'my_vtk_file.vtk'\nfield.write(vtkfilename)", "_____no_output_____" ] ], [ [ "We can check if the file was saved in the current directory.", "_____no_output_____" ] ], [ [ "import os\nos.path.isfile(f'./{vtkfilename}')", "_____no_output_____" ] ], [ [ "Now, we can delete the file:", "_____no_output_____" ] ], [ [ "os.remove(f'./{vtkfilename}')", "_____no_output_____" ] ], [ [ "Next, we can save the field in the OVF format and check whether it was created in the current directory.", "_____no_output_____" ] ], [ [ "omffilename = 'my_omf_file.omf'\nfield.write(omffilename)\nos.path.isfile(f'./{omffilename}')", "_____no_output_____" ] ], [ [ "There are three different possible representations of an OVF file: one ASCII (`txt`) and two binary (`bin4` or `bin8`). ASCII `txt` representation is a default representation when `discretisedfield.Field.write()` is called. If any different representation is required, it can be passed via `representation` argument.", "_____no_output_____" ] ], [ [ "field.write(omffilename, representation='bin8')\nos.path.isfile(f'./{omffilename}')", "_____no_output_____" ] ], [ [ "## Reading the OVF file\n\nThe method for reading OVF files is a class method `discretisedfield.Field.fromfile()`. By passing a `filename` argument, it reads the file and creates a `discretisedfield.Field` object. 
It is not required to pass the representation of the OVF file to the `discretisedfield.Field.fromfile()` method, because it can retrieve it from the content of the file.", "_____no_output_____" ] ], [ [ "read_field = df.Field.fromfile(omffilename)", "_____no_output_____" ] ], [ [ "Like previouly, we can quickly visualise the field", "_____no_output_____" ] ], [ [ "# NBVAL_IGNORE_OUTPUT\nread_field.plane('z').k3d.vector(color_field=read_field.z)", "_____no_output_____" ] ], [ [ "Finally, we can delete the OVF file we created.", "_____no_output_____" ] ], [ [ "os.remove(f'./{omffilename}')", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
d014e9fc761bf3d022475f7184c8938bdb65a24b
24,501
ipynb
Jupyter Notebook
notebooks/guionpracticas.ipynb
smitexx/umucv
875ab90b77fc189a87cef4f16cd090218a574962
[ "BSD-3-Clause" ]
null
null
null
notebooks/guionpracticas.ipynb
smitexx/umucv
875ab90b77fc189a87cef4f16cd090218a574962
[ "BSD-3-Clause" ]
null
null
null
notebooks/guionpracticas.ipynb
smitexx/umucv
875ab90b77fc189a87cef4f16cd090218a574962
[ "BSD-3-Clause" ]
null
null
null
50.83195
866
0.686707
[ [ [ "# Sesiones prácticas", "_____no_output_____" ], [ "## 0", "_____no_output_____" ], [ "Instalación de Python + ecosistema científico + opencv + opengl\n\n- aula virtual -> página web -> install\n- git o unzip master\n- anaconda completo o miniconda\n- windows: opencv y probar los ejemplos\n- linux: primer método más seguro, con paquetes seleccionados\n- probar webcam.py stream.py, surface.py, image_gl.py, hog/facelandmarks.py (en ../data get.sh)\n- manejo básico de jupyter\n\nOpcional:\n\n- compilación opencv\n- probar docker", "_____no_output_____" ], [ "## 2", "_____no_output_____" ], [ "Dispositivos de captura\n\n- webcam.py con opencv crudo\n- spyder\n- umucv (install con --upgrade) (update_umucv.sh)\n- PYTHONPATH\n- stream.py, opciones de autostream, efecto de teclas, --help, --dev=help\n\n - webcams\n - videos\n - carpeta de imágenes\n - teléfono\n - youtube\n - urls de tv\n \n- ejemplo de recorte invertido\n- grabar video de demo (save_video.py)\n", "_____no_output_____" ], [ "## 3", "_____no_output_____" ], [ "Más utilidades: mouse coords, tracker, roi\n\n- medidor.py\n- inrange.py", "_____no_output_____" ], [ "(**PROVISIONAL**)", "_____no_output_____" ], [ "## 4", "_____no_output_____" ], [ "- captura en hilo aparte\n- mean shift / camshift", "_____no_output_____" ], [ "## 5", "_____no_output_____" ], [ "- hog/hog0.py\n\n- hog/pedestrian.py con\n\n- dlib: hog/facelandmarks.py\n\n- dlib: herramienta imglab, hog/train_detector.py, hog/run_detector.py", "_____no_output_____" ], [ "## 6", "_____no_output_____" ], [ "- LK/corners0.py, 1, 2, 3, LK/lk_tracks.py", "_____no_output_____" ], [ "## 7", "_____no_output_____" ], [ "En esta sesión vamos a experimentar con el detector de puntos de interés SIFT. (La implementación de opencv está en un repositorio aparte con las contribuciones \"non free\", pero la patente ha expirado hace unos días. En cualquier caso, la versión de opencv que estamos usando lo incluye.)\n\nNuestro objetivo es calcular un conjunto de \"keypoints\", cada uno con su descriptor (vector de características que describe el entorno del punto), que nos permita encontrarlo en imágenes futuras. Esto tiene una aplicación inmediata para reconocer objetos y más adelante en geometría visual.\n\nEmpezamos con el ejemplo de código code/SIFT/sift0.py, que simplemente calcula y muestra los puntos de interés. Es interesante observar el efecto de los parámetros del método y el tiempo de cómputo en función del tamaño de la imagen (que puedes cambiar con --size o --resize).\n\nEl siguiente ejemplo code/SIFT/sift1.py muestra un primer ataque para establecer correspondencias. Los resultados son bastante pobres.\n\nFinalmente, en code/SIFT/sift.py aplicamos un criterio de selección para eliminar muchas correspondencias erróneas (aunque no todas). Esto es en principio suficiente para el reconocimiento de objetos. (Más adelante veremos una forma mucho mejor de eliminar correspondencias erróneas, necesaria para aplicaciones de geometría.)\n\nEl ejercicio obligatorio **SIFT** es una ampliación sencilla de este código. Se trata de almacenar un conjunto de modelos (¡con textura! para que tengan suficientes keypoints) como portadas de libros, discos, videojuegos, etc. y reconocerlos en base a la proporción de coincidencias detectadas.\n\nUna segunda actividad en esta sesión consiste en comentar el ejemplo de código code/server.py. Utiliza el paquete [flask][flask] para crear un sencillo servidor web que devuelve la imagen de la cámara modificada como deseemos. 
Sirve como punto de partida para el ejercicio opcional **WEB**.\n\n[flask]: https://en.wikipedia.org/wiki/Flask_(web_framework)", "_____no_output_____" ], [ "## 8", "_____no_output_____" ], [ "En esta sesión vamos a explorar el reconocimiento de formas mediante descriptores frecuenciales.\n\nNuestro objetivo es hacer un programa que reconozca la forma de trébol, como se muestra [en este pantallazo](../../images/demos/shapedetect.png). Si no tenéis a mano un juego de cartas podéis usar --dev=dir:../images/card*.png para hacer las pruebas, aunque lo ideal es hacerlo funcionar con una cámara en vivo.\n\nTrabajaremos con los ejemplos de la carpeta `code/shapes` y, como es habitual, iremos añadiendo poco a poco funcionalidad. En cada nuevo paso los comentarios explican los cambios respecto al paso anterior.\n\nEmpezamos con el ejemplo shapes/trebol1.py, que simplemente prepara un bucle de captura básico, binariza la imagen y muestra los contornos encontrados. Se muestran varias formas de realizar la binarización y se puede experimentar con ellas, pero en principio el método automático propuesto suele funcionar bien en muchos casos.\n\nEl segundo paso en shapes/trebol2.py junta la visualización en una ventana y selecciona los contornos oscuros de tamaño razonable. Esto no es imprescincible para nuestra aplicación, pero es interesante trabajar con el concepto de orientación de un contorno.\n\nEn shapes/trebol3.py leemos un modelo de la silueta trébol de una imagen que tenemos en el repositorio y la mostramos en una ventana.\n\nEn shapes/trebol3b.py hacemos una utilidad para ver gráficamente las componentes frecuenciales como elipses que componen la figura. Podemos ver las componentes en su tamaño natural, incluyendo la frecuencia principal, [como aquí](../images/demos/full-components.png), o quitando la frecuencia principal y ampliando el tamaño de las siguientes, que son la base del descriptor de forma, [como se ve aquí](../images/demos/shape-components.png). Observa que las configuraciones de elipses son parecidas cuando corresponden a la misma silueta.\n\nEn shapes/trebol4.py definimos la función que calcula el descriptor invariante. Se basa esencialmente en calcular los tamaños relativos de estas elipses. En el código se explica cómo se consigue la invarianza a las transformaciones deseadas: posición, tamaño, giros, punto de partida del contorno y ruido de medida.\n\nFinalmente, en shapes/trebol5.py calculamos el descriptor del modelo y en el bucle de captura calculamos los descriptores de los contornos oscuros detectados para marcar las siluetas que tienen un descriptor muy parecido al del trébol.\n\nEl ejercicio opcional SILU consiste en ampliar este código para reconocer un conjunto más amplio de siluetas en alguna aplicación que se os parezca interesante. Por ejemplo, en images/shapes tenéis los modelos de caracteres de las placas de matrícula.", "_____no_output_____" ], [ "## 9", "_____no_output_____" ], [ "En esta sesión vamos a hacer varias actividades. Necesitamos algunos paquetes. En Linux son:\n\n sudo apt install tesseract-ocr tesseract-ocr-spa libtesseract-dev\n pip install tesserocr\n\n sudo apt install libzbar-dev\n pip install pyzbar\n\nUsuarios de Mac y Windows: investigad la forma de instalarlo.\n\n1) En primer lugar nos fijamos en el script `code/ocr.png`, cuya misión es poner en marcha el OCR con la cámara en vivo. Usamos el paquete de python `tesserocr`. 
Vamos a verificar el funcionamiento con una imagen estática, pero lo ideal es probarlo con la cámara en vivo.\n\n python ocr.py python ocr.py --dev=dir:../images/texto/bo0.png \n\nEstá pensado para marcar una sola línea de texto, [como se muestra aquí](../images/demos/ocr.png). Este pantallazo se ha hecho con la imagen bo1.png disponible en la misma carpeta, que está desenfocada, pero aún así el OCR funciona bien.\n\n\n2) El segundo ejemplo es `code/zbardemo.png` que muestra el uso del paquete pyzbar para leer códigos de barras ([ejemplo](../images/demos/barcode.png)) y códigos QR ([ejemplo](../images/demos/qr.png)) con la cámara. En los códigos de barras se detectan puntos de referencia, y en los QR se detectan las 4 esquinas del cuadrado, que pueden ser útiles como referencia en algunas aplicaciones de geometría.\n\n\n3) A continuación vamos a jugar con un bot de telegram que nos permite comunicarnos cómodamente con nuestro ordenador desde el teléfono móvil, sin necesidad de tener una dirección pública de internet.\n\nVoy a dejar durante esta mañana un bot funcionando para que hagáis pruebas. El bot se llama \"BichoBot\" y su foto de perfil es una pequeña plataforma con ruedas con un raspberry pi encima. Responde al comando /hello y si le enviáis una foto os la devolverá en blanco y negro e invertida. (Está basado en bot3.py).\n\nSimplemente necesitamos:\n\n pip install python-telegram-bot\n\nEl ejemplo `bot/bot0.py` nos envía al teléfono la IP del ordenador (es útil si necesitamos conectarnos por ssh con una máquina que tiene IP dinámica).\n\nEl ejemplo `bot/bot1.py` explica la forma de enviar una imagen nuestro teléfono cuando ocurre algo. En este caso se envía cuando se pulsa una tecla, pero lo normal es detectar automáticamente algún evento con las técnicas de visión artificial que estamos estudiando.\n\nEl ejemplo `bot/bot2.py` explica la forma de hacer que el bot responda a comandos. El comando /hello nos devuelve el saludo, el comando /stop detiene el programa y el comando /image nos devuelve una captura de nuestra webcam. (Se ha usado la captura en un hilo). \n\nEl ejemplo `bot/bot3.py` explica la forma de capturar comandos con argumentos y el procesamiento de una imagen enviada por el usuario.\n\nEsta práctica es completamente opcional, pero es muy útil para enviar cómodamente a nuestros programas de visión artificial una imagen tomada con la cámara sin necesidad de escribir una aplicación específica para el móvil. Algunos ejercicios que estamos haciendo se pueden adaptar fácilmente para probarlos a través de un bot de este tipo.\n\nPara crearos vuestro propio bot tenéis que contactar con el bot de telegram \"BotFather\", que os guiará paso a paso y os dará el token de acceso. Y luego el \"IDBot\" os dirá el id numérico de vuestro usuario.\n\nEn la carpeta hay otros ejemplos más avanzados.\n\n\n4) En la dirección \n\nhttps://github.com/ruvelro/TV-Online-TDT-Spain\n\nse pueden encontrar las url de muchos canales de televisión que están haciendo streaming en directo. Abriendo los ficheros m3u8 encontramos las url que podemos poner en --dev en nuestras aplicaciones (hay distintas resoluciones de imagen). Por ejemplo, la TVE1 está aquí:\n\nhttp://hlsliveamdgl7-lh.akamaihd.net/i/hlsdvrlive_1@583042/index_0400_av-p.m3u8?sd=10&rebase=on\n\n(Funciona a saltos porque autoStream lee los frames lo más rápido posible. 
Se puede poner un time.sleep para que vaya a ritmo normal).\n\nPróximamente propondré un ejercicio opcional relacionado con esto.", "_____no_output_____" ], [ "## 10", "_____no_output_____" ], [ "Esta sesión está dedicada a poner en marcha una red convolucional sencilla. La tarea que vamos a resolver es el reconocimiento de dígitos manuscritos. Por eso, en primer lugar es conveniente escribir unos cuantos números en una hoja de papel, con un bolígrafo que tenga un trazo no demasiado fino, y sin preocuparnos mucho de que estén bien escritos. Pueden tener distintos tamaños, pero no deben estar muy girados. Para desarrollar el programa y hacer pruebas cómodamente se puede trabajar con una imagen fija, pero la idea es que nuestro programa funcione con la cámara en vivo.\n\n\nTrabajaremos en la carpeta [code/DL/CNN](../code/DL/CNN), donde tenemos las diferentes etapas de ejercicio y una imagen de prueba.\n\nEl primer paso es `digitslive-1.py` que simplemente encuentra las manchas de tinta que pueden ser posibles números.\n\nEn `digitslive-2.py` normalizamos el tamaño de las detecciones para poder utilizar la base de datos MNIST.\n\nEn `digitslive-3.py` implementamos un clasificador gaussiano con reducción de dimensión mediante PCA y lo ponemos en marcha con la imagen en vivo. (Funciona bastante bien pero, p.ej., en la imagen de prueba comete un error).\n\nFinalmente, en `digitslive-4.py` implementamos la clasificación mediante una red convolucional mediante el paquete **keras**. Usamos unos pesos precalculados. (Esta máquina ya no comete el error anterior.)\n\nComo siempre, en cada fase del ejercicio los comentarios explican el código que se va añadiendo.\n\nUna vez conseguido esto, la sesión práctica tiene una segunda actividad que consiste en **entrenar los pesos** de (por ejemplo) esta misma red convolucional. Para hacerlo en nuestro ordenador sin perder la paciencia necesitamos una GPU con CUDA y libCUDNN. La instalación de todo lo necesario puede no ser trivial. \n\nUna alternativa muy práctica es usar [google colab](https://colab.research.google.com/), que proporciona gratuitamente máquinas virtuales con GPU y un entorno de notebooks jupyter (un poco modificados pero compatibles). Para probarlo, entrad con vuestra cuenta de google y abrid un nuevo notebook. En la opción de menú **Runtime** hay que seleccionar **Change runtime type** y en hardware accelerator ponéis GPU. En una celda del notebook copiáis directamente el contenido del archivo `cnntest.py` que hay en este mismo directorio donde estamos trabajando hoy. Al evaluar la celda se descargará la base de datos y se lanzará un proceso de entrenamiento. Cada epoch tarda unos 4s. Podéis comparar con lo que se consigue con la CPU en vuestro propio ordenador. Se puede lanzar un entrenamiento más completo, guardar los pesos y descargarlos a vuestra máquina.\n\nComo curiosidad, podéis comparar con lo que conseguiría el OCR tesseract, y guardar algunos casos de dígitos que estén bien dibujados pero que la red clasifique mal.", "_____no_output_____" ], [ "## 11 ", "_____no_output_____" ], [ "En esta sesión vamos a poner en marcha los modelos avanzados de deep learning que presentamos ayer.\n\nLos ejemplos de código se han probado sobre LINUX. En Windows o Mac puede ser necesario hacer modificaciones; para no perder mucho tiempo mi recomendación es probarlo primero en una máquina virtual.\n\nSi tenéis una GPU nvidia reciente lo ideal es instalar CUDA y libCUDNN para conseguir una mayor velocidad de proceso. 
Si no tenéis GPU no hay ningún problema, todos los modelos funcionan con CPU. (Los ejercicios de deep learning que requieren entrenamiento son opcionales.)\n\nPara ejecutar las máquinas inception, YOLO y el reconocimiento de caras necesitamos los siguientes paquetes:\n\n pip install face_recognition tensorflow==1.15.0 keras easydict\n\nLa detección de marcadores corporales *openpose* requiere unos pasos de instalación adicionales que explicaremos más adelante.\n\n(La versión 1.15.0 de tensorflow es necesaria para YOLO y openpose. Producirá algunos warnings sin mucha importancia. Si tenemos una versión más reciente de tensorflow podemos hacer `pip install --upgrade tensorflow=1.15.0` o crear un entorno de conda especial para este tema).", "_____no_output_____" ], [ "1) Para probar el **reconocimiento de caras** nos vamos a la carpeta code/DL/facerec. Debe estar correctamente instalado DLIB. \n\nEn el directorio `gente` se guardan los modelos. Como ejemplo tenemos a los miembros de Monty Python:\n\n ./facerec.py --dev=dir:../../../images/monty-python*\n \n(Recuerda que las imágenes seleccionadas con --dev=dir: se avanzan pinchando con el ratón en la ventana pequeña de muestra).\n\nPuedes meter fotos tuyas y de tu familia en la carpeta `gente` para probar con la webcam o con otras fotos.\n\nCon pequeñas modificaciones de este programa se puede resolver el ejercicio ANON: selecciona una cara en la imagen en vivo pinchando con el ratón para ocultarla (emborronándola o pixelizándola) cuando se reconozca en las imágenes siguientes.\n\nEsta versión del reconocimiento de caras no tiene aceleración con GPU (tal vez se puede configurar). Si reducimos un poco el tamaño de la imagen funciona con bastante fluidez.", "_____no_output_____" ], [ "2) Para probar la máquina **inception** nos movemos a la carpeta code/DL/inception.\n\n ./inception0.py\n \n(Se descargará el modelo del la red). Se puede probar con las fotos incluidas en la carpeta con `--dev=dir:*.png`. La versión `inception1.py` captura en hilo aparte y muestra en consola las 5 categorías más probables.\n\nAunque se supone que consigue buenos resultados en las competiciones, sobre imágenes naturales comete bastante errores.", "_____no_output_____" ], [ "3) El funcionamiento de **YOLO** es mucho mejor. Nos vamos a la carpeta code/DL y ejecutamos lo siguiente para para descargar el código y los datos de esta máquina (y de openpose).\n\n bash get.sh\n \nNos metemos en code/DL/yolo y ejecutamos:\n\n /.yolo-v3.py\n \nSe puede probar también con las imágenes de prueba incluidas añadiendo `--dev=dir:*.png`.\n\nEl artículo de [YOLO V3](https://pjreddie.com/media/files/papers/YOLOv3.pdf) es interesante. En la sección 5 el autor explica que abandonó esta línea de investigación por razones éticas. Os recomiendo que la leáis. Como curiosidad, hace unos días apareció [YOLO V4](https://arxiv.org/abs/2004.10934).", "_____no_output_____" ], [ "4) Para probar **openpose** nos vamos a code/DL/openpose. Los archivos necesarios ya se han descargado en el paso anterior, pero necesitamos instalar algunos paquetes. El proceso se explica en el README. ", "_____no_output_____" ], [ "En la carpeta `docker` hay un script para ejecutar una imagen docker que tiene instalados todos los paquetes que hemos estamos usando en la asignatura. Es experimental. No perdaís ahora tiempo con esto si no estáis familiarizados con docker.\n\nEl tema de deep learning en visión artificial es amplísimo. Para estudiarlo en detalle hace falta (como mínimo) una asignatura avanzada (master). 
Nuestro objetivo es familizarizarnos un poco con algunas de las máquinas preentrenadas disponibles para hacernos una idea de sus ventajas y limitaciones.\n\nSi estáis interesados en estos temas el paso siguiente es adaptar alguno de estos modelos a un problema propio mediante \"transfer learning\", que consiste en utilizar las primeras etapas de una red preentrenada para transformar nuestros datos y ajustar un clasificador sencillo. Alternativamente, se puede reajustar los pesos de un modelo preentrenado, fijando las capas iniciales al principio. Para remediar la posible falta de ejemplos se utilizan técnicas de \"data augmentation\", que generan variantes de los ejemplos de entrenamiento con múltiples transformaciones.", "_____no_output_____" ], [ "## 12", "_____no_output_____" ], [ "Hoy vamos a rectificar el plano de la mesa apoyándonos en marcadores artificiales.\n\nEn primer lugar trabajaremos con marcadores poligonales. Nuestro objetivo es detectar un marcador como el que aparece en el vídeo `images/rot4.mjpg`. Nos vamos a la carpeta `code/polygon`.\n\nEl primer paso (`polygon0.py`) es detectar figuras poligonales con el número de lados correcto a partir de los contornos detectados.\n\nA continuación (`polygon1.py`) nos quedamos con los polígonos que realmente pueden corresponder al marcador. Esto se hace viendo si existe una homografía que relaciona con precisión suficiente el marcador real y su posible imagen.\n\nFinalmente (`polygon2.py`) obtiene el plano rectificado\n\nTambién se puede añadir información \"virtual\" a la imagen original, como por ejemplo los ejes de coordenadas definidos por el marcador (`polygon3.py`).\n\n\nComo segunda actividad, en la carpeta `code/elipses` se muestra la forma de detectar un marcador basado en 4 círculos.", "_____no_output_____" ], [ "## 13", "_____no_output_____" ], [ "En esta sesión vamos a extraer la matriz de cámara a partir del marcador utilizado en la sesión anterior, lo que nos permitirá añadir objetos virtuales tridimensionales a la escena y determinar la posición de la cámara en el espacio.\n\nNos vamos a la carpeta `code/pose`, donde encontraremos los siguientes ejemplos de código:\n\n`pose0.py` incluye el código completo para extraer contornos, detectar el marcador poligonal, extraer la matriz de cámara y dibujar un cubo encima del marcador.\n\n`pose1.py` hace lo mismo con funciones de umucv.\n\n`pose2.py` trata de ocultar el marcador y dibuja un objeto que cambia de tamaño.\n\n`pose3.py` explica la forma de proyectar una imagen en la escena escapando del plano del marcador.\n\n`pose3D.py` es un ejemplo un poco más avanzado que utiliza el paquete pyqtgraph para mostrar en 3D la posición de la cámara en el espacio.\n\nEn el ejercicio **RA** puedes intentar que el comportamiento del objeto virtual dependa de acciones del usuario (p. ej. señalando con el ratón un punto del plano) o de objetos que se encuentran en la escena.", "_____no_output_____" ] ] ]
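A minimal standalone sketch of the frequency-domain invariant descriptor described in session 8 above, using only numpy. The function name, the complex contour encoding, and the number of components kept are illustrative assumptions, not the course's actual `umucv` implementation:

```python
import numpy as np

def invariant_descriptor(contour, n_components=10):
    """Shape descriptor invariant to position, size, rotation and start point.

    contour: (N, 2) array of (x, y) points along a closed silhouette.
    """
    z = contour[:, 0] + 1j * contour[:, 1]   # encode the contour as complex numbers
    F = np.fft.fft(z)
    F[0] = 0.0                               # drop the DC term -> position invariance
    mags = np.abs(F)                         # drop phases -> rotation / start-point invariance
    main = max(mags[1], mags[-1])            # dominant elliptical component
    feats = np.concatenate([mags[2:2 + n_components], mags[-1 - n_components:-1]])
    return feats / main                      # normalize by the main component -> size invariance
```

Descriptors of the detected dark contours can then be compared to the model's descriptor with a plain Euclidean distance, marking a silhouette as a club when the distance falls below a threshold.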
[ "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ] ]
d014f1c38989748a0f61022a1f95fc01e855ee20
7,031
ipynb
Jupyter Notebook
Jorges Notes/Tutorial_1.ipynb
Chuly90/Astroniz-YT-Tutorials
4f7cbebfa847718d0b65e6e9e253dd1138b2f04d
[ "MIT" ]
null
null
null
Jorges Notes/Tutorial_1.ipynb
Chuly90/Astroniz-YT-Tutorials
4f7cbebfa847718d0b65e6e9e253dd1138b2f04d
[ "MIT" ]
null
null
null
Jorges Notes/Tutorial_1.ipynb
Chuly90/Astroniz-YT-Tutorials
4f7cbebfa847718d0b65e6e9e253dd1138b2f04d
[ "MIT" ]
null
null
null
31.248889
98
0.566349
[ [ [ "# Import the SPICE module\nimport spiceypy", "_____no_output_____" ], [ "# We want to determine the position of our home planet with respect to the Sun.\n# The datetime shall be set as \"today\" (midnight). SPICE requires the\n# Ephemeris Time (ET); thus, we need to convert a UTC datetime string to ET.\n\nimport datetime\n\n# get today's date\nDATE_TODAY = datetime.datetime.today()\n\n# convert the datetime to a string, replacing the time with midnight\nDATE_TODAY = DATE_TODAY.strftime('%Y-%m-%dT00:00:00')\n\n# convert the utc midnight string to the corresponding ET\nspiceypy.furnsh('../kernels/lsk/naif0012.tls') #<-- This is needed.\nET_TODAY_MIDNIGHT = spiceypy.utc2et(DATE_TODAY)", "_____no_output_____" ], [ "# To compute now the position and velocity (so called state) of the Earth\n# with respect to the Sun, we use the following function to determine the\n# state vector and the so called light time (travel time of the light between \n# the Sun and our home planet). Positions are always given in km, velocities \n# in km/s and times in seconds\n\n#First we load the kernel for positional information first:\nspiceypy.furnsh('../kernels/spk/de432s.bsp')\n\n# targ : Object that shall be pointed at (399 := Earth)\n# et : The ET of the computation (Set for today)\n# ref : The reference frame. Here, it is ECLIPJ2000 (the ecliptic plane of the Earth)\n# obs : The observer respectively the center of our state vector computation (10 := Sun)\nEARTH_STATE_WRT_SUN, EARTH_SUN_LT = spiceypy.spkgeo(targ=399, \\\n et=ET_TODAY_MIDNIGHT, \\\n ref='ECLIPJ2000', \\\n obs=10)", "_____no_output_____" ], [ "#The first 3 values are the x, y, z components in km. \n#The last 3 values are the corresponding velocity components in km/s.\nprint(EARTH_STATE_WRT_SUN)", "[-1.47245097e+08 2.04697645e+07 -5.72276764e+02 -4.57590066e+00\n -2.96141750e+01 9.30806649e-04]\n" ], [ "# Is the one-way light time from the observing body\n# to the geometric position of the target body\n# in seconds at the specified epoch.\n# It should be around 8mins\nprint(EARTH_SUN_LT/60)", "8.264668887624136\n" ], [ "# The (Euclidean) distance should be around 1 AU. Why \"around\"? Well the Earth\n# revolves the Sun in a slightly non-perfect circle (elliptic orbit). First, \n# we compute the distance in km.\nimport math\nEARTH_SUN_DISTANCE = math.sqrt(EARTH_STATE_WRT_SUN[0]**2.0 \\\n + EARTH_STATE_WRT_SUN[1]**2.0 \\\n + EARTH_STATE_WRT_SUN[2]**2.0)\n\n# Convert the distance in astronomical units (1 AU)\n# Instead of searching for the \"most recent\" value, we use the default value\n# in SPICE. This way, we can easily compare our results with the results of \n# others.\nEARTH_SUN_DISTANCE_AU = spiceypy.convrt(EARTH_SUN_DISTANCE, 'km', 'AU')\n\n# Cool, it works!\nprint('Current distance between the Earth and the Sun in AU:', \\\n EARTH_SUN_DISTANCE_AU)", "Current distance between the Earth and the Sun in AU: 0.9937382357968857\n" ], [ "#Lets comute the oribital speed of the Earth in km/s\n# First, we compute the actual orbital speed of the Earth around the Sun\nEARTH_ORB_SPEED_WRT_SUN = math.sqrt(EARTH_STATE_WRT_SUN[3]**2.0 \\\n + EARTH_STATE_WRT_SUN[4]**2.0 \\\n + EARTH_STATE_WRT_SUN[5]**2.0)\n\n# It's around 30 km/s\nprint('Current orbital speed of the Earth around the Sun in km/s:', \\\n EARTH_ORB_SPEED_WRT_SUN)", "Current orbital speed of the Earth around the Sun in km/s: 29.96561742728447\n" ] ], [ [ "Now we get the theoretical earth orbital speed:", "_____no_output_____" ] ], [ [ "# Now let's compute the theoretical expectation. 
First, we load a pck file\n# that contain miscellanoeus information, like the G*M values for different\n# objects\n\n# First, load the kernel\nspiceypy.furnsh('../kernels/pck/gm_de431.tpc')\n_, GM_SUN = spiceypy.bodvcd(bodyid=10, item='GM', maxn=1)\n\n# Now compute the orbital speed\nV_ORB_FUNC = lambda gm, r: math.sqrt(gm/r)\nEARTH_ORB_SPEED_WRT_SUN_THEORY = V_ORB_FUNC(GM_SUN[0], EARTH_SUN_DISTANCE)\n\n# Print the result\nprint('Theoretical orbital speed of the Earth around the Sun in km/s:', \\\n EARTH_ORB_SPEED_WRT_SUN_THEORY)", "Theoretical orbital speed of the Earth around the Sun in km/s: 29.87838444261713\n" ] ] ]
[ "code", "markdown", "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ] ]
d014fa8244cf984ccac9c18e13ccf9b23a90febc
21,950
ipynb
Jupyter Notebook
lab_05/lab_05_exercises.ipynb
HSG-AIML/LabGSERM
9c4c80bf6030baea55b4b4b8b56482263a382e28
[ "BSD-3-Clause" ]
1
2021-06-21T11:53:04.000Z
2021-06-21T11:53:04.000Z
lab_05/lab_05_exercises.ipynb
HSG-AIML/LabGSERM
9c4c80bf6030baea55b4b4b8b56482263a382e28
[ "BSD-3-Clause" ]
null
null
null
lab_05/lab_05_exercises.ipynb
HSG-AIML/LabGSERM
9c4c80bf6030baea55b4b4b8b56482263a382e28
[ "BSD-3-Clause" ]
1
2021-06-24T12:23:07.000Z
2021-06-24T12:23:07.000Z
31.446991
584
0.494989
[ [ [ "<img align=\"center\" style=\"max-width: 1000px\" src=\"banner.png\">", "_____no_output_____" ], [ "<img align=\"right\" style=\"max-width: 200px; height: auto\" src=\"hsg_logo.png\">\n\n## Lab 05 - \"Convolutional Neural Networks (CNNs)\" Assignments\n\nGSERM'21 course \"Deep Learning: Fundamentals and Applications\", University of St. Gallen", "_____no_output_____" ], [ "In the last lab we learned how to enhance vanilla Artificial Neural Networks (ANNs) using `PyTorch` to classify even more complex images. Therefore, we used a special type of deep neural network referred to **Convolutional Neural Networks (CNNs)**. CNNs encompass the ability to take advantage of the hierarchical pattern in data and assemble more complex patterns using smaller and simpler patterns. In this lab, we aim to leverage that knowledge by applying it to a set of self-coding assignments. But before we do so let's start with another motivational video by NVIDIA:", "_____no_output_____" ] ], [ [ "from IPython.display import YouTubeVideo\n# NVIDIA: \"Official Intro | GTC 2020 | I AM AI\"\nYouTubeVideo('e2_hsjpTi4w', width=1000, height=500)", "_____no_output_____" ] ], [ [ "As always, pls. don't hesitate to ask all your questions either during the lab, post them in our CANVAS (StudyNet) forum (https://learning.unisg.ch), or send us an email (using the course email).", "_____no_output_____" ], [ "## 1. Assignment Objectives:", "_____no_output_____" ], [ "Similar today's lab session, after today's self-coding assignments you should be able to:\n\n> 1. Understand the basic concepts, intuitions and major building blocks of **Convolutional Neural Networks (CNNs)**.\n> 2. Know how to **implement and to train a CNN** to learn a model of tiny image data.\n> 3. Understand how to apply such a learned model to **classify images** images based on their content into distinct categories.\n> 4. Know how to **interpret and visualize** the model's classification results.", "_____no_output_____" ], [ "## 2. Setup of the Jupyter Notebook Environment", "_____no_output_____" ], [ "Similar to the previous labs, we need to import a couple of Python libraries that allow for data analysis and data visualization. 
We will mostly use the `PyTorch`, `Numpy`, `Sklearn`, `Matplotlib`, `Seaborn` and a few utility libraries throughout this lab:", "_____no_output_____" ] ], [ [ "# import standard python libraries\nimport os, urllib, io\nfrom datetime import datetime\nimport numpy as np", "_____no_output_____" ] ], [ [ "Import Python machine / deep learning libraries:", "_____no_output_____" ] ], [ [ "# import the PyTorch deep learning library\nimport torch, torchvision\nimport torch.nn.functional as F\nfrom torch import nn, optim\nfrom torch.autograd import Variable", "_____no_output_____" ] ], [ [ "Import the sklearn classification metrics:", "_____no_output_____" ] ], [ [ "# import sklearn classification evaluation library\nfrom sklearn import metrics\nfrom sklearn.metrics import classification_report, confusion_matrix", "_____no_output_____" ] ], [ [ "Import Python plotting libraries:", "_____no_output_____" ] ], [ [ "# import matplotlib, seaborn, and PIL data visualization libary\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom PIL import Image", "_____no_output_____" ] ], [ [ "Enable notebook matplotlib inline plotting:", "_____no_output_____" ] ], [ [ "%matplotlib inline", "_____no_output_____" ] ], [ [ "Import Google's GDrive connector and mount your GDrive directories:", "_____no_output_____" ] ], [ [ "# import the Google Colab GDrive connector\nfrom google.colab import drive\n\n# mount GDrive inside the Colab notebook\ndrive.mount('/content/drive')", "_____no_output_____" ] ], [ [ "Create a structure of Colab Notebook sub-directories inside of GDrive to store (1) the data as well as (2) the trained neural network models:", "_____no_output_____" ] ], [ [ "# create Colab Notebooks directory\nnotebook_directory = '/content/drive/MyDrive/Colab Notebooks'\nif not os.path.exists(notebook_directory): os.makedirs(notebook_directory)\n\n # create data sub-directory inside the Colab Notebooks directory\ndata_directory = '/content/drive/MyDrive/Colab Notebooks/data'\nif not os.path.exists(data_directory): os.makedirs(data_directory)\n\n # create models sub-directory inside the Colab Notebooks directory\nmodels_directory = '/content/drive/MyDrive/Colab Notebooks/models'\nif not os.path.exists(models_directory): os.makedirs(models_directory)", "_____no_output_____" ] ], [ [ "Set a random `seed` value to obtain reproducable results:", "_____no_output_____" ] ], [ [ "# init deterministic seed\nseed_value = 1234\nnp.random.seed(seed_value) # set numpy seed\ntorch.manual_seed(seed_value) # set pytorch seed CPU", "_____no_output_____" ] ], [ [ "Google Colab provides the use of free GPUs for running notebooks. However, if you just execute this notebook as is, it will use your device's CPU. To run the lab on a GPU, got to `Runtime` > `Change runtime type` and set the Runtime type to `GPU` in the drop-down. Running this lab on a CPU is fine, but you will find that GPU computing is faster. *CUDA* indicates that the lab is being run on GPU.\n\nEnable GPU computing by setting the `device` flag and init a `CUDA` seed:", "_____no_output_____" ] ], [ [ "# set cpu or gpu enabled device\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu').type\n\n# init deterministic GPU seed\ntorch.cuda.manual_seed(seed_value)\n\n# log type of device enabled\nprint('[LOG] notebook with {} computation enabled'.format(str(device)))", "_____no_output_____" ] ], [ [ "Let's determine if we have access to a GPU provided by e.g. 
Google's COLab environment:", "_____no_output_____" ] ], [ [ "!nvidia-smi", "_____no_output_____" ] ], [ [ "## 3. Convolutional Neural Networks (CNNs) Assignments", "_____no_output_____" ], [ "### 3.1 CIFAR-10 Dataset Download and Data Assessment", "_____no_output_____" ], [ "The **CIFAR-10 database** (**C**anadian **I**nstitute **F**or **A**dvanced **R**esearch) is a collection of images that are commonly used to train machine learning and computer vision algorithms. The database is widely used to conduct computer vision research using machine learning and deep learning methods:", "_____no_output_____" ], [ "<img align=\"center\" style=\"max-width: 500px; height: 500px\" src=\"cifar10.png\">\n\n(Source: https://www.kaggle.com/c/cifar-10)", "_____no_output_____" ], [ "Further details on the dataset can be obtained via: *Krizhevsky, A., 2009. \"Learning Multiple Layers of Features from Tiny Images\", \n( https://www.cs.toronto.edu/~kriz/learning-features-2009-TR.pdf ).\"*", "_____no_output_____" ], [ "The CIFAR-10 database contains **60,000 color images** (50,000 training images and 10,000 validation images). The size of each image is 32 by 32 pixels. The collection of images encompasses 10 different classes that represent airplanes, cars, birds, cats, deer, dogs, frogs, horses, ships, and trucks. Let's define the distinct classs for further analytics:", "_____no_output_____" ] ], [ [ "cifar10_classes = ['plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']", "_____no_output_____" ] ], [ [ "Thereby the dataset contains 6,000 images for each of the ten classes. The CIFAR-10 is a straightforward dataset that can be used to teach a computer how to recognize objects in images.\n\nLet's download, transform and inspect the training images of the dataset. 
Therefore, we first will define the directory we aim to store the training data:", "_____no_output_____" ] ], [ [ "train_path = data_directory + '/train_cifar10'", "_____no_output_____" ] ], [ [ "Now, let's download the training data accordingly:", "_____no_output_____" ] ], [ [ "# define pytorch transformation into tensor format\ntransf = torchvision.transforms.Compose([torchvision.transforms.ToTensor(), torchvision.transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])\n\n# download and transform training images\ncifar10_train_data = torchvision.datasets.CIFAR10(root=train_path, train=True, transform=transf, download=True)", "_____no_output_____" ] ], [ [ "Verify the volume of training images downloaded:", "_____no_output_____" ] ], [ [ "# get the length of the training data\nlen(cifar10_train_data)", "_____no_output_____" ] ], [ [ "Let's now decide on where we want to store the evaluation data:", "_____no_output_____" ] ], [ [ "eval_path = data_directory + '/eval_cifar10'", "_____no_output_____" ] ], [ [ "And download the evaluation data accordingly:", "_____no_output_____" ] ], [ [ "# define pytorch transformation into tensor format\ntransf = torchvision.transforms.Compose([torchvision.transforms.ToTensor(), torchvision.transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])\n\n# download and transform validation images\ncifar10_eval_data = torchvision.datasets.CIFAR10(root=eval_path, train=False, transform=transf, download=True)", "_____no_output_____" ] ], [ [ "Let's also verfify the volume of validation images downloaded:", "_____no_output_____" ] ], [ [ "# get the length of the training data\nlen(cifar10_eval_data)", "_____no_output_____" ] ], [ [ "### 3.2 Convolutional Neural Network (CNN) Model Training and Evaluation", "_____no_output_____" ], [ "<img align=\"center\" style=\"max-width: 900px\" src=\"classification.png\">", "_____no_output_____" ], [ "We recommend you to try the following exercises as part of the self-coding session:\n\n**Exercise 1: Train the neural network architecture of the lab with increased learning rate.**", "_____no_output_____" ], [ "> Increase the learning rate of the network training to a value of **0.1** (instead of currently 0.001) and re-run the network training for 10 training epochs. Load and evaluate the model exhibiting the lowest training loss. What kind of behavior in terms of loss convergence and prediction accuracy can be observed?", "_____no_output_____" ] ], [ [ "#### Step 1. define and init neural network architecture #############################################################\n\n# ***************************************************\n# INSERT YOUR SOLUTION/CODE HERE\n# ***************************************************\n\n#### Step 2. define loss, training hyperparameters and dataloader ####################################################\n\n# ***************************************************\n# INSERT YOUR SOLUTION/CODE HERE\n# ***************************************************\n\n#### Step 3. run model training ######################################################################################\n\n# ***************************************************\n# INSERT YOUR SOLUTION/CODE HERE\n# ***************************************************\n\n#### Step 4. 
run model evaluation ####################################################################################\n\n# ***************************************************\n# INSERT YOUR SOLUTION/CODE HERE\n# ***************************************************", "_____no_output_____" ] ], [ [ "**2. Evaluation of \"shallow\" vs. \"deep\" neural network architectures.**", "_____no_output_____" ], [ "> In addition to the architecture of the lab notebook, evaluate further (more **shallow** as well as more **deep**) neural network architectures by either **removing or adding convolutional layers** to the network. Train a model (using the architectures you selected) for at least **20 training epochs**. Analyze the prediction performance of the trained models in terms of training time and prediction accuracy. ", "_____no_output_____" ] ], [ [ "#### Step 1. define and init neural network architecture #############################################################\n\n# ***************************************************\n# INSERT YOUR SOLUTION/CODE HERE\n# ***************************************************\n\n#### Step 2. define loss, training hyperparameters and dataloader ####################################################\n\n# ***************************************************\n# INSERT YOUR SOLUTION/CODE HERE\n# ***************************************************\n\n#### Step 3. run model training ######################################################################################\n\n# ***************************************************\n# INSERT YOUR SOLUTION/CODE HERE\n# ***************************************************\n\n#### Step 4. run model evaluation ####################################################################################\n\n# ***************************************************\n# INSERT YOUR SOLUTION/CODE HERE\n# ***************************************************", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ] ]
d01504b9f6af5feea95e32fb55343ccdb5b2c284
47,527
ipynb
Jupyter Notebook
Cap10/Mini-Projeto-Solucao/Mini-Projeto2 - Analise3.ipynb
CezarPoeta/Python-Fundamentos
53972d21bea86fdba90a3fafa487be6959ccebb8
[ "MIT" ]
1
2020-07-31T20:31:19.000Z
2020-07-31T20:31:19.000Z
Cap10/Mini-Projeto-Solucao/Mini-Projeto2 - Analise3.ipynb
carlos-freitas-gitHub/python-analytics
4b55cb2acb3383ded700596c5a856b7e2124f2da
[ "Apache-2.0" ]
null
null
null
Cap10/Mini-Projeto-Solucao/Mini-Projeto2 - Analise3.ipynb
carlos-freitas-gitHub/python-analytics
4b55cb2acb3383ded700596c5a856b7e2124f2da
[ "Apache-2.0" ]
null
null
null
255.521505
21,660
0.924485
[ [ [ "# <font color='blue'>Data Science Academy - Python Fundamentos - Capítulo 9</font>\n\n## Download: http://github.com/dsacademybr\n\n## Mini-Projeto 2 - Análise Exploratória em Conjunto de Dados do Kaggle\n\n## Análise 3", "_____no_output_____" ] ], [ [ "# Imports\nimport os\nimport subprocess\nimport stat\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nfrom datetime import datetime\nsns.set(style=\"white\")\n%matplotlib inline", "_____no_output_____" ], [ "# Dataset\nclean_data_path = \"dataset/autos.csv\"\ndf = pd.read_csv(clean_data_path,encoding=\"latin-1\")", "_____no_output_____" ] ], [ [ "## Preço médio do veículo por tipo de combustível e tipo de caixa de câmbio", "_____no_output_____" ] ], [ [ "# Crie um Barplot com o Preço médio do veículo por tipo de combustível e tipo de caixa de câmbio\nfig, ax = plt.subplots(figsize=(8,5))\ncolors = [\"#00e600\", \"#ff8c1a\",\"#a180cc\"]\nsns.barplot(x=\"fuelType\", y=\"price\",hue=\"gearbox\", palette=\"husl\",data=df)\nax.set_title(\"Preço médio do veículo por tipo de combustível e tipo de caixa de câmbio\",fontdict= {'size':12})\nax.xaxis.set_label_text(\"Tipo de Combustível\",fontdict= {'size':14})\nax.yaxis.set_label_text(\"Preço Médio\",fontdict= {'size':14})\nplt.show()", "_____no_output_____" ], [ "# Salvando o plot\nfig.savefig(\"plots/Analise3/fueltype-vehicleType-price.png\")", "_____no_output_____" ] ], [ [ "## Potência média de um veículo por tipo de veículo e tipo de caixa de câmbio", "_____no_output_____" ] ], [ [ "# Crie um Barplot com a Potência média de um veículo por tipo de veículo e tipo de caixa de câmbio\ncolors = [\"windows blue\", \"amber\", \"greyish\", \"faded green\", \"dusty purple\"]\nfig, ax = plt.subplots(figsize=(8,5))\nsns.set_palette(sns.xkcd_palette(colors))\nsns.barplot(x=\"vehicleType\", y=\"powerPS\",hue=\"gearbox\",data=df)\nax.set_title(\"Potência média de um veículo por tipo de veículo e tipo de caixa de câmbio\",fontdict= {'size':12})\nax.xaxis.set_label_text(\"Tipo de Veículo\",fontdict= {'size':14})\nax.yaxis.set_label_text(\"Potência Média\",fontdict= {'size':14})\nplt.show()", "_____no_output_____" ], [ "# Salvando o plot\nfig.savefig(\"plots/Analise3/vehicletype-fueltype-power.png\")", "_____no_output_____" ] ], [ [ "Conheça a Formação Cientista de Dados, um programa completo, 100% online e 100% em português, com 340 horas, mais de 1.200 aulas em vídeos e 26 projetos, que vão ajudá-lo a se tornar um dos profissionais mais cobiçados do mercado de análise de dados. Clique no link abaixo, faça sua inscrição, comece hoje mesmo e aumente sua empregabilidade:\n\nhttps://www.datascienceacademy.com.br/pages/formacao-cientista-de-dados", "_____no_output_____" ], [ "# Fim", "_____no_output_____" ], [ "### Obrigado - Data Science Academy - <a href=\"http://facebook.com/dsacademybr\">facebook.com/dsacademybr</a>", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ] ]
d015081e686d7a56a607f5ab1a9dc0ef4521fdee
133
ipynb
Jupyter Notebook
notebooks/in_dev/new plot test.ipynb
ericjmartin/echopype
75d46e1cf3f45da6800b58df703660a967bba305
[ "Apache-2.0" ]
1
2020-07-20T14:21:21.000Z
2020-07-20T14:21:21.000Z
notebooks/in_dev/new plot test.ipynb
ericjmartin/echopype
75d46e1cf3f45da6800b58df703660a967bba305
[ "Apache-2.0" ]
null
null
null
notebooks/in_dev/new plot test.ipynb
ericjmartin/echopype
75d46e1cf3f45da6800b58df703660a967bba305
[ "Apache-2.0" ]
null
null
null
33.25
75
0.887218
[ [ [ "empty" ] ] ]
[ "empty" ]
[ [ "empty" ] ]
d015106c2d1fb7f7935fd1952f7ca6d47d7fb531
13,499
ipynb
Jupyter Notebook
dolt-demos/iris-example/iris.ipynb
dolthub/metaflow
670327bae0f9d5cb99b5232e02a5f85494f16237
[ "Apache-2.0" ]
null
null
null
dolt-demos/iris-example/iris.ipynb
dolthub/metaflow
670327bae0f9d5cb99b5232e02a5f85494f16237
[ "Apache-2.0" ]
1
2021-01-08T19:45:03.000Z
2021-01-08T19:45:03.000Z
dolt-demos/iris-example/iris.ipynb
dolthub/metaflow
670327bae0f9d5cb99b5232e02a5f85494f16237
[ "Apache-2.0" ]
null
null
null
69.582474
330
0.656789
[ [ [ "!dolt clone vinai/iris-test", "cloning https://doltremoteapi.dolthub.com/vinai/iris-test\nRetrieving remote informatio0 of 9 chunks complete. 0 chunks being downloaded currentl0 of 9 chunks complete. 7 chunks being downloaded currentl0 of 9 chunks complete. 9 chunks being downloaded currentl7 of 9 chunks complete. 2 chunks being downloaded currentl9 of 9 chunks complete. 0 chunks being downloaded currently.\n" ], [ "!dolt clone vinai/iris-model-results", "_____no_output_____" ], [ "!cat dolt_ml_demo.py", "from metaflow import FlowSpec, step, DoltDT\r\nimport pandas as pd\r\nimport pickle\r\nfrom sklearn import tree\r\n\r\nclass DoltMLDemoFlow(FlowSpec):\r\n @step\r\n def start(self):\r\n # Start by getting original dataset\r\n with DoltDT(run=self, doltdb_path='iris-test') as dolt:\r\n self.test_set = dolt.read_table('iris-test')\r\n\r\n self.next(self.predict)\r\n\r\n @step\r\n def predict(self):\r\n with DoltDT(run=self, doltdb_path='iris-model-results') as dolt:\r\n self.model = pickle.load(open('model.p', 'rb'))\r\n self.model_type = 'Decision Tree'\r\n\r\n samples = self.test_set['sample']\r\n y_true = self.test_set['species']\r\n y_true = y_true.rename('labels')\r\n\r\n test = self.test_set.drop(columns=['species', 'sample'])\r\n predictions = pd.Series(self.model.predict(test))\r\n predictions = predictions.rename('predictions')\r\n\r\n self.result = pd.concat([samples, y_true, predictions], axis=1)\r\n\r\n dolt.write_table(table_name='result', df=self.result, pks=['sample'])\r\n\r\n self.next(self.end)\r\n\r\n @step\r\n def end(self):\r\n with DoltDT(run=self, doltdb_path='iris-model-results') as dolt:\r\n dolt.commit_table_writes()\r\n\r\n\r\nif __name__ == '__main__':\r\n DoltMLDemoFlow()\r\n" ], [ "!poetry run python3 dolt_ml_demo.py run", "\u001b[35m\u001b[1mMetaflow 2.2.5.post14+git4337f78\u001b[0m\u001b[35m\u001b[22m executing \u001b[0m\u001b[31m\u001b[1mDoltMLDemoFlow\u001b[0m\u001b[35m\u001b[22m\u001b[0m\u001b[35m\u001b[22m for \u001b[0m\u001b[31m\u001b[1muser:max-hoffman\u001b[0m\u001b[35m\u001b[22m\u001b[K\u001b[0m\u001b[35m\u001b[22m\u001b[0m\n\u001b[35m\u001b[22mValidating your flow...\u001b[K\u001b[0m\u001b[35m\u001b[22m\u001b[0m\n\u001b[32m\u001b[1m The graph looks good!\u001b[K\u001b[0m\u001b[32m\u001b[1m\u001b[0m\n\u001b[35m\u001b[22mRunning pylint...\u001b[K\u001b[0m\u001b[35m\u001b[22m\u001b[0m\n\u001b[32m\u001b[1m Pylint is happy!\u001b[K\u001b[0m\u001b[32m\u001b[1m\u001b[0m\n\u001b[35m2021-01-14 11:00:35.485 \u001b[0m\u001b[1mWorkflow starting (run-id 1610650835478011):\u001b[0m\n\u001b[35m2021-01-14 11:00:35.491 \u001b[0m\u001b[32m[1610650835478011/start/1 (pid 2132)] \u001b[0m\u001b[1mTask is starting.\u001b[0m\n\u001b[35m2021-01-14 11:00:36.624 \u001b[0m\u001b[32m[1610650835478011/start/1 (pid 2132)] \u001b[0m\u001b[22m01-14 11:00:36 doltpy.core.dolt INFO Creating engine for Dolt SQL Server instance running on 127.0.0.1:3306\u001b[0m\n\u001b[35m2021-01-14 11:00:36.709 \u001b[0m\u001b[32m[1610650835478011/start/1 (pid 2132)] \u001b[0m\u001b[22m01-14 11:00:36 doltpy.core.dolt INFO * master \tcnt6q9n22svhdvb1n3g90kk43k3b9aol\u001b[0m\n\u001b[35m2021-01-14 11:00:36.806 \u001b[0m\u001b[32m[1610650835478011/start/1 (pid 2132)] \u001b[0m\u001b[22m\u001b[0m\n\u001b[35m2021-01-14 11:00:36.806 \u001b[0m\u001b[32m[1610650835478011/start/1 (pid 2132)] \u001b[0m\u001b[22m01-14 11:00:36 doltpy.core.dolt INFO\u001b[0m\n\u001b[35m2021-01-14 11:00:36.881 \u001b[0m\u001b[32m[1610650835478011/start/1 (pid 2132)] \u001b[0m\u001b[22m01-14 11:00:36 
doltpy.core.system_helpers INFO Before exiting cleaning up child processes\u001b[0m\n\u001b[35m2021-01-14 11:00:36.888 \u001b[0m\u001b[32m[1610650835478011/start/1 (pid 2132)] \u001b[0m\u001b[22m01-14 11:00:36 doltpy.core.system_helpers INFO No processes to clean up, exiting\u001b[0m\n\u001b[35m2021-01-14 11:00:37.064 \u001b[0m\u001b[32m[1610650835478011/start/1 (pid 2132)] \u001b[0m\u001b[1mTask finished successfully.\u001b[0m\n\u001b[35m2021-01-14 11:00:37.072 \u001b[0m\u001b[32m[1610650835478011/predict/2 (pid 2149)] \u001b[0m\u001b[1mTask is starting.\u001b[0m\n\u001b[35m2021-01-14 11:00:38.183 \u001b[0m\u001b[32m[1610650835478011/predict/2 (pid 2149)] \u001b[0m\u001b[22m01-14 11:00:38 doltpy.core.dolt INFO Creating engine for Dolt SQL Server instance running on 127.0.0.1:3306\u001b[0m\n\u001b[35m2021-01-14 11:00:38.267 \u001b[0m\u001b[32m[1610650835478011/predict/2 (pid 2149)] \u001b[0m\u001b[22m01-14 11:00:38 doltpy.core.dolt INFO * master \tsoe04pejtv523ci3nnegalpbj3eltj9m\u001b[0m\n\u001b[35m2021-01-14 11:00:38.376 \u001b[0m\u001b[32m[1610650835478011/predict/2 (pid 2149)] \u001b[0m\u001b[22m\u001b[0m\n\u001b[35m2021-01-14 11:00:38.376 \u001b[0m\u001b[32m[1610650835478011/predict/2 (pid 2149)] \u001b[0m\u001b[22m01-14 11:00:38 doltpy.core.write.write INFO No import mode specified, table exists, using \"update\"\u001b[0m\n\u001b[35m2021-01-14 11:00:38.465 \u001b[0m\u001b[32m[1610650835478011/predict/2 (pid 2149)] \u001b[0m\u001b[22m01-14 11:00:38 doltpy.core.write.write INFO Importing to table result in dolt directory located in iris-model-results, import mode update\u001b[0m\n\u001b[35m2021-01-14 11:00:38.465 \u001b[0m\u001b[32m[1610650835478011/predict/2 (pid 2149)] \u001b[0m\u001b[22m01-14 11:00:38 doltpy.core.dolt INFO Rows Processed: 34, Additions: 0, Modifications: 0, Had No Effect: 34\u001b[0m\n\u001b[35m2021-01-14 11:00:38.510 \u001b[0m\u001b[32m[1610650835478011/predict/2 (pid 2149)] \u001b[0m\u001b[22m01-14 11:00:38 doltpy.core.dolt INFO\u001b[0m\n\u001b[35m2021-01-14 11:00:38.603 \u001b[0m\u001b[32m[1610650835478011/predict/2 (pid 2149)] \u001b[0m\u001b[22m01-14 11:00:38 doltpy.core.dolt INFO\u001b[0m\n\u001b[35m2021-01-14 11:00:38.693 \u001b[0m\u001b[32m[1610650835478011/predict/2 (pid 2149)] \u001b[0m\u001b[22m01-14 11:00:38 doltpy.core.dolt INFO commit db38spd2id84p1gloh2b98heage0tjht\u001b[0m\n\u001b[35m2021-01-14 11:00:38.811 \u001b[0m\u001b[32m[1610650835478011/predict/2 (pid 2149)] \u001b[0m\u001b[22mAuthor: Max Hoffman <[email protected]>\u001b[0m\n\u001b[35m2021-01-14 11:00:38.811 \u001b[0m\u001b[32m[1610650835478011/predict/2 (pid 2149)] \u001b[0m\u001b[22mDate: Thu Jan 14 11:00:38 -0800 2021\u001b[0m\n\u001b[35m2021-01-14 11:00:38.811 \u001b[0m\u001b[32m[1610650835478011/predict/2 (pid 2149)] \u001b[0m\u001b[22m\u001b[0m\n\u001b[35m2021-01-14 11:00:38.811 \u001b[0m\u001b[32m[1610650835478011/predict/2 (pid 2149)] \u001b[0m\u001b[22m\tDoltMLDemoFlow/1610650835478011/predict/2\u001b[0m\n\u001b[35m2021-01-14 11:00:38.812 \u001b[0m\u001b[32m[1610650835478011/predict/2 (pid 2149)] \u001b[0m\u001b[22m\u001b[0m\n\u001b[35m2021-01-14 11:00:38.812 \u001b[0m\u001b[32m[1610650835478011/predict/2 (pid 2149)] \u001b[0m\u001b[22m\u001b[0m\n\u001b[35m2021-01-14 11:00:38.812 \u001b[0m\u001b[32m[1610650835478011/predict/2 (pid 2149)] \u001b[0m\u001b[22m01-14 11:00:38 doltpy.core.dolt INFO * master \tdb38spd2id84p1gloh2b98heage0tjht\u001b[0m\n\u001b[35m2021-01-14 11:00:38.823 \u001b[0m\u001b[32m[1610650835478011/predict/2 (pid 2149)] 
\u001b[0m\u001b[22m\u001b[0m\n\u001b[35m2021-01-14 11:00:38.823 \u001b[0m\u001b[32m[1610650835478011/predict/2 (pid 2149)] \u001b[0m\u001b[22m01-14 11:00:38 doltpy.core.system_helpers INFO Before exiting cleaning up child processes\u001b[0m\n\u001b[35m2021-01-14 11:00:38.830 \u001b[0m\u001b[32m[1610650835478011/predict/2 (pid 2149)] \u001b[0m\u001b[22m01-14 11:00:38 doltpy.core.system_helpers INFO No processes to clean up, exiting\u001b[0m\n\u001b[35m2021-01-14 11:00:38.989 \u001b[0m\u001b[32m[1610650835478011/predict/2 (pid 2149)] \u001b[0m\u001b[1mTask finished successfully.\u001b[0m\n\u001b[35m2021-01-14 11:00:38.995 \u001b[0m\u001b[32m[1610650835478011/end/3 (pid 2182)] \u001b[0m\u001b[1mTask is starting.\u001b[0m\n\u001b[35m2021-01-14 11:00:40.106 \u001b[0m\u001b[32m[1610650835478011/end/3 (pid 2182)] \u001b[0m\u001b[22m01-14 11:00:40 doltpy.core.dolt INFO Creating engine for Dolt SQL Server instance running on 127.0.0.1:3306\u001b[0m\n\u001b[35m2021-01-14 11:00:40.194 \u001b[0m\u001b[32m[1610650835478011/end/3 (pid 2182)] \u001b[0m\u001b[22m01-14 11:00:40 doltpy.core.dolt INFO * master \tdb38spd2id84p1gloh2b98heage0tjht\u001b[0m\n\u001b[35m2021-01-14 11:00:40.284 \u001b[0m\u001b[32m[1610650835478011/end/3 (pid 2182)] \u001b[0m\u001b[22m\u001b[0m\n\u001b[35m2021-01-14 11:00:40.285 \u001b[0m\u001b[32m[1610650835478011/end/3 (pid 2182)] \u001b[0m\u001b[22m01-14 11:00:40 doltpy.core.dolt INFO\u001b[0m\n\u001b[35m2021-01-14 11:00:40.371 \u001b[0m\u001b[32m[1610650835478011/end/3 (pid 2182)] \u001b[0m\u001b[22m01-14 11:00:40 doltpy.core.dolt INFO commit 8q38kjpfqir8m666qbp4qoucd4u4rla6\u001b[0m\n\u001b[35m2021-01-14 11:00:40.499 \u001b[0m\u001b[32m[1610650835478011/end/3 (pid 2182)] \u001b[0m\u001b[22mAuthor: Max Hoffman <[email protected]>\u001b[0m\n\u001b[35m2021-01-14 11:00:40.499 \u001b[0m\u001b[32m[1610650835478011/end/3 (pid 2182)] \u001b[0m\u001b[22mDate: Thu Jan 14 11:00:40 -0800 2021\u001b[0m\n\u001b[35m2021-01-14 11:00:40.499 \u001b[0m\u001b[32m[1610650835478011/end/3 (pid 2182)] \u001b[0m\u001b[22m\u001b[0m\n\u001b[35m2021-01-14 11:00:40.499 \u001b[0m\u001b[32m[1610650835478011/end/3 (pid 2182)] \u001b[0m\u001b[22m\tDoltMLDemoFlow/1610650835478011/end/3\u001b[0m\n\u001b[35m2021-01-14 11:00:40.499 \u001b[0m\u001b[32m[1610650835478011/end/3 (pid 2182)] \u001b[0m\u001b[22m\u001b[0m\n\u001b[35m2021-01-14 11:00:40.499 \u001b[0m\u001b[32m[1610650835478011/end/3 (pid 2182)] \u001b[0m\u001b[22m\u001b[0m\n\u001b[35m2021-01-14 11:00:40.499 \u001b[0m\u001b[32m[1610650835478011/end/3 (pid 2182)] \u001b[0m\u001b[22m01-14 11:00:40 doltpy.core.dolt INFO * master \t8q38kjpfqir8m666qbp4qoucd4u4rla6\u001b[0m\n\u001b[35m2021-01-14 11:00:40.509 \u001b[0m\u001b[32m[1610650835478011/end/3 (pid 2182)] \u001b[0m\u001b[22m\u001b[0m\n\u001b[35m2021-01-14 11:00:40.509 \u001b[0m\u001b[32m[1610650835478011/end/3 (pid 2182)] \u001b[0m\u001b[22m01-14 11:00:40 doltpy.core.system_helpers INFO Before exiting cleaning up child processes\u001b[0m\n\u001b[35m2021-01-14 11:00:40.517 \u001b[0m\u001b[32m[1610650835478011/end/3 (pid 2182)] \u001b[0m\u001b[22m01-14 11:00:40 doltpy.core.system_helpers INFO No processes to clean up, exiting\u001b[0m\n\u001b[35m2021-01-14 11:00:40.684 \u001b[0m\u001b[32m[1610650835478011/end/3 (pid 2182)] \u001b[0m\u001b[1mTask finished successfully.\u001b[0m\n\u001b[35m2021-01-14 11:00:40.684 \u001b[0m\u001b[1mDone!\u001b[0m\n01-14 11:00:40 doltpy.core.system_helpers INFO Before exiting cleaning up child processes\n01-14 11:00:40 doltpy.core.system_helpers INFO No processes to clean up, 
exiting\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code" ] ]
d0151d0875f9543f3605669698f1d07fa53c7ac4
201,612
ipynb
Jupyter Notebook
notebooks/misc/calibrate_intensities.ipynb
stjude/punctatools
379845303274e7f0bf92782bf62419b2a4765c6c
[ "Apache-2.0" ]
null
null
null
notebooks/misc/calibrate_intensities.ipynb
stjude/punctatools
379845303274e7f0bf92782bf62419b2a4765c6c
[ "Apache-2.0" ]
null
null
null
notebooks/misc/calibrate_intensities.ipynb
stjude/punctatools
379845303274e7f0bf92782bf62419b2a4765c6c
[ "Apache-2.0" ]
1
2022-01-04T20:00:44.000Z
2022-01-04T20:00:44.000Z
427.144068
137,124
0.930788
[ [ [ "# Calibrate mean and integrated intensity of a fluorescence marker versus concentration\n\n## Requirements\n\n- Images with different concentrations of the fluorescent tag with the concentration clearly specified in the image name\n\nPrepare pure solutions of various concentrations of fluorescent tag in imaging media and collect images using parameters that are identical to those used for the experimental data collection (laser power, acquisition time, magnification, etc).\n\nWe recommend collecting images for 20-30 different concentrations, with 5-10 images per concentration.\n\nClearly mark the concentration in the file or subfolder name in nM or uM. See [example_data/calibration](../../example_data/calibration) for examples of image naming. \n\nNote that the example images that we provide are cropped versions of the full images. You should use full images for calibration!\n\n## Config\n\n### The following code imports and declares functions used for the processing:", "_____no_output_____" ] ], [ [ "#################################\n# Don't modify the code below #\n#################################\n\nimport intake_io\nimport os\nimport re\nimport numpy as np\nimport pylab as plt\nimport seaborn as sns\nfrom skimage import io\nimport pandas as pd\nfrom tqdm import tqdm\nfrom skimage.measure import regionprops_table\n\nfrom am_utils.utils import walk_dir, combine_statistics", "_____no_output_____" ] ], [ [ "## Data & parameters\n\n`input_dir`: folder with images to be analyzed\n\n`output_dir`: folder to save results\n\n`channel_name`: name of the fluorecent tag (e.g. \"GFP\")\n\n## Specify data paths and parameters", "_____no_output_____" ] ], [ [ "input_dir = \"../../example_data/calibration\"\noutput_dir = \"../../test_output/calibration\"\n\nchannel_name = 'GFP'", "_____no_output_____" ] ], [ [ "### The following code lists all images in the input directory:", "_____no_output_____" ] ], [ [ "#################################\n# Don't modify the code below #\n#################################\nsamples = walk_dir(input_dir)\n\nprint(f'{len(samples)} images were found:')\nprint(np.array(samples))", "4 images were found:\n['../../example_data/calibration/05192021_GFPcalibration_1nM_-_Position_4_XY1621491830.tif'\n '../../example_data/calibration/05192021_GFPcalibration_5.62uM_-_Position_5_XY1621485379.tif'\n '../../example_data/calibration/05192021_GFPcalibration_31.6nM_-_Position_2_XY1621488646.tif'\n '../../example_data/calibration/05192021_GFPcalibration_100uM_-_Position_1_XY1621484495.tif']\n" ] ], [ [ "### The following code loads a random image:", "_____no_output_____" ] ], [ [ "#################################\n# Don't modify the code below #\n#################################\n\nsample = samples[np.random.randint(len(samples))]\ndataset = intake_io.imload(sample)\n\nif 'z' in dataset.dims:\n dataset = dataset.max('z')\n\nplt.figure(figsize=(7, 7))\nio.imshow(dataset['image'].data)", "/research/sharedresources/cbi/public/conda_envs/punctatools/lib/python3.9/site-packages/scikit_image-0.19.0-py3.9-linux-x86_64.egg/skimage/io/_plugins/matplotlib_plugin.py:150: UserWarning: Low image data range; displaying image with stretched contrast.\n lo, hi, cmap = _get_display_range(image)\n" ] ], [ [ "### The following code quantifies all input images:", "_____no_output_____" ] ], [ [ "%%time\n#################################\n# Don't modify the code below #\n#################################\n\ndef quantify(sample, input_dir, output_dir, channel_name):\n dataset = 
intake_io.imload(sample)\n\n img = np.array(dataset['image'].data)\n\n df = pd.DataFrame(regionprops_table(label_image=np.ones_like(img),\n intensity_image=img,\n properties=['area', 'mean_intensity']))\n\n df = df.rename(columns={'area': 'image volume pix', 'mean_intensity': rf'{channel_name} mean intensity per image'}) \n df[rf'{channel_name} integrated intensity per image'] = df[rf'{channel_name} mean intensity per image'] * df['image volume pix']\n \n p_nm = re.compile(rf'([0-9]*\\.?[0-9]+)nM')\n p_um = re.compile(rf'([0-9]*\\.?[0-9]+)uM')\n \n fn = sample[len(input_dir)+1:]\n conc_nM = 0\n if len(p_nm.findall(fn)) > 0:\n conc_nM = float(p_nm.findall(fn)[0])\n if len(p_um.findall(fn)) > 0:\n conc_nM = float(p_um.findall(fn)[0]) * 1000\n \n df[rf'{channel_name} concentration nM'] = conc_nM\n df['Image name'] = fn\n fn_out = os.path.join(output_dir, fn.replace('.' + sample.split('.')[-1], '.csv'))\n \n # save the stats\n os.makedirs(os.path.dirname(fn_out), exist_ok=True)\n df.to_csv(fn_out, index=False)\n \n\nfor sample in tqdm(samples):\n quantify(sample, input_dir, output_dir, channel_name)\n\n# combine the cell stats\nprint('Combining stats...')\ncombine_statistics(output_dir)", "100%|██████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 4/4 [00:00<00:00, 151.67it/s]\n" ], [ "df = pd.read_csv(output_dir.rstrip('/') + '.csv')\ndf", "_____no_output_____" ] ], [ [ "### The following code plots intensity versus concentration for sanity check", "_____no_output_____" ] ], [ [ "#################################\n# Don't modify the code below #\n#################################\n\nfor col in [rf'{channel_name} concentration nM', rf'{channel_name} mean intensity per image', rf'{channel_name} integrated intensity per image']:\n df['Log ' + col] = np.log10(df[col])\n \nfor col in [rf'{channel_name} mean intensity per image', rf'{channel_name} integrated intensity per image']:\n plt.figure(figsize=(10, 6))\n ax = sns.scatterplot(x = rf'{channel_name} concentration nM', y=col, data=df) \n \n plt.figure(figsize=(10, 6))\n ax = sns.scatterplot(x = rf'Log {channel_name} concentration nM', y='Log ' + col, data=df) \n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ] ]
d015200f617ab3f0f8e10ae39f7228d43465cec0
3,846
ipynb
Jupyter Notebook
examples/notebooks/05_drawing_tools.ipynb
ppoon23/geemap
e1a9660336ab9a7eddd702964719118b012db697
[ "MIT" ]
2
2022-03-12T14:46:53.000Z
2022-03-14T12:37:16.000Z
examples/notebooks/05_drawing_tools.ipynb
ppoon23/geemap
e1a9660336ab9a7eddd702964719118b012db697
[ "MIT" ]
null
null
null
examples/notebooks/05_drawing_tools.ipynb
ppoon23/geemap
e1a9660336ab9a7eddd702964719118b012db697
[ "MIT" ]
null
null
null
21.606742
231
0.543942
[ [ [ "<a href=\"https://githubtocolab.com/giswqs/geemap/blob/master/examples/notebooks/05_drawing_tools.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open in Colab\"/></a>\n\nUncomment the following line to install [geemap](https://geemap.org) if needed.", "_____no_output_____" ] ], [ [ "# !pip install geemap", "_____no_output_____" ], [ "import ee\nimport geemap", "_____no_output_____" ], [ "geemap.show_youtube('N7rK2aV1R4c')", "_____no_output_____" ], [ "Map = geemap.Map()\nMap", "_____no_output_____" ], [ "# Add Earth Engine dataset\nimage = ee.Image('USGS/SRTMGL1_003')\n\n# Set visualization parameters.\nvis_params = {\n 'min': 0,\n 'max': 4000,\n 'palette': ['006633', 'E5FFCC', '662A00', 'D8D8D8', 'F5F5F5'],\n}\n\n# Add Earth Engine DEM to map\nMap.addLayer(image, vis_params, 'SRTM DEM')\n\nstates = ee.FeatureCollection(\"TIGER/2018/States\")\nMap.addLayer(states, {}, 'US States')", "_____no_output_____" ], [ "Map.draw_features", "_____no_output_____" ], [ "Map.draw_last_feature", "_____no_output_____" ], [ "roi = ee.FeatureCollection(Map.draw_features)\nselected_states = states.filterBounds(roi)\nMap.addLayer(selected_states, {}, \"Selected states\")", "_____no_output_____" ], [ "clipped_image = image.clip(selected_states)\nMap.addLayer(clipped_image, vis_params, 'Clipped image')", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
d01521b53a18dee75cbf50b5e5659b1f18a8eb2d
52,544
ipynb
Jupyter Notebook
content/ch-quantum-hardware/cQED-JC-SW.ipynb
muneerqu/qiskit-textbook
b574b7e55c3d737e477f47316812d1d227763b7e
[ "Apache-2.0" ]
null
null
null
content/ch-quantum-hardware/cQED-JC-SW.ipynb
muneerqu/qiskit-textbook
b574b7e55c3d737e477f47316812d1d227763b7e
[ "Apache-2.0" ]
2
2021-09-28T05:31:05.000Z
2022-02-26T09:51:13.000Z
content/ch-quantum-hardware/cQED-JC-SW.ipynb
muneerqu/qiskit-textbook
b574b7e55c3d737e477f47316812d1d227763b7e
[ "Apache-2.0" ]
1
2022-02-23T02:43:58.000Z
2022-02-23T02:43:58.000Z
82.616352
5,600
0.721719
[ [ [ "# Circuit Quantum Electrodynamics", "_____no_output_____" ], [ "## Contents\n\n1. [Introduction](#intro)\n2. [The Schrieffer-Wolff Transformation](#tswt)\n3. [Block-diagonalization of the Jaynes-Cummings Hamiltonian](#bdotjch)\n4. [Full Transmon](#full-transmon)\n5. [Qubit Drive with cQED](#qdwcqed)\n6. [The Cross Resonance Entangling Gate](#tcreg)", "_____no_output_____" ], [ "## 1. Introduction <a id='intro'></a>\n\nBy analogy with Cavity Quantum Electrodynamics (CQED), circuit QED (cQED) exploits the fact that a simple model can be used to both describe the interaction of an atom with an optical cavity and a qubit with a microwave resonator. This model includes the number of photons in the cavity/resonator, the state of the atom/qubit, and the electric dipole interaction between the atom/qubit and cavity/resonator. As we saw in the last section, transmons are actually multi-level systems, but restricting ourselves to the ground $|0\\rangle = |g\\rangle$ and first excited $|1\\rangle = |e\\rangle$ states is possible because of the anharmonicity of the transmon. Therefore we can describe the transmon as a qubit desicribed by the Pauli spin matrices\n$$\n\\sigma^x = \\begin{pmatrix} 0 & 1 \\\\ 1 & 0 \\end{pmatrix} \\qquad\n\\sigma^y = \\begin{pmatrix} 0 & -i \\\\ i & 0 \\end{pmatrix} \\qquad\n\\sigma^z = \\begin{pmatrix} 1 & 0 \\\\ 0 & -1 \\end{pmatrix} \\qquad\n$$\nthat generate rotations the respective axes around the Bloch sphere. In that case, the simplest model to describe this interaction is the Jaynes-Cummings Hamiltonian in the rotating wave approximation,\n$$\nH_{\\rm JC}^{\\rm (RWA)}/\\hbar = \\omega_r a^\\dagger a - \\frac{1}{2} \\omega_q \\sigma_z + g(a^\\dagger \\sigma^- + a \\sigma^+).\n$$\nwhere $\\omega_r$ and $\\omega_r$ are the frequencies of the resonator and \"qubit\", respectively, $a$ ($a^\\dagger$) is the resonator photon annihilation (creation) operator, and $g$ is the electric dipole coupling (half the vacuum Rabi splitting). Note that we are now omitting the hats from the operators. Here, the first term corresponds to the number of photons in the resonator, the second term corresponds to the state of the qubit, and the third is the electric dipole interaction, where $\\sigma^\\pm = (1/2)(\\sigma^x \\mp i\\sigma^y)$ is the qubit raising/lowering operator. (Note that the signs are inverted from those of *spin* raising/lowering operators, as discussed in the previous chapter).\n\nThis Hamiltonian can be solved exactly, and the solutions are hybrid qubit/resonator states where an excitation (either a photon in the resonator or excited state of the qubit) swaps between the two at a rate $g$ when they are on-resonance ($\\omega_r = \\omega_q$). For example, the $a^\\dagger \\sigma^-$ in the third term creates a photon in the resonator and lowers the qubit from $|1\\rangle$ to $|0\\rangle$, while the $a\\sigma^+$ term destroys a photon in the resonators and excites the qubit from $|0\\rangle$ to $|1\\rangle$. While interesting, for our quantum computer we want to deal with qubits, and not these hybrid states. This means we want to move to a regimes where the resonator acts as a perturbation to the qubit (and vice-versa), so that their properties merely become \"dressed\" by the presence of the other. Using a type of perturbation theory, called the Schrieffer-Wolff (S-W) transformation, we can calculate the properties of the qubit and resonator in the regime we wish to operate. 
Here it should be noted that treating the transmon as a qubit is illustrative for pedagogical reasons, but the same techniques apply when you consider all the levels of the transmon. The higher levels of the transmon have profound effects and must be considered when designing and simulating these devices. ", "_____no_output_____" ], [ "## 2. The Schrieffer-Wolff Transformation <a id='tswt'></a>\n\n<details>\n <summary>Schrödinger's Equation (Click here to expand)</summary>\nProblems in quantum mechanics often amount to diagonalizing a Hamiltonian eigenvalue equation\n$$\nH\\psi_m = E_m \\psi_m \\qquad {\\rm for} \\quad 1 \\le m \\le n,\n$$\nwhere the $\\psi_m$ are the eigenstates with eigenvalue $E_m$. This consists of finding a unitary matrix $U$ such that $H' = U H U^\\dagger$ is diagonal. Then the eigenvalue equation becomes\n$$\nH \\psi_m = E_m \\psi_m \\Longrightarrow U H U^\\dagger U \\psi_m = E_m U \\psi_m \\Longrightarrow H' \\psi_m' = E_m \\psi_m',\n$$\nwhere $\\psi_m' = U\\psi_m$ are the transformed eigenstates and\n$$\nH' = \\begin{pmatrix}\nE_1 & 0 & \\cdots & 0 \\\\\n0 & E_2 & \\cdots & 0 \\\\\n\\vdots & \\vdots & \\ddots & 0 \\\\\n0 & 0 & \\cdots & E_n \\end{pmatrix}\n$$\nis the diagonalized Hamiltonian.\n</details>\n\nWith the S-W transformation, instead of diagonalizing the Hamiltonian, we seek to *block-diagonalize* it. Suppose we have a Hamiltonian that can be broken up into a diagonal part and a perturbation\n$$\nH \\quad = \\quad \\underbrace{\\begin{pmatrix}\n\\Box & & & & & & \\\\\n & \\Box & & & & & \\\\\n & & \\Box & & & & \\\\\n & & & \\Box & & & \\\\\n & & & & \\Box & & \\\\\n & & & & & \\Box & \\\\\n & & & & & & \\Box \\end{pmatrix}}_\\text{diagonal} \\quad + \\quad \n\\underbrace{\\begin{pmatrix}\n\\times & \\times & \\times & \\times & \\cdot & \\cdot & \\cdot \\\\\n\\times & \\times & \\times & \\times & \\cdot & \\cdot & \\cdot \\\\\n\\times & \\times & \\times & \\times & \\cdot & \\cdot & \\cdot \\\\\n\\times & \\times & \\times & \\times & \\cdot & \\cdot & \\cdot \\\\\n\\cdot & \\cdot & \\cdot & \\cdot & \\times & \\times & \\times \\\\\n\\cdot & \\cdot & \\cdot & \\cdot & \\times & \\times & \\times \\\\\n\\cdot & \\cdot & \\cdot & \\cdot & \\times & \\times & \\times \\end{pmatrix}}_\\text{perturbation}\n$$\nand then write the perturbation as $H_1 + H_2$ so that $H = H_0 + H_1 + H_2$, with $H_0$ diagonal, $H_1$ block diagonal, and $H_2$ block off-diagonal, and we have\n$$\nH \\quad = \\quad \\underbrace{\\begin{pmatrix}\n\\Box & & & & & & \\\\\n & \\Box & & & & & \\\\\n & & \\Box & & & & \\\\\n & & & \\Box & & & \\\\\n & & & & \\Box & & \\\\\n & & & & & \\Box & \\\\\n & & & & & & \\Box \\end{pmatrix}}_\\text{diagonal}\n\\quad + \\quad\n\\underbrace{\\begin{pmatrix}\n\\times & \\times & \\times & \\times & & & \\\\\n\\times & \\times & \\times & \\times & & & \\\\\n\\times & \\times & \\times & \\times & & & \\\\\n\\times & \\times & \\times & \\times & & & \\\\\n & & & & \\times & \\times & \\times \\\\\n & & & & \\times & \\times & \\times \\\\\n & & & & \\times & \\times & \\times \\end{pmatrix}}_\\text{block diagonal} 
\n\\quad + \\quad\n\\underbrace{\\begin{pmatrix}\n & & & & \\cdot & \\cdot & \\cdot \\\\\n & & & & \\cdot & \\cdot & \\cdot \\\\\n & & & & \\cdot & \\cdot & \\cdot \\\\\n & & & & \\cdot & \\cdot & \\cdot \\\\\n\\cdot & \\cdot & \\cdot & \\cdot & & & \\\\\n\\cdot & \\cdot & \\cdot & \\cdot & & & \\\\\n\\cdot & \\cdot & \\cdot & \\cdot & & & \\end{pmatrix}}_\\text{block off-diagonal}\n$$", "_____no_output_____" ], [ "Block-diagonalizing $H$ consists of 
finding an operator $S$ such that\n\n$$\nH_{\\rm eff} = e^{iS} H e^{-iS} = \\sum_{m=0}^\\infty \\frac{1}{m!} [H, S]^{(m)} = \\sum_{m=0}^\\infty \\lambda^m H^{(m)},\n$$\n\nwhere $H^{(m)}$ are successive approximations to $H$ (with $H^{(0)} = H_0$) and the generalized commutator is defined recursively as \n\n$$\n[H,S]^{(m)} = [[H,S]^{(m-1)},S] \\qquad {\\rm with} \\qquad [H,S]^{(0)} = H. \n$$\n\nHere we treat $S$ as a Taylor series with\n\n$$ S = \\sum_{m=1}^\\infty \\lambda^m S^{(m)} $$\n\nto keep track of the orders of $\\lambda$. Then, expanding the effective Hamiltonian as a perturbation of $H_1+H_2$ to second order in $\\lambda$,\n\n$$\nH_{\\rm eff} = H_0 + \\lambda (H_1+H_2) + \\left[H_0 + \\lambda(H_1+H_2), \\lambda S^{(1)}\\right] \n + \\frac{1}{2} \\left[ \\left[ H_0 + \\lambda(H_1+H_2), \\lambda S^{(1)}\\right], \\lambda S^{(1)}\\right]\n + \\left[H_0 + \\lambda(H_1+H_2), \\lambda^2 S^{(2)}\\right] + \\ldots \\\\\n \\approx H_0 + \\lambda \\left( H_1 + H_2 + \\left[H_0, S^{(1)}\\right] \\right) \n + \\lambda^2 \\left( \\left[H_1+H_2, S^{(1)}\\right] + \\frac{1}{2} \\left[ \\left[H_0, S^{(1)}\\right], S^{(1)}\\right] + \\left[H_0, S^{(2)}\\right]\\right).\n$$\n\nSince $S$ must be block off-diagonal and anti-hermitian to force the block off-diagonal elements of $H_{\\rm eff}$ to vanish, we must have\n\n$$\nH_{\\rm eff}^{\\rm off-diag} = \\sum_{m=0}^\\infty \\frac{1}{(2m+1)!} [\\underbrace{H_0 + H_1}_\\text{block diag}, S]^{(2m+1)} + \\sum_{m=0}^\\infty \\frac{1}{(2m)!} [\\underbrace{H_2}_\\text{block off-diag}, S]^{(2m)} \\equiv 0,\n$$\n\nnoting that all the terms in the first series are block off-diagonal, as are all of those in the second series. This is because the commutator of a block diagonal and a block off-diagonal matrix is block off-diagonal, while the commutator of two block off-diagonal matrices is block diagonal. Extending this to the generalized commutator, we can see that $[H_0 + H_1, S]^{(n)}$ with odd $n$ must always be block off-diagonal, as must $[H_2, S]^{(n)}$ with even $n$. Now, expanding the off-diagonal part of the Hamiltonian to second order yields\n\n$$\nH_{\\rm eff}^{\\rm off-diag} = \\left[ H_0 + \\lambda H_1, \\lambda S^{(1)} \\right]+\\lambda H_2 + \\left[H_0 + \\lambda H_1, \\lambda^2 S^{(2)}\\right]\n + \\frac{1}{3!} \\left[ H_0+\\lambda H_1, \\lambda S^{(1)}\\right]^{(3)} + \\frac{1}{2!} \\left[ \\lambda H_2, \\lambda S^{(1)}\\right]^{(2)} \\\\\n = \\lambda \\left( \\left[ H_0, S^{(1)} \\right] + H_2 \\right) + \\lambda^2 \\left( \\left[H_1, S^{(1)} \\right] + \\left[H_0, S^{(2)}\\right]\\right) + \\ldots.\n$$\nSince each order of $\\lambda$ must be identically zero, the following equations determine the $S^{(m)}$,\n$$\n[H_0, S^{(1)}] = -H_2 \\qquad\n[H_0, S^{(2)}] = -[H_1, S^{(1)}] \\qquad\n[H_0, S^{(3)}] = -[H_1, S^{(2)}] - \\frac{1}{3} [[H_2, S^{(1)}], S^{(1)}],\n$$\nwhere an ansatz that satisfies these equations is guaranteed unique by Winkler's work. Then our effective Hamiltonian becomes\n\n$$\nH_{\\rm eff} = H_0+H_1+[H_2,S^{(1)}] + \\frac{1}{2} [[H_0, S^{(1)}], S^{(1)}] + \\ldots = H_0+H_1+\\frac{1}{2}[H_2,S^{(1)}] + \\ldots,\n$$\n\nwhere the effective Hamiltonian is calculated here to second order and we have taken $\\lambda \\to 1$.", "_____no_output_____" ], [ "## 3. Block-diagonalization of the Jaynes-Cummings Hamiltonian <a id='bdotjch'></a>\n\nUsing the S-W transformation consists of two problems: 1) finding the correct ansatz, and 2) performing the calculations. In most examples, an ansatz of similar form (i.e. 
anti-hermitian) to the off-diagonal parts is made and confirmed *a posteriori*. Recently, the manuscript [A Systematic Method for Schrieffer-Wolff Transformation and Its Generalizations](http://www.arxiv.org/abs/2004.06534) appeared on the arXiv; it provides a systematic way of constructing the ansatz and applies it to numerous systems (including the Jaynes-Cummings Hamiltonian below).\n\nFollowing that method, the *generator* $\\eta$ is calculated as $\\eta = [H_0, H_2]$. Keeping the scalar coefficients of $\\eta$ undetermined, $S^{(1)}$ can then be calculated as the specific $\\eta$ that satisfies $[H_0, \\eta]=H_2$. Note that the hermiticity of $H_0$ and $H_2$ guarantees the anti-hermiticity of $\\eta$, and thus of $S^{(1)}$. \n\nTo ease the tedious calculations, we will use the Python package [`sympy`](http://www.sympy.org) for symbolic mathematics.", "_____no_output_____" ] ], [ [ "# import SymPy and define symbols\nimport sympy as sp\nsp.init_printing(use_unicode=True)\nwr = sp.Symbol('\\omega_r') # resonator frequency\nwq = sp.Symbol('\\omega_q') # qubit frequency\ng = sp.Symbol('g', real=True) # vacuum Rabi coupling\nDelta = sp.Symbol('Delta', real=True) # wr - wq; defined later", "_____no_output_____" ], [ "# import operator relations and define them\nfrom sympy.physics.quantum.boson import BosonOp\na = BosonOp('a') # resonator photon annihilation operator\nfrom sympy.physics.quantum import pauli, Dagger, Commutator\nfrom sympy.physics.quantum.operatorordering import normal_ordered_form\n\n# Pauli matrices\nsx = pauli.SigmaX()\nsy = pauli.SigmaY()\nsz = pauli.SigmaZ()\n\n# qubit raising and lowering operators\nsplus = pauli.SigmaPlus()\nsminus = pauli.SigmaMinus()", "_____no_output_____" ], [ "# define J-C Hamiltonian in terms of diagonal and block off-diagonal terms\nH0 = wr*Dagger(a)*a - (1/2)*wq*sz; \nH2 = g*(Dagger(a)*sminus + a*splus); \nHJC = H0 + H2; HJC # print", "_____no_output_____" ], [ "# using the above method for finding the ansatz\neta = Commutator(H0, H2); eta", "_____no_output_____" ] ], [ [ "As a note about `sympy`, we will need to use the methods `doit()`, `expand()`, `normal_ordered_form()`, and `qsimplify_pauli()` to proceed with actually taking the commutator, expanding it into terms, normal-ordering the bosonic modes (creation before annihilation), and simplifying the Pauli algebra. Trying this with $\\eta$ yields", "_____no_output_____" ] ], [ [ "pauli.qsimplify_pauli(normal_ordered_form(eta.doit().expand()))", "_____no_output_____" ] ], [ [ "Now take $A$ and $B$ as the coefficients of $a^\\dagger \\sigma^-$ and $a\\sigma^+$, respectively. Then the commutator", "_____no_output_____" ] ], [ [ "A = sp.Symbol('A')\nB = sp.Symbol('B')\neta = A * Dagger(a) * sminus - B * a * splus;\npauli.qsimplify_pauli(normal_ordered_form(Commutator(H0, eta).doit().expand()))", "_____no_output_____" ] ], [ [ "This expression should be equal to $H_2$", "_____no_output_____" ] ], [ [ "H2", "_____no_output_____" ] ], [ [ "which implies $A = B = g/\\Delta$, where $\\Delta = \\omega_r - \\omega_q$ is the frequency detuning between the resonator and qubit. 
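\n\nAs a quick cross-check (a sketch, not in the original notebook; it reuses the symbols defined above and assumes `sympy` cancels the resulting scalar prefactor automatically), we can substitute $A = B = g/\\Delta$ back into the ansatz and confirm that the commutator reduces to $H_2$ once $\\omega_r$ is rewritten as $\\Delta + \\omega_q$:\n\n```python\n# hypothetical sanity check of the coefficients A = B = g/Delta\nlhs = pauli.qsimplify_pauli(normal_ordered_form(\n    Commutator(H0, eta.subs({A: g/Delta, B: g/Delta})).doit().expand()))\nlhs.subs(wr, Delta + wq).expand()  # expect g*(Dagger(a)*sminus + a*splus), i.e. H2\n```\n\n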
Therefore our $S^{(1)}$ is determined to be", "_____no_output_____" ] ], [ [ "S1 = eta.subs(A, g/Delta)\nS1 = S1.subs(B, g/Delta); S1.factor()", "_____no_output_____" ] ], [ [ "Then we can calculate the effective second order correction to $H_0$", "_____no_output_____" ] ], [ [ "Heff = H0 + 0.5*pauli.qsimplify_pauli(normal_ordered_form(Commutator(H2, S1).doit().expand())).simplify(); Heff", "_____no_output_____" ] ], [ [ "This is typically written as\n$$\nH_{\\rm eff} = \\left(\\omega_r + \\frac{g^2}{\\Delta}\\sigma_z\\right)a^\\dagger a - \\frac{1}{2}\\left(\\omega_q -\\frac{g^2}{\\Delta}\\right) \\sigma_z,\n$$\nwhich shows a state-dependent shift by $\\chi \\equiv g^2/\\Delta$ of the resonator frequency, called the *ac Stark shift*, and a shift in the qubit frequency due to quantum vacuum fluctuations, called the *Lamb shift*.", "_____no_output_____" ], [ "## 4. Full Transmon <a id='full-transmon'></a>\n\nBecause we are using *transmons* instead of *qubits*, we need to be careful to take the higher-order energy terms into account when designing and simulating devices. The full transmon Hamiltonian coupled to the readout resonator is\n\n$$\nH^{\\rm tr} = \\omega_r a^\\dagger a + \\sum_j \\omega_j |j\\rangle\\langle j| + g\\left(a^\\dagger c + ac^\\dagger \\right),\n$$\n\nwhere now $c = \\sum_j \\sqrt{j+1}|j\\rangle\\langle j+1|$ is the transmon lowering operator. Similarly, we take the weakly interacting subsets $A$ as the even-numbered transmon modes and $B$ as the odd-numbered transmon modes. Using the ansatz\n\n$$\nS^{(1)} = \\sum_j \\alpha_j a^\\dagger \\sqrt{j+1}|j\\rangle\\langle j+1| - \\alpha_j^* a \\sqrt{j+1}|j+1\\rangle\\langle j|,\n$$\n\none may proceed along a messier version of the Jaynes-Cummings calculation. With some effort one can show the second order effective Hamiltonian is\n\n$$\nH^{\\rm tr}_{\\rm eff} = \\left( \\omega_r + \\sum_j \\frac{g^2(\\omega_r-\\omega+\\delta)}{(\\omega_r-\\omega-\\delta j)(\\omega_r - \\omega - \\delta(j-1))} |j\\rangle\\langle j| \\right) a^\\dagger a + \\sum_j \n\\left[\nj\\omega + \\frac{\\delta}{2} (j-1)j + \\frac{jg^2}{\\omega-\\omega_r+(j-1)\\delta} \\right]|j\\rangle\\langle j|.\n$$", "_____no_output_____" ], [ "## 5. Qubit Drive with cQED <a id='qdwcqed'></a>\n\nFollowing [Blais *et al.* (2004)](https://arxiv.org/abs/cond-mat/0402216), we model the drive Hamiltonian as\n$$\nH^d(t) = \\xi(t)\\left( a^\\dagger e^{-i\\omega_d t} + ae^{i\\omega_d t}\\right).\n$$\nFollowing the treatment in the [Ph.D. dissertation of Lev Bishop](https://arxiv.org/abs/1007.3520), the drive acts on the qubit via the Glauber displacement operator\n$$\nD(\\alpha) = e^{\\alpha(t) a^\\dagger - \\alpha^*(t) a}.\n$$\nMoving to the Jaynes-Cummings Hamiltonian in the frame rotating at the drive frequency,\n$$\nH = \\Delta_r a^\\dagger a - \\frac{1}{2} \\Delta_q \\sigma^z + g(a^\\dagger \\sigma^- + a\\sigma^+) + \\xi(t)(a^\\dagger + a)\n$$\nwith $\\Delta_r = \\omega_r - \\omega_d$ and $\\Delta_q = \\omega_q - \\omega_d$. 
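\n\nThe displacement identities used in the next step, $D^\\dagger a D = a + \\alpha$ and its conjugate, can also be checked numerically in a truncated Fock space. This is a sketch, not from the original text: the cutoff and the value of $\\alpha$ are arbitrary illustrative choices, and since truncation only spoils the identity near the cutoff, we compare the low-lying block.\n\n```python\nimport numpy as np\nfrom scipy.linalg import expm\n\nn_max, alpha = 30, 0.7 + 0.3j                    # assumed cutoff and displacement\na = np.diag(np.sqrt(np.arange(1, n_max)), k=1)   # truncated annihilation operator\nD = expm(alpha * a.conj().T - np.conj(alpha) * a)  # Glauber displacement\n\nlhs = D.conj().T @ a @ D\nrhs = a + alpha * np.eye(n_max)\nk = n_max // 2  # compare away from the truncation edge\nprint('deviation:', np.abs(lhs - rhs)[:k, :k].max())\n```\n\n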
Applying Hadamard's Lemma for nested commutators,\n$$\ne^{A}Be^{-A} = B + [A,B] + \\frac{1}{2!} [A,[A,B]] + \\frac{1}{3!}[A,[A,[A,B]]] + \\ldots,\n$$\nwe see that\n$$\nD^\\dagger a^{(\\dagger)} D = \\exp\\{-\\alpha(t) a^\\dagger + \\alpha^*(t) a\\} a^{(\\dagger)}\\exp\\{\\alpha(t) a^\\dagger - \\alpha^*(t) a\\} \n= a^{(\\dagger)} + \\left[-\\alpha(t) a^\\dagger + \\alpha^*(t) a, a^{(\\dagger)}\\right] + \\frac{1}{2!}\\left[-\\alpha(t) a^\\dagger + \\alpha^*(t) a, \\left[-\\alpha(t) a^\\dagger + \\alpha^*(t) a, a^{(\\dagger)}\\right]\\right] + \\ldots\n = a^{(\\dagger)} + \\alpha^{(*)}\n$$\nand\n\n$$\nD^\\dagger a^\\dagger a D = a^\\dagger a + \\left[-\\alpha(t) a^\\dagger + \\alpha^*(t) a, a^\\dagger a\\right] + \\frac{1}{2!}\\left[-\\alpha(t) a^\\dagger + \\alpha^*(t) a, \\left[-\\alpha(t) a^\\dagger + \\alpha^*(t) a, a^\\dagger a\\right]\\right] + \\ldots\n= a^\\dagger a + \\alpha(t)a^\\dagger + \\alpha^*(t)a + |\\alpha(t)|^2,\n$$\n\nso we can transform the Hamiltonian as\n\n$$\n\\tilde{H} = D^\\dagger H D - iD^\\dagger \\dot{D} = \\Delta_r\\left(a^\\dagger a + \\alpha(t)a^\\dagger + \\alpha^*(t)a + |\\alpha(t)|^2\\right) - \\frac{1}{2} \\Delta_q \\sigma^z \\\\ + g\\left((a^\\dagger + \\alpha^*(t))\\sigma^- + (a+\\alpha(t))\\sigma^+\\right) + \\xi(t)\\left(a^\\dagger + \\alpha^*(t) + a + \\alpha(t) \\right) - i\\left(\\dot{\\alpha}(t) a^\\dagger - \\dot{\\alpha}^*(t) a\\right) \\\\\n= \\Delta_r a^\\dagger a - \\frac{1}{2}\\Delta_q \\sigma^z + g\\left((a^\\dagger + \\alpha^*(t))\\sigma^- + (a+\\alpha(t))\\sigma^+\\right) \\\\\n+\\xi(t)\\left(a^\\dagger + a \\right) + \\Delta_r\\left(\\alpha(t)a^\\dagger + \\alpha^*(t)a\\right)- i\\left(\\dot{\\alpha}(t) a^\\dagger - \\dot{\\alpha}^*(t) a\\right),\n$$\n\nwhere the non-operator terms have been dropped. The last line can be set to zero if we choose\n\n$$\n-i\\dot{\\alpha}(t) + \\Delta_r \\alpha(t) + \\xi(t) = 0,\n$$\n\nand finally, introducing the Rabi frequency $\\Omega(t) = 2g\\alpha(t)$, we arrive at\n\n$$\n\\tilde{H} = \\Delta_r a^\\dagger a - \\frac{1}{2}\\Delta_q \\sigma^z + g\\left(a^\\dagger\\sigma^- + a\\sigma^+\\right)\n+\\frac{1}{2} \\left( \\Omega^*(t)\\sigma^- + \\Omega(t) \\sigma^+\\right).\n$$\n\nSince the drive part of the Hamiltonian is block off-diagonal, we can perform a Schrieffer-Wolff transformation on it (for a real drive, $\\Omega^*(t) = \\Omega(t)$) and add it to the effective Hamiltonian,\n\n$$\n[\\tilde{H}^d, S^{(1)}] = -\\frac{\\Omega(t)}{2} \\left[ (\\sigma^- + \\sigma^+),\\frac{g}{\\Delta}\\left( a^\\dagger \\sigma^- - a\\sigma^+\\right)\\right] = \\frac{g\\Omega(t)}{2\\Delta}(a + a^\\dagger)\\sigma^z,\n$$\n\nso the effective Hamiltonian becomes\n\n$$\n\\tilde{H}_{\\rm eff} = \\left( \\Delta_r + \\frac{g^2}{\\Delta}\\sigma^z\\right) a^\\dagger a - \\frac{1}{2}\\left(\\Delta_q - \\frac{g^2}{\\Delta}\\right) \\sigma^z + \\frac{\\Omega(t)}{2}\\sigma^x\n+ \\frac{g\\Omega(t)}{4\\Delta}(a + a^\\dagger)\\sigma^z.\n$$\n\nNote here that to eliminate the $z$ rotations, one should drive at the Lamb-shifted qubit frequency. The additional $\\sigma^z$ term is small because $\\Delta \\gg g$ in the dispersive regime.", "_____no_output_____" ], [ "## 6. 
The Cross Resonance Entangling Gate <a id='tcreg'></a>\n\nThe Hamiltonian for driving qubit one at the frequency of qubit two can be written as\n\n$$\nH^d(t) = \\frac{\\Omega(t)}{2} \\left( \\sigma_1^+ e^{-i\\tilde{\\omega}_2 t} + \\sigma_1^- e^{i\\tilde{\\omega}_2 t}\\right).\n$$\n\nNow, we need to apply Schrieffer-Wolff to the drive term to get the effective Hamiltonian, and then do the RWA at frequency $\\tilde{\\omega}_2$:\n\n$$\n[\\tilde{H}^d, S^{(1)}] = -\\frac{J\\Omega(t)}{2\\Delta_{12}} \\left[ \\sigma_1^+ e^{-i\\tilde{\\omega}_2 t} + \\sigma_1^- e^{i\\tilde{\\omega}_2 t}, \\sigma_1^+ \\sigma_2^- - \\sigma_2^+ \\sigma_1^-\\right] \n=-\\frac{J\\Omega(t)}{2\\Delta_{12}} \\left(\\sigma_1^z \\sigma_2^+ e^{-i\\tilde{\\omega}_2 t}\n+\\sigma_1^z \\sigma_2^- e^{i\\tilde{\\omega}_2 t} \\right)\n$$\n\nTransforming back to the rotating frame at $\\omega_2$, we get the effective qubit cross resonance Hamiltonian\n\n$$\n\\tilde{H}_{\\rm eff}^{\\rm CR} = - \\frac{\\tilde{\\omega}_1-\\tilde{\\omega}_2}{2}\\sigma_1^z \n+ \\frac{\\Omega(t)}{2} \\left(\\sigma_2^x - \\frac{J}{2\\Delta_{12}} \\sigma_1^z \\sigma_2^x \\right).\n$$\n\nThe first two terms involve the $ZI$ interaction due to a Stark shift on qubit 1 and an unconditional $IX$ rotation on qubit 2, but the final term represents the $ZX$ interaction that produces entanglement. By putting qubit 1 into an equal superposition of $|0\\rangle$ and $|1\\rangle$ and applying the cross resonance gate for a duration corresponding to a $\\pi/2$ rotation around the $x$-axis, a maximally entangled state is produced. Using Qiskit to characterize the two-qubit cross resonance Hamiltonian for transmons can be done with [this tutorial](https://github.com/Qiskit/qiskit-tutorials/blob/9405254b38312771f8d5c2dd6f451cec35307995/tutorials/noise/1_hamiltonian_and_gate_characterization.ipynb). Further reading on the cross resonance gate can be found [here](https://arxiv.org/abs/1106.0553) and [here](https://arxiv.org/abs/1603.04821).\n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ] ]
d0153a9a44fdb82dacfa08df9760f4d714e5d39d
28,712
ipynb
Jupyter Notebook
doc/source/methods/Anchors.ipynb
mauicv/alibi
30fea76391c255963c8818c2b54aa615b0d6f858
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
doc/source/methods/Anchors.ipynb
mauicv/alibi
30fea76391c255963c8818c2b54aa615b0d6f858
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
doc/source/methods/Anchors.ipynb
mauicv/alibi
30fea76391c255963c8818c2b54aa615b0d6f858
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
46.459547
1,185
0.660595
[ [ [ "[[source]](../api/alibi.explainers.anchor_tabular.rst)", "_____no_output_____" ], [ "# Anchors", "_____no_output_____" ], [ "## Overview", "_____no_output_____" ], [ "The anchor algorithm is based on the [Anchors: High-Precision Model-Agnostic Explanations](https://homes.cs.washington.edu/~marcotcr/aaai18.pdf) paper by Ribeiro et al. and builds on the open source [code](https://github.com/marcotcr/anchor) from the paper's first author.\n\nThe algorithm provides model-agnostic (*black box*) and human interpretable explanations suitable for classification models applied to images, text and tabular data. The idea behind anchors is to explain the behaviour of complex models with high-precision rules called *anchors*. These anchors are locally sufficient conditions to ensure a certain prediction with a high degree of confidence.\n\nAnchors address a key shortcoming of local explanation methods like [LIME](https://arxiv.org/abs/1602.04938) which proxy the local behaviour of the model in a linear way. It is however unclear to what extent the explanation holds up in the region around the instance to be explained, since both the model and data can exhibit non-linear behaviour in the neighborhood of the instance. This approach can easily lead to overconfidence in the explanation and misleading conclusions on unseen but similar instances. The anchor algorithm tackles this issue by incorporating coverage, the region where the explanation applies, into the optimization problem. A simple example from sentiment classification illustrates this (Figure 1). Dependent on the sentence, the occurrence of the word *not* is interpreted as positive or negative for the sentiment by LIME. It is clear that the explanation using *not* is very local. Anchors however aim to maximize the coverage, and require *not* to occur together with *good* or *bad* to ensure respectively negative or positive sentiment.\n\n", "_____no_output_____" ], [ "![LIMEsentiment](lime_sentiment.png)\n\nRibeiro et al., *Anchors: High-Precision Model-Agnostic Explanations*, 2018", "_____no_output_____" ], [ "As highlighted by the above example, an anchor explanation consists of *if-then rules*, called the anchors, which sufficiently guarantee the explanation locally and try to maximize the area for which the explanation holds. This means that as long as the anchor holds, the prediction should remain the same regardless of the values of the features not present in the anchor. Going back to the sentiment example: as long as *not good* is present, the sentiment is negative, regardless of the other words in the movie review.", "_____no_output_____" ], [ "### Text", "_____no_output_____" ], [ "For text classification, an interpretable anchor consists of the words that need to be present to ensure a prediction, regardless of the other words in the input. The words that are not present in a candidate anchor can be sampled in 3 ways:\n\n* Replace word token by UNK token.\n\n* Replace word token by sampled token from a corpus with the same POS tag and probability proportional to the similarity in the embedding space. By sampling similar words, we keep more context than simply using the UNK token.\n\n* Replace word tokens with sampled tokens according to the masked language model probability distribution. 
The tokens can be sampled in parallel, independently of one another, or sequentially (autoregressively), conditioned on the previously generated tokens.", "_____no_output_____" ], [ "### Tabular Data", "_____no_output_____" ], [ "Anchors are also suitable for tabular data with both categorical and continuous features. The continuous features are discretized into quantiles (e.g. deciles), so they become more interpretable. The features in a candidate anchor are kept constant (same category or bin for discretized features) while we sample the other features from a training set. As a result, anchors for tabular data need access to training data. Let's illustrate this with an example. Say we want to predict whether a person makes less or more than £50,000 per year based on the person's characteristics, including age (continuous variable) and marital status (categorical variable). The following would then be a potential anchor: Hugo makes more than £50,000 because he is married and his age is between 35 and 45 years.", "_____no_output_____" ], [ "### Images", "_____no_output_____" ], [ "Similar to LIME, images are first segmented into superpixels, maintaining local image structure. The interpretable representation then consists of the presence or absence of each superpixel in the anchor. It is crucial to generate meaningful superpixels in order to arrive at interpretable explanations. The algorithm supports a number of standard image segmentation algorithms ([felzenszwalb, slic and quickshift](https://scikit-image.org/docs/dev/auto_examples/segmentation/plot_segmentations.html#sphx-glr-auto-examples-segmentation-plot-segmentations-py)) and allows the user to provide a custom segmentation function.\n\nThe superpixels not present in a candidate anchor can be masked in 2 ways:\n\n* Take the average value of that superpixel.\n\n* Use the pixel values of a superimposed picture over the masked superpixels.", "_____no_output_____" ], [ "![anchorimage](anchor_image.png)\n\nRibeiro et al., *Anchors: High-Precision Model-Agnostic Explanations*, 2018", "_____no_output_____" ], [ "### Efficiently Computing Anchors", "_____no_output_____" ], [ "The anchor needs to return the same prediction as the original instance with a minimal confidence of e.g. 95%. If multiple candidate anchors satisfy this constraint, we go with the anchor that has the largest coverage. Because the number of potential anchors is exponential in the feature space, we need a faster approximate solution.\n\nThe anchors are constructed bottom-up in combination with [beam search](https://en.wikipedia.org/wiki/Beam_search). We start with an empty rule or anchor, and incrementally add an *if-then* rule in each iteration until the minimal confidence constraint is satisfied. If multiple valid anchors are found, the one with the largest coverage is returned. A simplified sketch of this bottom-up search loop is given below.\n\nIn order to select the best candidate anchors for the beam width efficiently during each iteration, we formulate the problem as a [pure exploration multi-armed bandit](https://www.cse.iitb.ac.in/~shivaram/papers/kk_colt_2013.pdf) problem. 
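\n\nThe following is a schematic, simplified sketch of that search loop (hypothetical code, not the `alibi` implementation): it scores every candidate with a fixed number of samples, whereas the real algorithm allocates samples adaptively via the bandit, and a full version would also break ties between valid anchors by coverage. The `sample_fn(x, anchor, n)` helper, which must return perturbed copies of `x` with the anchor features held fixed, is assumed to be supplied by the caller.\n\n```python\nimport numpy as np\n\ndef estimate_precision(anchor, predictor, sample_fn, x, n=100):\n    # fraction of perturbed samples (anchor features fixed) keeping the prediction\n    samples = sample_fn(x, anchor, n)\n    return np.mean(predictor(samples) == predictor(x[None, :])[0])  # x: 1-D feature vector\n\ndef beam_anchor_search(x, predictor, sample_fn, n_features,\n                       threshold=0.95, beam_size=2):\n    beam = [()]  # start from the empty anchor\n    for _ in range(n_features):  # anchors grow by one feature per round\n        candidates = {tuple(sorted(b + (f,)))\n                      for b in beam for f in range(n_features) if f not in b}\n        scored = sorted(((estimate_precision(c, predictor, sample_fn, x), c)\n                         for c in candidates), reverse=True)\n        valid = [c for p, c in scored if p >= threshold]\n        if valid:\n            return valid[0]  # real algorithm: highest-coverage valid anchor\n        beam = [c for _, c in scored[:beam_size]]\n    return beam[0]\n```\n\nIn `alibi`, the fixed sample budget above is instead managed by the pure-exploration bandit, which adaptively decides how many samples each candidate anchor receives. 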
This limits the number of model prediction calls, which can be a computational bottleneck.\n\nFor more details, we refer the reader to the original [paper](https://homes.cs.washington.edu/~marcotcr/aaai18.pdf).", "_____no_output_____" ], [ "## Usage", "_____no_output_____" ], [ "While each data type has specific requirements to initialize the explainer and return explanations, the underlying algorithm to construct the anchors is the same.\n\nIn order to efficiently generate anchors, the following hyperparameters need to be set to sensible values when calling the `explain` method:\n\n* `threshold`: the previously discussed minimal confidence level. `threshold` defines the minimum fraction of samples for a candidate anchor that need to lead to the same prediction as the original instance. A higher value gives more confidence in the anchor, but also leads to more computation time. The default value is 0.95.\n\n* `tau`: determines when we assume convergence for the multi-armed bandit. A bigger value for `tau` means faster convergence but also looser anchor conditions. By default equal to 0.15.\n\n* `beam_size`: the size of the beam width. A bigger beam width can lead to a better overall anchor at the expense of more computation time.\n\n* `batch_size`: the batch size used for sampling. A bigger batch size gives more confidence in the anchor, again at the expense of computation time since it involves more model prediction calls. The default value is 100.\n\n* `coverage_samples`: number of samples used to compute the coverage of the anchor. By default set to 10000.", "_____no_output_____" ], [ "### Text", "_____no_output_____" ], [ "#### Predictor\n\nSince the explainer works on black-box models, only access to a predict function is needed. The model below is a simple logistic regression trained on movie reviews with negative or positive sentiment and pre-processed with a CountVectorizer:\n\n```python\npredict_fn = lambda x: clf.predict(vectorizer.transform(x))\n```", "_____no_output_____" ], [ "#### Simple sampling strategies\n\n`AnchorText` provides two simple sampling strategies: `unknown` and `similarity`. Randomly chosen words, except those in the queried anchor, are replaced by the `UNK` token for the `unknown` strategy, and by similar words with the same part-of-speech tag for the `similarity` strategy.\n\nTo perform text tokenization, POS tagging, word similarity computations, etc., we use spaCy. The spaCy model can be loaded as follows:\n \n```python\nimport spacy\nfrom alibi.utils import spacy_model\n\nmodel = 'en_core_web_md'\nspacy_model(model=model)\nnlp = spacy.load(model)\n```", "_____no_output_____" ], [ "If we choose to replace words with the `UNK` token, we define the explainer as follows:\n\n```python\nexplainer = AnchorText(predictor=predict_fn, sampling_strategy='unknown', nlp=nlp)\n```", "_____no_output_____" ], [ "Likewise, if we choose to sample similar words from a corpus, we define the explainer as follows:\n\n```python\nexplainer = AnchorText(predictor=predict_fn, sampling_strategy='similarity', nlp=nlp)\n```", "_____no_output_____" ], [ "#### Language model\n\n`AnchorText` provides the option to define the perturbation distribution through a `language_model` sampling strategy. In this case, randomly chosen words, except those in the queried anchor, are replaced by words sampled according to the language model's predictions. 
We provide support for three transformer-based language models: `DistilbertBaseUncased`, `BertBaseUncased`, and `RobertaBase`.\n\nA language model can be loaded as follows:\n\n```python\nlanguage_model = DistilbertBaseUncased()\n```", "_____no_output_____" ], [ "Then we can initialize the explainer as follows:\n\n```python\nexplainer = AnchorText(predictor=predict_fn, sampling_strategy=\"language_model\", \n                       language_model=language_model)\n```", "_____no_output_____" ], [ "#### Sampling parameters\n\nParameters specific to each sampling strategy can be passed to the constructor via `kwargs`. For example:\n\n* If `sampling_strategy=\"unknown\"` we can initialize the explainer as follows:\n\n```python\nexplainer = AnchorText(\n    predictor=predict_fn, \n    sampling_strategy='unknown',   # replace a word by the UNK token\n    nlp=nlp,                       # spacy object\n    sample_proba=0.5,              # probability of a word to be replaced by the UNK token\n)\n```\n\n* If `sampling_strategy=\"similarity\"` we can initialize the explainer as follows:\n\n```python\nexplainer = AnchorText(\n    predictor=predict_fn, \n    sampling_strategy='similarity',  # replace a word by similar words\n    nlp=nlp,                         # spacy object\n    sample_proba=0.5,                # probability of a word to be replaced by a similar word\n    use_proba=True,                  # sample according to the similarity distribution\n    top_n=20,                        # consider only the top 20 most similar words\n    temperature=0.2                  # higher temperature implies more randomness when sampling\n)\n```\n\n* Or if `sampling_strategy=\"language_model\"`, the explainer can be defined as:\n\n```python\nexplainer = AnchorText(\n    predictor=predict_fn,\n    sampling_strategy=\"language_model\",  # use language model to predict the masked words\n    language_model=language_model,       # language model to be used\n    filling=\"parallel\",                  # just one pass through the transformer\n    sample_proba=0.5,                    # probability of masking and replacing a word according to the LM \n    frac_mask_templates=0.1,             # fraction of masking templates \n    use_proba=True,                      # use the word distribution when sampling (if False, sample uniformly)\n    top_n=50,                            # consider the first 50 most likely words\n    temperature=0.2,                     # higher temperature implies more randomness when sampling\n    stopwords=['and', 'a', 'but'],       # those words will not be masked/disturbed\n    punctuation=string.punctuation,      # punctuation tokens contained here will not be masked/disturbed\n    sample_punctuation=False,            # if False, tokens included in `punctuation` will not be sampled \n    batch_size_lm=32                     # batch size used for the language model\n)\n```", "_____no_output_____" ], [ "Words outside of the candidate anchor can be replaced by the `UNK` token, by similar words, or masked out and replaced by the most likely words according to the language model prediction, with a probability equal to `sample_proba`. We can sample the *top n* most similar words or the *top n* most likely language model predictions by setting the `top_n` parameter. We can put more weight on similar or most likely words by decreasing the `temperature` argument. It is also possible to sample words from the corpus proportionally to the word similarity with the ground truth word, or according to the language model's conditional probability distribution, by setting `use_proba` to `True`. Furthermore, we can avoid masking specific words by including them in the `stopwords` list.\n\nWorking with transformers can be computationally and memory-wise expensive. 
For `sampling_strategy=\"language_model\"` we provide two methods to predict the masked words: `filling=\"parallel\"` and `filling=\"autoregressive\"`.\n\nIf `filling=\"parallel\"`, we perform a single forward pass through the transformer. After obtaining the probability distribution of the masked words, each word is sampled independently of the others.\n\nIf `filling=\"autoregressive\"`, we perform multiple forward passes through the transformer and generate the words one at a time. Thus, the masked words will be conditioned on the previous ones. **Note that this filling method is computationally expensive**.\n\nTo further decrease the explanation runtime, for `sampling_strategy=\"language_model\", filling=\"parallel\"`, we provide a secondary functionality through the `frac_mask_templates` parameter. Behind the scenes, the anchor algorithm is constantly requesting samples to query the predictor. Thus, we need to generate what we call *mask templates*, which are sentences containing words outside the candidate anchors replaced by the `<MASK>` token. The `frac_mask_templates` controls the fraction of mask templates to be generated. For example, if we need to generate 100 samples and `frac_mask_templates=0.1`, we will generate only 10 mask templates. Those 10 templates are then passed to the language model to predict the masked words. Having the distribution of each word in each mask template, we can generate the 100 requested samples. Note that instead of passing 100 masked sentences through the language model (which is expensive), we only pass 10 sentences. Although this can increase the speed considerably, it can also decrease the diversity of the samples. The maximum batch size used in a forward pass through the language model can be specified by setting `batch_size_lm`.\n\nWhen `sampling_strategy=\"language_model\"`, we can specify the `punctuation` considered by the sampling algorithm. Any token composed only of characters in the `punctuation` string will not be perturbed (we call those *punctuation tokens*). Furthermore, we can decide whether to sample *punctuation tokens* by setting the `sample_punctuation` parameter. If `sample_punctuation=False`, then *punctuation tokens* will not be sampled.", "_____no_output_____" ], [ "#### Explanation", "_____no_output_____" ], [ "Let's define the instance we want to explain and verify that the sentiment prediction on the original instance is positive:\n\n```python\ntext = 'This is a good book .'\nclass_names = ['negative', 'positive']\npred = class_names[predict_fn([text])[0]]\n```\n\nNow we can explain the instance:\n\n```python\nexplanation = explainer.explain(text, threshold=0.95)\n```\n\nThe `explain` method returns an `Explanation` object with the following attributes:\n\n* *anchor*: a list of words in the anchor.\n\n* *precision*: the fraction of sampled instances, among those where the anchor holds, that yield the same prediction as the original instance. The precision will always be $\\geq$ `threshold` for a valid anchor.\n\n* *coverage*: the fraction of sampled instances the anchor applies to.\n\nThe *raw* attribute is a dictionary which also contains example instances where the anchor holds and the prediction is the same as on the original instance, as well as examples where the anchor holds but the prediction changed, to give the user a sense of where the anchor fails. *raw* also stores information on the *anchor*, *precision* and *coverage* of partial anchors. 
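\n\nA minimal sketch of inspecting the returned object (using only the attributes documented above; the printed layout is our own choice):\n\n```python\nexplanation = explainer.explain(text, threshold=0.95)\nprint('anchor   :', ' AND '.join(explanation.anchor))\nprint('precision:', explanation.precision)  # >= threshold for a valid anchor\nprint('coverage :', explanation.coverage)\n```\n\n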
These partial results let the user track how, for instance, the *precision* improves as more features (words in the case of text) are added to the anchor.", "_____no_output_____" ], [ "### Tabular Data", "_____no_output_____" ], [ "#### Initialization and fit", "_____no_output_____" ], [ "To initialize the explainer, we provide a predict function, a list with the feature names to make the anchors easy to understand, as well as an optional mapping from the encoded categorical features to a description of the category. An example for `categorical_names` would be\n```python\ncategory_map = {0: [\"married\", \"divorced\"], 3: [\"high school diploma\", \"master's degree\"]}\n```\n \nEach key in *category_map* refers to the column index in the input for the relevant categorical variable, while the values are lists with the options for each categorical variable. To make it easy, we provide a utility function `gen_category_map` to generate this map automatically from a Pandas dataframe:\n\n```python\nfrom alibi.utils import gen_category_map\ncategory_map = gen_category_map(df)\n```\n\nThen initialize the explainer:\n```python\npredict_fn = lambda x: clf.predict(preprocessor.transform(x))\nexplainer = AnchorTabular(predict_fn, feature_names, categorical_names=category_map)\n```\n\nThe implementation supports a one-hot encoded representation of the categorical features by setting `ohe=True`. The `feature_names` and `categorical_names` (`category_map`) remain unchanged. The prediction function `predict_fn` should expect as input datapoints with one-hot encoded categorical features. To initialize the explainer with the one-hot encoding support: \n```python\nexplainer = AnchorTabular(predict_fn, feature_names, categorical_names=category_map, ohe=True)\n```", "_____no_output_____" ], [ "Tabular data requires a fit step to map the ordinal features into quantiles and therefore needs access to a representative set of the training data. `disc_perc` is a list with percentiles used for binning:\n\n```python\nexplainer.fit(X_train, disc_perc=[25, 50, 75])\n```\n\nNote that if one-hot encoding support is enabled (`ohe=True`), the `fit` call expects the data to be one-hot encoded.", "_____no_output_____" ], [ "#### Explanation", "_____no_output_____" ], [ "Let's check the prediction of the model on the original instance and explain:\n\n```python\nclass_names = ['<=50K', '>50K']\npred = class_names[explainer.predict_fn(X)[0]]\nexplanation = explainer.explain(X, threshold=0.95)\n```\n\nThe returned `Explanation` object contains the same attributes as the text explainer, so you could explain a prediction as follows:\n\n```\nPrediction: <=50K\nAnchor: Marital Status = Never-Married AND Relationship = Own-child\nPrecision: 1.00\nCoverage: 0.13\n```\n\nNote that if one-hot encoding support is enabled (`ohe=True`), the `explain` call expects the data to be one-hot encoded.", "_____no_output_____" ], [ "### Images", "_____no_output_____" ], [ "#### Initialization", "_____no_output_____" ], [ "Besides the predict function, we also need to specify either a built-in or custom superpixel segmentation function. The built-in methods are [felzenszwalb](https://scikit-image.org/docs/dev/api/skimage.segmentation.html#skimage.segmentation.felzenszwalb), [slic](https://scikit-image.org/docs/dev/api/skimage.segmentation.html#skimage.segmentation.slic) and [quickshift](https://scikit-image.org/docs/dev/api/skimage.segmentation.html#skimage.segmentation.quickshift). 
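\n\nAs a quick way to eyeball a candidate segmentation before wiring it into the explainer, the built-in methods can also be called directly (a sketch; it assumes `image` is an RGB array and reuses the same kwargs that are passed to `AnchorImage` below):\n\n```python\nfrom skimage.segmentation import slic\n\n# preview the superpixels with the same settings used for the explainer\nsegments = slic(image, n_segments=15, compactness=20, sigma=.5)\nprint(segments.shape, 'label range:', segments.min(), '-', segments.max())\n```\n\n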
It is important to create sensible superpixels in order to speed up convergence and generate interpretable explanations. Tuning the hyperparameters of the segmentation method is recommended.\n\n```python\nexplainer = AnchorImage(predict_fn, image_shape, segmentation_fn='slic', \n segmentation_kwargs={'n_segments': 15, 'compactness': 20, 'sigma': .5}, \n images_background=None)\n```\n\nExample of superpixels generated for the Persian cat picture using the *slic* method:\n\n![persiancat](persiancat.png)\n![persiancatsegm](persiancatsegm.png)\n\nThe following function would be an example of a custom segmentation function dividing the image into rectangles.\n\n\n```python\ndef superpixel(image, size=(4, 7)):\n segments = np.zeros([image.shape[0], image.shape[1]])\n row_idx, col_idx = np.where(segments == 0)\n for i, j in zip(row_idx, col_idx):\n segments[i, j] = int((image.shape[1]/size[1]) * (i//size[0]) + j//size[1])\n return segments\n```\n\nThe `images_background` parameter allows the user to provide images used to superimpose on the masked superpixels, not present in the candidate anchor, instead of taking the average value of the masked superpixel. The superimposed images need to have the same shape as the explained instance.", "_____no_output_____" ], [ "#### Explanation", "_____no_output_____" ], [ "We can then explain the instance in the usual way:\n\n```python\nexplanation = explainer.explain(image, p_sample=.5)\n```\n\n`p_sample` determines the fraction of superpixels that are either changed to the average superpixel value or that are superimposed. \n\nThe `Explanation` object again contains information about the anchor's *precision*, *coverage* and examples where the anchor does or does not hold. On top of that, it also contains a masked image with only the anchor superpixels visible under the *anchor* attribute (see image below) as well as the image's superpixels under *segments*.\n\n![persiancatanchor](persiancatanchor.png)", "_____no_output_____" ], [ "## Examples", "_____no_output_____" ], [ "### Image", "_____no_output_____" ], [ "[Anchor explanations for ImageNet](../examples/anchor_image_imagenet.ipynb)\n\n[Anchor explanations for fashion MNIST](../examples/anchor_image_fashion_mnist.ipynb)", "_____no_output_____" ], [ "### Tabular Data", "_____no_output_____" ], [ "[Anchor explanations on the Iris dataset](../examples/anchor_tabular_iris.ipynb)\n\n[Anchor explanations for income prediction](../examples/anchor_tabular_adult.ipynb)", "_____no_output_____" ], [ "### Text", "_____no_output_____" ], [ "[Anchor explanations for movie sentiment](../examples/anchor_text_movie.ipynb)", "_____no_output_____" ] ] ]
[ "markdown" ]
[ [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ] ]
d0153bb8842405fb7876cbe15b2de2ceddfaf294
848,916
ipynb
Jupyter Notebook
assignment3/q1/q1.ipynb
824zzy/CSE5334_DataMining
7fd35462ef7789828e198ceb51072d000c6d9a6e
[ "MIT" ]
null
null
null
assignment3/q1/q1.ipynb
824zzy/CSE5334_DataMining
7fd35462ef7789828e198ceb51072d000c6d9a6e
[ "MIT" ]
null
null
null
assignment3/q1/q1.ipynb
824zzy/CSE5334_DataMining
7fd35462ef7789828e198ceb51072d000c6d9a6e
[ "MIT" ]
null
null
null
38.905408
25,936
0.538508
[ [ [ "import torch\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nfrom sklearn.metrics import accuracy_score\nimport numpy as np\nfrom torch.utils.tensorboard import SummaryWriter\nfrom tqdm.notebook import tqdm\ntorch.manual_seed(824)\nnp.random.seed(824)\nnp.set_printoptions(threshold=np.inf)\n\n# build train set\nmul1, sigma1 = [1, 0], [[1, 0.75], [0.75, 1]]\nmul2, sigma2 = [0, 1], [[1, 0.75], [0.75, 1]]\ntrain_size = 500\ntest_size = 250\n\ntrain1 = np.random.multivariate_normal(mean=mul1, cov=sigma1, size=train_size)\ntrain1_label = np.zeros((train_size, 1))\n\ntrain2 = np.random.multivariate_normal(mean=mul2, cov=sigma2, size=train_size)\ntrain2_label = np.ones((train_size, 1))\nX_train = np.vstack([train1, train2])\ny_train = np.vstack([train1_label, train2_label])\nprint(\"Train set samples: \\n\",X_train[:5], X_train[-5:])\nprint(\"Train set labels: \\n\", y_train[:5], y_train[-5:])\n\ntest1 = np.random.multivariate_normal(mean=mul1, cov=sigma1, size=test_size)\ntest1_label = np.zeros((test_size, 1))\ntest2 = np.random.multivariate_normal(mean=mul2, cov=sigma2, size=test_size)\ntest2_label = np.ones((test_size, 1))\nX_test = np.vstack([test1, test2])\ny_test = np.vstack([test1_label, test2_label])\nprint(\"Test set samples: \\n\", X_test[:5], X_test[-5:])\nprint(\"Test set labels: \\n\", y_test[:5], y_test[-5:])", "Train set samples: \n [[ 2.58404558 1.29692349]\n [ 1.17246022 1.19740188]\n [ 1.38016946 -0.43532193]\n [ 1.22871198 0.69240206]\n [-0.5638184 -0.03940703]] [[ 0.56631183 2.10367925]\n [-1.20375122 -0.1932487 ]\n [ 0.40916517 2.05219985]\n [-0.04775581 2.34133983]\n [ 0.4270832 0.57494098]]\nTrain set labels: \n [[0.]\n [0.]\n [0.]\n [0.]\n [0.]] [[1.]\n [1.]\n [1.]\n [1.]\n [1.]]\nTest set samples: \n [[-0.01892415 -0.00839902]\n [ 2.26451904 1.09639391]\n [ 1.01819778 0.83869822]\n [ 2.85415183 0.99880958]\n [ 1.20108847 0.25357878]] [[ 1.69245774 2.16627834]\n [ 0.67392591 1.37774288]\n [-0.70985022 0.94157795]\n [ 1.36430594 2.20540076]\n [ 0.63321523 1.26577979]]\nTest set labels: \n [[0.]\n [0.]\n [0.]\n [0.]\n [0.]] [[1.]\n [1.]\n [1.]\n [1.]\n [1.]]\n" ], [ "num_epochs = 100000\nlearning_rates = [1, 0.1, 0.01, 0.001]", "_____no_output_____" ], [ "class LogisticReg(torch.nn.Module):\n def __init__(self):\n super(LogisticReg, self).__init__()\n self.fc = torch.nn.Linear(2, 1)\n \n def forward(self, x):\n x = self.fc(x)\n return F.sigmoid(x)\n\nclass NormDataset(torch.utils.data.Dataset):\n def __init__(self, x, y):\n self.len = x.shape[0]\n self.device = 'cuda'if torch.cuda.is_available() else 'cpu'\n self.x_data = torch.as_tensor(x, device=self.device, dtype=torch.float)\n self.y_data = torch.as_tensor(y, device=self.device, dtype=torch.float)\n \n def __getitem__(self, index):\n return self.x_data[index], self.y_data[index]\n \n def __len__(self):\n return self.len \n\nloss_func = torch.nn.BCELoss()", "_____no_output_____" ], [ "train_set, test_set = NormDataset(X_train, y_train), NormDataset(X_test, y_test)\ntrain_loader = torch.utils.data.DataLoader(dataset=train_set, batch_size=32, shuffle=False)\nX_test_tsr, y_test_tsr = Variable(torch.from_numpy(X_test).float(), requires_grad=False), Variable(torch.from_numpy(y_test).float(), requires_grad=False)\n\nwriter = SummaryWriter()\nfor lr in learning_rates:\n model = LogisticReg()\n optimizer = torch.optim.SGD(model.parameters(), lr=lr)\n prev_norm, norms, cnt = torch.tensor(0), torch.tensor(0), 0\n print(\"Parameters before training:\")\n for name, param in model.named_parameters():\n if 
param.requires_grad:\n print(name, param.data)\n for epoch in tqdm(range(num_epochs)):\n early_stop = False\n for i, data in enumerate(train_loader):\n X_train_tsr, y_train_tsr = data\n y_pred = model(X_train_tsr)\n loss = loss_func(y_pred, y_train_tsr)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n norms = torch.norm(model.fc.weight.grad)+torch.norm(model.fc.bias.grad)\n if prev_norm.data==norms.data and cnt<10:\n cnt += 1\n if cnt==10:\n print('Early stopping at {} epoch when norms={}'.format(epoch, norms.data))\n break\n writer.add_scalar('Loss/lr='+str(lr), loss, epoch)\n writer.add_scalar('GradNorm/lr='+str(lr), norms, epoch)\n prev_norm = norms\n test_pred = model.forward(X_test_tsr).data.numpy()\n test_pred = np.where(test_pred>0.5, 1., 0.)\n acc = accuracy_score(test_pred, y_test_tsr.data.numpy())\n print(\"\\nParameters after training:\")\n for name, param in model.named_parameters():\n if param.requires_grad:\n print(name, param.data)\n print('\\nWhen lr={}, the accuracy is {}'.format(lr, acc))\n print('------'*10)", "Parameters before training:\nfc.weight tensor([[-0.1240, -0.1621]])\nfc.bias tensor([0.3630])\n" ], [ "train_set, test_set = NormDataset(X_train, y_train), NormDataset(X_test, y_test)\ntrain_loader = torch.utils.data.DataLoader(dataset=train_set, batch_size=1, shuffle=False)\nX_test_tsr, y_test_tsr = Variable(torch.from_numpy(X_test).float(), requires_grad=False), Variable(torch.from_numpy(y_test).float(), requires_grad=False)\n\nwriter = SummaryWriter()\nfor lr in learning_rates:\n model = LogisticReg()\n optimizer = torch.optim.SGD(model.parameters(), lr=lr)\n prev_norm, norms, cnt = torch.tensor(0), torch.tensor(0), 0\n print(\"Parameters before training:\")\n for name, param in model.named_parameters():\n if param.requires_grad:\n print(name, param.data)\n for epoch in tqdm(range(num_epochs)):\n early_stop = False\n for i, data in enumerate(train_loader):\n X_train_tsr, y_train_tsr = data\n y_pred = model(X_train_tsr)\n loss = loss_func(y_pred, y_train_tsr)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n norms = torch.norm(model.fc.weight.grad)+torch.norm(model.fc.bias.grad)\n if prev_norm.data==norms.data and cnt<10:\n cnt += 1\n if cnt==10:\n print('Early stopping at {} epoch when norms={}'.format(epoch, norms.data))\n break\n writer.add_scalar('Loss/lr='+str(lr), loss, epoch)\n writer.add_scalar('GradNorm/lr='+str(lr), norms, epoch)\n prev_norm = norms\n test_pred = model.forward(X_test_tsr).data.numpy()\n test_pred = np.where(test_pred>0.5, 1., 0.)\n acc = accuracy_score(test_pred, y_test_tsr.data.numpy())\n print(\"\\nParameters after training:\")\n for name, param in model.named_parameters():\n if param.requires_grad:\n print(name, param.data)\n print('\\nWhen lr={}, the accuracy is {}'.format(lr, acc))\n print('------'*10)", "Parameters before training:\nfc.weight tensor([[-0.7055, 0.0255]])\nfc.bias tensor([-0.2273])\n" ], [ "import matplotlib.pyplot as plt\n# Visualizations\n\ndef plot_decision_boundary(X, y):\n # Set min and max values and give it some padding\n x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5\n y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5\n h = 0.01\n # Generate a grid of points with distance h between them\n xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))\n # Predict the function value for the whole gid\n test_grid = np.c_[xx.ravel(), yy.ravel()]\n test_grid_tsr = torch.from_numpy(test_grid).type(torch.FloatTensor)\n Z = model(test_grid_tsr)\n Z = 
np.where(Z>0.5, 1., 0.)\n Z = Z.reshape(xx.shape)\n # Plot the contour and training examples\n plt.contourf(xx, yy, Z, cmap=plt.cm.Spectral)\n plt.scatter(test1[:, 0], test1[:, 1], label='$\\mu_1, \\sigma_1$')\n plt.scatter(test2[:, 0], test2[:, 1], label='$\\mu_2, \\sigma_2$')\n plt.legend()\n\nplot_decision_boundary(X_test, y_test)", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code" ] ]
d0154d398f24e2ab10846fc7414cf974e8aa14d9
16,284
ipynb
Jupyter Notebook
Scala-basics.ipynb
FranciscoJavierMartin/Notebooks
fecc6184183558f7fa0ad9744909e11ec7c2e5b6
[ "MIT" ]
null
null
null
Scala-basics.ipynb
FranciscoJavierMartin/Notebooks
fecc6184183558f7fa0ad9744909e11ec7c2e5b6
[ "MIT" ]
null
null
null
Scala-basics.ipynb
FranciscoJavierMartin/Notebooks
fecc6184183558f7fa0ad9744909e11ec7c2e5b6
[ "MIT" ]
null
null
null
25.603774
397
0.548637
[ [ [ "# Tutorial sobre Scala", "_____no_output_____" ], [ "## Declaraciones", "_____no_output_____" ], [ "### Declaración de variables", "_____no_output_____" ], [ "Existen dos categorias de variables: inmutables y mutables. Las variables mutables son aquellas en las que es posible modificar el contenido de la variable. Las variables inmutables son aquellas en las que no es posible alterar el contenido de las variables, se recomienda el uso de esta ultima. La declaración del tipo de la variable es opcional, Scala es capaz de inferir el tipo del dato.", "_____no_output_____" ] ], [ [ "//Variable inmutable\nval a:Int=1\n\n//variable mutable\nvar b:Int=2", "_____no_output_____" ] ], [ [ "### Tipos de datos", "_____no_output_____" ], [ "![Diagramas de tipos de datos](https://www.scala-lang.org/old/sites/default/files/images/classhierarchy.png)", "_____no_output_____" ], [ "Siempre que se infiere un tipo en Scala, el tipo escogido será siempre el mas bajo posible en la jerarquía.\n\nAlgunos tipos especiales:\n\n- **Any**: Es la clase de la que heredan todas las clases en Scala. Es la clase mas basica.\n- **AnyVal**: Es la clase padre de todas las clases que representan tipos primitivos.\n- **AnyRef**: Es la clase padre de todas las clases que no representan tipos primitivos. Todas las subclases de Scala y Java heredan de ella.\n- **ScalaObject**: Es la clase de la que heredan todas y solo las clases de Scala.\n- **Unit**: Equivale a `void`. Usar cuando una función no debe retornar ningún valor.\n- **Nothing**: Es la clase que hereda de todas las clases. Usar solo cuando no acaba la ejecución como en `While(true)`.", "_____no_output_____" ], [ "### Declaración de funciones", "_____no_output_____" ] ], [ [ "def funcion1(a:Int,b:Int):Int={\n return a+b\n}", "_____no_output_____" ], [ "def funcion2(a:Int,b:Int)={\n a+b\n}", "_____no_output_____" ], [ "def funcion3(a:Int,b:Int)=a+b", "_____no_output_____" ] ], [ [ "Al igual que con la declaración de variables no es obligatorio declarar el tipo devuelto por la función. Si no se declara una sentencia `return`, el valor de la ultima instrucción es el devuelto por la función.", "_____no_output_____" ], [ "### Interpolación de cadenas\nLa interpolación de cadenas consiste insertar el valor de una variable dentro de una cadena, tambien es posible usar expresiones. 
", "_____no_output_____" ] ], [ [ "val valor=1\nval expresion=2\n\nprintln(s\"El valor de la variable ${valor} y la expresion vale ${expresion+1}\") ", "El valor de la variable 1 y la expresion vale 3\n" ] ], [ [ "## Estructuras de selección", "_____no_output_____" ], [ "### If/Else", "_____no_output_____" ] ], [ [ "//Funciona igual que en Java\nval verdad:Boolean=true;\n\nif (verdad){\n println(\"Hola\")\n}else{\n println(\"Adios\") \n}\n", "_____no_output_____" ] ], [ [ "En Scala no existe la estructura `switch`, en su lugar existe lo conocido como *pattern matching*", "_____no_output_____" ], [ "### Match", "_____no_output_____" ] ], [ [ "val numero:Int=3\n\nval nombre=numero match{ //Puede ir dentro de la llamada a una funcion\n case 1=> \"Uno\"\n case 2=> \"Dos\"\n case 3=> \"Tres\"\n case _=> \"Ninguno\" //Es obligatorio incluir una clausula con _ que se ejecuta cuando no hay coincidencia\n}\n\nprintln(nombre)", "_____no_output_____" ] ], [ [ "## Estructuras de repetición", "_____no_output_____" ], [ "### Bucle *While*", "_____no_output_____" ] ], [ [ "//Igual que en Java\nvar x=0\n\nwhile(x<5){\n print(x)\n x+=1\n}", "_____no_output_____" ] ], [ [ "### Bucle *Do While*", "_____no_output_____" ] ], [ [ "//Igual que en Java\nvar x=0\n\ndo{\n print(x)\n x+=1\n}while(x<5)", "_____no_output_____" ] ], [ [ "### Bucle *For*", "_____no_output_____" ] ], [ [ "println(\"For to\")\nfor(i<- 1 to 5){ //Hasta el limite inclusive\n print(i)\n}\n\nprintln(\"\\nFor until\")\nfor(i<- 1 until 5){ //Hasta el limite exclusive\n print(i)\n}\n\nprintln(\"\\nFor para colecciones\")\nfor(i <- List(1,2,3,4)){ //For para recorrer colecciones\n print(i)\n}", "_____no_output_____" ] ], [ [ "### *foreach*", "_____no_output_____" ] ], [ [ "val lista=List(1,2,3,4)\n\nlista.foreach(x=> print(x)) //La funcion no devuelve nada y no modifica el conjunto", "_____no_output_____" ] ], [ [ "## Clases", "_____no_output_____" ], [ "### Indicaciones previas\nSe deben declarar entre parentesis todos los atributos que vaya a usar la clase. Se pueden declarar otros constructores mediante la definición de this, pero siempre se debe llamar al constructor por defecto que es el que contiene todos los atributos.\n\nLos parametros de un constructor constituyen los atributos de la clase y son privados por defecto, si se desea que sean públicos, se debe agregar val (o var) en la declaracion del argumento. Tambien es posible declarar atributos dentro de la propia clase. 
These can carry the `public`, `private` or `readonly` modifiers.", "_____no_output_____" ], [ "### Default constructor", "_____no_output_____" ] ], [ [ "//Class declaration\nclass Saludo(mensaje: String) { //These are the attributes; they are accessible from any method of the class\n    \n    def diHola(nombre:String):Unit ={\n        println(mensaje+\" \"+nombre);\n    }\n}\n\nval saludo = new Saludo(\"Hola\")\nsaludo.diHola(\"Pepe\")", "_____no_output_____" ] ], [ [ "### Custom constructors", "_____no_output_____" ] ], [ [ "class OtroSaludo(m:String,nombre:String){ //All the attributes that are going to be used must be declared\n    \n    def this()={\n        this(\"Hola\",\"Pepe\") //The default constructor must always be called\n    }\n    \n    def this(mensaje:String){\n        this(\"Hola\",\"Jose\")\n    }\n    \n    def saludar()={\n        println(this.m+\" \"+nombre)\n    }\n\n}\n\nval sal=new OtroSaludo()\nsal.saludar()", "_____no_output_____" ] ], [ [ "### Inheritance", "_____no_output_____" ] ], [ [ "class Punto(var x:Int,var y:Int){\n    \n    def mover(dx:Int,dy:Int):Unit={\n        this.x=dx\n        this.y=dy\n    }\n}\n\nclass Particula(x:Int,y:Int,masa:Int) extends Punto(x:Int,y:Int){\n    \n    override def toString():String={ //To redefine a method of a parent class, add override\n        return s\"X:${this.x} Y:${this.y} M:${this.masa}\";\n    }\n    \n}\n\nval particula=new Particula(0,0,0);\nparticula.mover(1,1)\nprintln(particula.toString())", "_____no_output_____" ] ], [ [ "### Abstract classes", "_____no_output_____" ] ], [ [ "abstract class Figura(lado:Int){\n    \n    def getPerimetro():Double; //Method without an implementation\n    \n    def printLado():Unit= println(\"El lado mide \"+this.lado) //Implemented method\n}\n\nclass Cuadrado(lado:Int,n:Int) extends Figura(lado:Int){\n    \n    override def getPerimetro():Double={\n        return lado*lado;\n    }\n}\n\nval figura:Figura=new Cuadrado(4,0)\nprintln(\"El perimetro es \"+figura.getPerimetro())\nfigura.printLado();", "_____no_output_____" ] ], [ [ "## Traits", "_____no_output_____" ], [ "They are similar to the interfaces of other programming languages. However, they have two main differences with respect to interfaces:\n- They can be partially implemented, as happens with abstract classes.\n- They cannot have parameters in the constructor.", "_____no_output_____" ] ], [ [ "trait Correo{\n    def enviar():Unit;\n    def recibir(mensaje:String):Unit={\n        println(s\"Mensaje recibido: ${mensaje}\")\n    }\n}\n\nclass CorreoPostal() extends Correo{\n    \n    override def enviar()={\n        println(\"Enviado desde correo postal\")\n    }\n}\n\nclass CorreoElectronico(usuario:String) extends Correo{\n    \n    override def enviar()={\n        println(s\"Enviado por ${usuario}\")\n    }\n}\n\nval carta:Correo=new CorreoPostal()\nval email:Correo=new CorreoElectronico(\"pepe\")\n\ncarta.enviar()\ncarta.recibir(\"Hola desde carta\")\n\nemail.enviar()\nemail.recibir(\"Hola desde email\")\n", "_____no_output_____" ] ], [ [ "## Collections", "_____no_output_____" ], [ "The collections included by default are immutable: elements cannot be added or removed. Operations such as *add* and the like instead return a new collection with the new elements. 
Al crear la nueva colección se agregan las referencias de los objetos y por tanto casi no tiene penalización en tiempo de ejecución y en consumo de memoria.", "_____no_output_____" ] ], [ [ "val lista=List(1,2,3) //Lista inmutable\n0::lista //Devuelve una lista con el nuevo elemento insertado al principio\nlista.head //Devuelve el primer elemento de la lista\nlista.tail //Devuelve toda la lista excepto el primer elemento\nlista:::lista //Concatena dos listas y devuelve el resultado", "_____no_output_____" ] ], [ [ "### Operaciones y funciones sobre conjuntos (y similares)", "_____no_output_____" ] ], [ [ "val conjunto=Set(1,2,3)\n\nval conjunto2=conjunto.map(x => x+3) //Ejecuta la funcion que se le pasa a cada miembro de la coleccion\n\nval conjunto3=List(conjunto,conjunto2).flatten //Crea una nueva coleccion con los elementos de las sub-colecciones\n\nSet(1,4,9).flatMap { x => Set(x,x+1) } //FlatMap\n\nval lista=(List(1,2,3)++List(1,2,3))\nlista.distinct //Devuelve una lista con todos los elementos distintos\n\nSet(1,2,3)(1) //Devuelve true si el elemento esta contenido en la coleccion, false en caso contrario\nList(4,5,6)(1) //Devuelve el elemento de la posicion indicada\n\nval conjuntoImpares=conjunto.filter(x => x%2!=0) //Devuelve otro conjunto con los elementos que superen el filtro\n\nval escalar:Int=1\n\n//Para conjuntos inmutables\nconjunto+escalar //Agrega el elemento al conjunto y devuelve una copia\nconjunto++conjunto2 //Union de conjuntos\nconjunto-escalar //Extrae del conjunto\nconjunto--conjunto2 //Diferencia de conjuntos\nconjunto&conjunto2 //Interseccion\n\n//Solo para conjuntos mutables\nval conjuntoMutable=scala.collection.mutable.Set(1,2,3)\nval conjuntoMutable2=scala.collection.mutable.Set(3,4,5)\n\nconjuntoMutable+= escalar //Agrega el valor al conjunto\nconjuntoMutable++=conjuntoMutable2 //Agrega los elementos del segundo conjunto al primero\nconjuntoMutable retain { x=> x%2==0} //Se queda solo con los elementos que cumplan la condicion", "_____no_output_____" ] ], [ [ "## Mapas", "_____no_output_____" ], [ "Son estructuras clave/valor similares a los Mapas de Java o los diccionarios de Python.", "_____no_output_____" ] ], [ [ "val mapa=Map(1->\"Uno\",2->\"Dos\",3->\"Tres\")", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ] ]
d01553870d5fb779ef707dd02956fdbc72c2c200
34,999
ipynb
Jupyter Notebook
assets/assignments/a4_dcgan.ipynb
uoft-csc413/2022
769a215505e4fdb706e85bcae37f7bbf662389eb
[ "MIT" ]
2
2022-01-11T23:51:03.000Z
2022-01-31T14:41:01.000Z
assets/assignments/a4_dcgan.ipynb
uoft-csc413/2022
769a215505e4fdb706e85bcae37f7bbf662389eb
[ "MIT" ]
4
2022-02-05T00:25:13.000Z
2022-02-26T21:38:45.000Z
assets/assignments/a4_dcgan.ipynb
uoft-csc413/2022
769a215505e4fdb706e85bcae37f7bbf662389eb
[ "MIT" ]
3
2022-02-04T23:29:49.000Z
2022-02-26T19:33:45.000Z
34,999
34,999
0.561673
[ [ [ "# Colab FAQ\n\nFor some basic overview and features offered in Colab notebooks, check out: [Overview of Colaboratory Features](https://colab.research.google.com/notebooks/basic_features_overview.ipynb)\n\nYou need to use the colab GPU for this assignmentby selecting:\n\n> **Runtime**   →   **Change runtime type**   →   **Hardware Accelerator: GPU**", "_____no_output_____" ], [ "# Setup PyTorch\nAll files are stored at /content/csc421/a4/ folder\n", "_____no_output_____" ] ], [ [ "######################################################################\n# Setup python environment and change the current working directory\n######################################################################\n!pip install torch torchvision\n!pip install imageio\n\n!pip install matplotlib\n\n%mkdir -p /content/csc413/a4/\n%cd /content/csc413/a4", "_____no_output_____" ] ], [ [ "# Helper code", "_____no_output_____" ], [ "## Utility functions", "_____no_output_____" ] ], [ [ "import os\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimport torch\nfrom torch import nn\nfrom torch.nn import Parameter\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torch.autograd import Variable\nfrom torch.utils.data import DataLoader\nfrom torchvision import datasets\nfrom torchvision import transforms\n\nfrom six.moves.urllib.request import urlretrieve\nimport tarfile\n\nimport imageio\nfrom urllib.error import URLError\nfrom urllib.error import HTTPError\n\n\ndef get_file(fname,\n origin,\n untar=False,\n extract=False,\n archive_format='auto',\n cache_dir='data'):\n datadir = os.path.join(cache_dir)\n if not os.path.exists(datadir):\n os.makedirs(datadir)\n\n if untar:\n untar_fpath = os.path.join(datadir, fname)\n fpath = untar_fpath + '.tar.gz'\n else:\n fpath = os.path.join(datadir, fname)\n\n print(fpath)\n if not os.path.exists(fpath):\n print('Downloading data from', origin)\n\n error_msg = 'URL fetch failure on {}: {} -- {}'\n try:\n try:\n urlretrieve(origin, fpath)\n except URLError as e:\n raise Exception(error_msg.format(origin, e.errno, e.reason))\n except HTTPError as e:\n raise Exception(error_msg.format(origin, e.code, e.msg))\n except (Exception, KeyboardInterrupt) as e:\n if os.path.exists(fpath):\n os.remove(fpath)\n raise\n\n if untar:\n if not os.path.exists(untar_fpath):\n print('Extracting file.')\n with tarfile.open(fpath) as archive:\n archive.extractall(datadir)\n return untar_fpath\n\n return fpath\n\n\nclass AttrDict(dict):\n def __init__(self, *args, **kwargs):\n super(AttrDict, self).__init__(*args, **kwargs)\n self.__dict__ = self\n\n \ndef to_var(tensor, cuda=True):\n \"\"\"Wraps a Tensor in a Variable, optionally placing it on the GPU.\n\n Arguments:\n tensor: A Tensor object.\n cuda: A boolean flag indicating whether to use the GPU.\n\n Returns:\n A Variable object, on the GPU if cuda==True.\n \"\"\"\n if cuda:\n return Variable(tensor.cuda())\n else:\n return Variable(tensor)\n\n \ndef to_data(x):\n \"\"\"Converts variable to numpy.\"\"\"\n if torch.cuda.is_available():\n x = x.cpu()\n return x.data.numpy()\n\n\ndef create_dir(directory):\n \"\"\"Creates a directory if it doesn't already exist.\n \"\"\"\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n\ndef gan_checkpoint(iteration, G, D, opts):\n \"\"\"Saves the parameters of the generator G and discriminator D.\n \"\"\"\n G_path = os.path.join(opts.checkpoint_dir, 'G.pkl')\n D_path = os.path.join(opts.checkpoint_dir, 'D.pkl')\n torch.save(G.state_dict(), G_path)\n torch.save(D.state_dict(), 
D_path)\n\ndef load_checkpoint(opts):\n \"\"\"Loads the generator and discriminator models from checkpoints.\n \"\"\"\n G_path = os.path.join(opts.load, 'G.pkl')\n D_path = os.path.join(opts.load, 'D_.pkl')\n\n G = DCGenerator(noise_size=opts.noise_size, conv_dim=opts.g_conv_dim, spectral_norm=opts.spectral_norm)\n D = DCDiscriminator(conv_dim=opts.d_conv_dim)\n\n G.load_state_dict(torch.load(G_path, map_location=lambda storage, loc: storage))\n D.load_state_dict(torch.load(D_path, map_location=lambda storage, loc: storage))\n\n if torch.cuda.is_available():\n G.cuda()\n D.cuda()\n print('Models moved to GPU.')\n\n return G, D\n\n\ndef merge_images(sources, targets, opts):\n \"\"\"Creates a grid consisting of pairs of columns, where the first column in\n each pair contains images source images and the second column in each pair\n contains images generated by the CycleGAN from the corresponding images in\n the first column.\n \"\"\"\n _, _, h, w = sources.shape\n row = int(np.sqrt(opts.batch_size))\n merged = np.zeros([3, row * h, row * w * 2])\n for (idx, s, t) in (zip(range(row ** 2), sources, targets, )):\n i = idx // row\n j = idx % row\n merged[:, i * h:(i + 1) * h, (j * 2) * h:(j * 2 + 1) * h] = s\n merged[:, i * h:(i + 1) * h, (j * 2 + 1) * h:(j * 2 + 2) * h] = t\n return merged.transpose(1, 2, 0)\n\n\ndef generate_gif(directory_path, keyword=None):\n images = []\n for filename in sorted(os.listdir(directory_path)):\n if filename.endswith(\".png\") and (keyword is None or keyword in filename):\n img_path = os.path.join(directory_path, filename)\n print(\"adding image {}\".format(img_path))\n images.append(imageio.imread(img_path))\n\n if keyword:\n imageio.mimsave(\n os.path.join(directory_path, 'anim_{}.gif'.format(keyword)), images)\n else:\n imageio.mimsave(os.path.join(directory_path, 'anim.gif'), images)\n\n\ndef create_image_grid(array, ncols=None):\n \"\"\"\n \"\"\"\n num_images, channels, cell_h, cell_w = array.shape\n if not ncols:\n ncols = int(np.sqrt(num_images))\n nrows = int(np.math.floor(num_images / float(ncols)))\n result = np.zeros((cell_h * nrows, cell_w * ncols, channels), dtype=array.dtype)\n for i in range(0, nrows):\n for j in range(0, ncols):\n result[i * cell_h:(i + 1) * cell_h, j * cell_w:(j + 1) * cell_w, :] = array[i * ncols + j].transpose(1, 2,\n 0)\n\n if channels == 1:\n result = result.squeeze()\n return result\n\n\ndef gan_save_samples(G, fixed_noise, iteration, opts):\n generated_images = G(fixed_noise)\n generated_images = to_data(generated_images)\n\n grid = create_image_grid(generated_images)\n\n # merged = merge_images(X, fake_Y, opts)\n path = os.path.join(opts.sample_dir, 'sample-{:06d}.png'.format(iteration))\n imageio.imwrite(path, grid)\n print('Saved {}'.format(path))", "_____no_output_____" ] ], [ [ "## Data loader", "_____no_output_____" ] ], [ [ "def get_emoji_loader(emoji_type, opts):\n \"\"\"Creates training and test data loaders.\n \"\"\"\n transform = transforms.Compose([\n transforms.Scale(opts.image_size),\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n ])\n\n train_path = os.path.join('data/emojis', emoji_type)\n test_path = os.path.join('data/emojis', 'Test_{}'.format(emoji_type))\n\n train_dataset = datasets.ImageFolder(train_path, transform)\n test_dataset = datasets.ImageFolder(test_path, transform)\n\n train_dloader = DataLoader(dataset=train_dataset, batch_size=opts.batch_size, shuffle=True, num_workers=opts.num_workers)\n test_dloader = DataLoader(dataset=test_dataset, 
batch_size=opts.batch_size, shuffle=False, num_workers=opts.num_workers)\n\n    return train_dloader, test_dloader", "_____no_output_____" ] ], [ [ "## Training and evaluation code", "_____no_output_____" ] ], [ [ "def print_models(G_XtoY, G_YtoX, D_X, D_Y):\n    \"\"\"Prints model information for the generators and discriminators.\n    \"\"\"\n    print(\"                 G                  \")\n    print(\"---------------------------------------\")\n    print(G_XtoY)\n    print(\"---------------------------------------\")\n\n    print(\"                  D                  \")\n    print(\"---------------------------------------\")\n    print(D_X)\n    print(\"---------------------------------------\")\n\n\ndef create_model(opts):\n    \"\"\"Builds the generators and discriminators.\n    \"\"\"\n    ### GAN\n    G = DCGenerator(noise_size=opts.noise_size, conv_dim=opts.g_conv_dim, spectral_norm=opts.spectral_norm)\n    D = DCDiscriminator(conv_dim=opts.d_conv_dim, spectral_norm=opts.spectral_norm)\n\n    print_models(G, None, D, None)\n\n    if torch.cuda.is_available():\n        G.cuda()\n        D.cuda()\n        print('Models moved to GPU.')\n    return G, D\n\ndef train(opts):\n    \"\"\"Loads the data, creates checkpoint and sample directories, and starts the training loop.\n    \"\"\"\n\n    # Create train and test dataloaders for images from the two domains X and Y\n    dataloader_X, test_dataloader_X = get_emoji_loader(emoji_type=opts.X, opts=opts)\n    \n    # Create checkpoint and sample directories\n    create_dir(opts.checkpoint_dir)\n    create_dir(opts.sample_dir)\n\n    # Start training\n    if opts.least_squares_gan:\n        G, D = gan_training_loop_leastsquares(dataloader_X, test_dataloader_X, opts)\n    else:\n        G, D = gan_training_loop_regular(dataloader_X, test_dataloader_X, opts)\n    \n    return G, D\n\ndef print_opts(opts):\n    \"\"\"Prints the values of all command-line arguments.\n    \"\"\"\n    print('=' * 80)\n    print('Opts'.center(80))\n    print('-' * 80)\n    for key in opts.__dict__:\n        if opts.__dict__[key]:\n            print('{:>30}: {:<30}'.format(key, opts.__dict__[key]).center(80))\n    print('=' * 80)\n", "_____no_output_____" ] ], [ [ "# Your code for generators and discriminators", "_____no_output_____" ], [ "## Helper modules", "_____no_output_____" ] ], [ [ "def sample_noise(batch_size, dim):\n    \"\"\"\n    Generate a PyTorch Tensor of uniform random noise.\n\n    Input:\n    - batch_size: Integer giving the batch size of noise to generate.\n    - dim: Integer giving the dimension of noise to generate.\n\n    Output:\n    - A PyTorch Tensor of shape (batch_size, dim, 1, 1) containing uniform\n      random noise in the range (-1, 1).\n    \"\"\"\n    return to_var(torch.rand(batch_size, dim) * 2 - 1).unsqueeze(2).unsqueeze(3)\n    \n\ndef upconv(in_channels, out_channels, kernel_size, stride=2, padding=2, batch_norm=True, spectral_norm=False):\n    \"\"\"Creates an upsample-and-convolution layer, with optional batch normalization.\n    \"\"\"\n    layers = []\n    if stride>1:\n        layers.append(nn.Upsample(scale_factor=stride))\n    conv_layer = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=1, padding=padding, bias=False)\n    if spectral_norm:\n        layers.append(SpectralNorm(conv_layer))\n    else:\n        layers.append(conv_layer)\n    if batch_norm:\n        layers.append(nn.BatchNorm2d(out_channels))\n    return nn.Sequential(*layers)\n\n\ndef conv(in_channels, out_channels, kernel_size, stride=2, padding=2, batch_norm=True, init_zero_weights=False, spectral_norm=False):\n    \"\"\"Creates a convolutional layer, with optional batch normalization.\n    \"\"\"\n    layers = []\n    conv_layer = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, 
padding=padding, bias=False)\n if init_zero_weights:\n conv_layer.weight.data = torch.randn(out_channels, in_channels, kernel_size, kernel_size) * 0.001\n \n if spectral_norm:\n layers.append(SpectralNorm(conv_layer))\n else:\n layers.append(conv_layer)\n\n if batch_norm:\n layers.append(nn.BatchNorm2d(out_channels))\n return nn.Sequential(*layers)\n \n\nclass ResnetBlock(nn.Module):\n def __init__(self, conv_dim):\n super(ResnetBlock, self).__init__()\n self.conv_layer = conv(in_channels=conv_dim, out_channels=conv_dim, kernel_size=3, stride=1, padding=1)\n\n def forward(self, x):\n out = x + self.conv_layer(x)\n return out", "_____no_output_____" ] ], [ [ "## DCGAN", "_____no_output_____" ], [ "### Spectral Norm class", "_____no_output_____" ] ], [ [ "def l2normalize(v, eps=1e-12):\n return v / (v.norm() + eps)\n\n\nclass SpectralNorm(nn.Module):\n def __init__(self, module, name='weight', power_iterations=1):\n super(SpectralNorm, self).__init__()\n self.module = module\n self.name = name\n self.power_iterations = power_iterations\n if not self._made_params():\n self._make_params()\n\n def _update_u_v(self):\n u = getattr(self.module, self.name + \"_u\")\n v = getattr(self.module, self.name + \"_v\")\n w = getattr(self.module, self.name + \"_bar\")\n\n height = w.data.shape[0]\n for _ in range(self.power_iterations):\n v.data = l2normalize(torch.mv(torch.t(w.view(height,-1).data), u.data))\n u.data = l2normalize(torch.mv(w.view(height,-1).data, v.data))\n\n # sigma = torch.dot(u.data, torch.mv(w.view(height,-1).data, v.data))\n sigma = u.dot(w.view(height, -1).mv(v))\n setattr(self.module, self.name, w / sigma.expand_as(w))\n\n def _made_params(self):\n try:\n u = getattr(self.module, self.name + \"_u\")\n v = getattr(self.module, self.name + \"_v\")\n w = getattr(self.module, self.name + \"_bar\")\n return True\n except AttributeError:\n return False\n\n def _make_params(self):\n w = getattr(self.module, self.name)\n\n height = w.data.shape[0]\n width = w.view(height, -1).data.shape[1]\n\n u = Parameter(w.data.new(height).normal_(0, 1), requires_grad=False)\n v = Parameter(w.data.new(width).normal_(0, 1), requires_grad=False)\n u.data = l2normalize(u.data)\n v.data = l2normalize(v.data)\n w_bar = Parameter(w.data)\n\n del self.module._parameters[self.name]\n\n self.module.register_parameter(self.name + \"_u\", u)\n self.module.register_parameter(self.name + \"_v\", v)\n self.module.register_parameter(self.name + \"_bar\", w_bar)\n\n def forward(self, *args):\n self._update_u_v()\n return self.module.forward(*args)", "_____no_output_____" ] ], [ [ "### **[Your Task]** GAN generator", "_____no_output_____" ] ], [ [ "class DCGenerator(nn.Module):\n def __init__(self, noise_size, conv_dim, spectral_norm=False):\n super(DCGenerator, self).__init__()\n\n self.conv_dim = conv_dim\n ###########################################\n ## FILL THIS IN: CREATE ARCHITECTURE ##\n ###########################################\n\n # self.linear_bn = ...\n # self.upconv1 = ...\n # self.upconv2 = ...\n # self.upconv3 = ...\n\n def forward(self, z):\n \"\"\"Generates an image given a sample of random noise.\n\n Input\n -----\n z: BS x noise_size x 1 x 1 --> BSx100x1x1 (during training)\n\n Output\n ------\n out: BS x channels x image_width x image_height --> BSx3x32x32 (during training)\n \"\"\"\n batch_size = z.size(0)\n \n out = F.relu(self.linear_bn(z)).view(-1, self.conv_dim*4, 4, 4) # BS x 128 x 4 x 4\n out = F.relu(self.upconv1(out)) # BS x 64 x 8 x 8\n out = F.relu(self.upconv2(out)) # BS x 32 x 16 x 
16\n        out = F.tanh(self.upconv3(out))  # BS x 3 x 32 x 32\n        \n        out_size = out.size()\n        if out_size != torch.Size([batch_size, 3, 32, 32]):\n            raise ValueError(\"expect {} x 3 x 32 x 32, but get {}\".format(batch_size, out_size))\n        return out\n", "_____no_output_____" ] ], [ [ "### GAN discriminator", "_____no_output_____" ] ], [ [ "class DCDiscriminator(nn.Module):\n    \"\"\"Defines the architecture of the discriminator network.\n       Note: Both discriminators D_X and D_Y have the same architecture in this assignment.\n    \"\"\"\n    def __init__(self, conv_dim=64, spectral_norm=False):\n        super(DCDiscriminator, self).__init__()\n\n\n        self.conv1 = conv(in_channels=3, out_channels=conv_dim, kernel_size=5, stride=2, spectral_norm=spectral_norm)\n        self.conv2 = conv(in_channels=conv_dim, out_channels=conv_dim*2, kernel_size=5, stride=2, spectral_norm=spectral_norm)\n        self.conv3 = conv(in_channels=conv_dim*2, out_channels=conv_dim*4, kernel_size=5, stride=2, spectral_norm=spectral_norm)\n        self.conv4 = conv(in_channels=conv_dim*4, out_channels=1, kernel_size=5, stride=2, padding=1, batch_norm=False, spectral_norm=spectral_norm)\n\n    def forward(self, x):\n        batch_size = x.size(0)\n\n        out = F.relu(self.conv1(x))    # BS x 64 x 16 x 16\n        out = F.relu(self.conv2(out))    # BS x 64 x 8 x 8\n        out = F.relu(self.conv3(out))    # BS x 64 x 4 x 4\n\n        out = self.conv4(out).squeeze()\n        out_size = out.size()\n        if out_size != torch.Size([batch_size,]):\n            raise ValueError(\"expect {} x 1, but get {}\".format(batch_size, out_size))\n        return out", "_____no_output_____" ] ], [ [ "### **[Your Task]** GAN training loop \n\n\n*   Regular GAN\n*   Least Squares GAN\n\n", "_____no_output_____" ] ], [ [ "def gan_training_loop_regular(dataloader, test_dataloader, opts):\n    \"\"\"Runs the training loop.\n       * Saves checkpoint every opts.checkpoint_every iterations\n       * Saves generated samples every opts.sample_every iterations\n    \"\"\"\n\n    # Create generators and discriminators\n    G, D = create_model(opts)\n\n    g_params = G.parameters()  # Get generator parameters\n    d_params = D.parameters()  # Get discriminator parameters\n\n    # Create optimizers for the generators and discriminators\n    g_optimizer = optim.Adam(g_params, opts.lr, [opts.beta1, opts.beta2])\n    d_optimizer = optim.Adam(d_params, opts.lr * 2., [opts.beta1, opts.beta2])\n\n    train_iter = iter(dataloader)\n\n    test_iter = iter(test_dataloader)\n\n    # Get some fixed data from domains X and Y for sampling. These are images that are held\n    # constant throughout training, that allow us to inspect the model's performance.\n    fixed_noise = sample_noise(100, opts.noise_size)  # 100 x noise_size x 1 x 1\n\n    iter_per_epoch = len(train_iter)\n    total_train_iters = opts.train_iters\n\n    losses = {\"iteration\": [], \"D_fake_loss\": [], \"D_real_loss\": [], \"G_loss\": []}\n    \n    gp_weight = 1\n\n    adversarial_loss = torch.nn.BCEWithLogitsLoss() # Use this loss\n    # [Hint: you may find the following code helpful]\n    # ones = Variable(torch.Tensor(real_images.shape[0]).float().cuda().fill_(1.0), requires_grad=False)\n\n    try:\n        for iteration in range(1, opts.train_iters + 1):\n\n            # Reset data_iter for each epoch\n            if iteration % iter_per_epoch == 0:\n                train_iter = iter(dataloader)\n\n            real_images, real_labels = train_iter.next()\n            real_images, real_labels = to_var(real_images), to_var(real_labels).long().squeeze()\n            \n\n            for d_i in range(opts.d_train_iters):\n                d_optimizer.zero_grad()\n\n                # FILL THIS IN\n                # 1. Compute the discriminator loss on real images\n                # D_real_loss = ...\n\n                # 2. Sample noise\n                # noise = ...\n\n                # 3. 
Generate fake images from the noise\n # fake_images = ... \n\n # 4. Compute the discriminator loss on the fake images\n # D_fake_loss = ...\n\n # ---- Gradient Penalty ----\n if opts.gradient_penalty:\n alpha = torch.rand(real_images.shape[0], 1, 1, 1)\n alpha = alpha.expand_as(real_images).cuda()\n interp_images = Variable(alpha * real_images.data + (1 - alpha) * fake_images.data, requires_grad=True).cuda()\n D_interp_output = D(interp_images)\n\n gradients = torch.autograd.grad(outputs=D_interp_output, inputs=interp_images,\n grad_outputs=torch.ones(D_interp_output.size()).cuda(),\n create_graph=True, retain_graph=True)[0]\n gradients = gradients.view(real_images.shape[0], -1)\n gradients_norm = torch.sqrt(torch.sum(gradients ** 2, dim=1) + 1e-12)\n\n gp = gp_weight * gradients_norm.mean()\n else:\n gp = 0.0\n\n # --------------------------\n # 5. Compute the total discriminator loss\n # D_total_loss = ...\n\n D_total_loss.backward()\n d_optimizer.step()\n\n ###########################################\n ### TRAIN THE GENERATOR ###\n ###########################################\n\n g_optimizer.zero_grad()\n\n # FILL THIS IN\n # 1. Sample noise\n # noise = ...\n\n # 2. Generate fake images from the noise\n # fake_images = ...\n\n # 3. Compute the generator loss\n # G_loss = ...\n\n G_loss.backward()\n g_optimizer.step()\n\n # Print the log info\n if iteration % opts.log_step == 0:\n losses['iteration'].append(iteration)\n losses['D_real_loss'].append(D_real_loss.item())\n losses['D_fake_loss'].append(D_fake_loss.item())\n losses['G_loss'].append(G_loss.item())\n print('Iteration [{:4d}/{:4d}] | D_real_loss: {:6.4f} | D_fake_loss: {:6.4f} | G_loss: {:6.4f}'.format(\n iteration, total_train_iters, D_real_loss.item(), D_fake_loss.item(), G_loss.item()))\n\n # Save the generated samples\n if iteration % opts.sample_every == 0:\n gan_save_samples(G, fixed_noise, iteration, opts)\n\n # Save the model parameters\n if iteration % opts.checkpoint_every == 0:\n gan_checkpoint(iteration, G, D, opts)\n\n except KeyboardInterrupt:\n print('Exiting early from training.')\n return G, D\n\n plt.figure()\n plt.plot(losses['iteration'], losses['D_real_loss'], label='D_real')\n plt.plot(losses['iteration'], losses['D_fake_loss'], label='D_fake')\n plt.plot(losses['iteration'], losses['G_loss'], label='G')\n plt.legend()\n plt.savefig(os.path.join(opts.sample_dir, 'losses.png'))\n plt.close()\n return G, D", "_____no_output_____" ], [ "def gan_training_loop_leastsquares(dataloader, test_dataloader, opts):\n \"\"\"Runs the training loop.\n * Saves checkpoint every opts.checkpoint_every iterations\n * Saves generated samples every opts.sample_every iterations\n \"\"\"\n\n # Create generators and discriminators\n G, D = create_model(opts)\n\n g_params = G.parameters() # Get generator parameters\n d_params = D.parameters() # Get discriminator parameters\n\n # Create optimizers for the generators and discriminators\n g_optimizer = optim.Adam(g_params, opts.lr, [opts.beta1, opts.beta2])\n d_optimizer = optim.Adam(d_params, opts.lr * 2., [opts.beta1, opts.beta2])\n\n train_iter = iter(dataloader)\n\n test_iter = iter(test_dataloader)\n\n # Get some fixed data from domains X and Y for sampling. 
These are images that are held\n # constant throughout training, that allow us to inspect the model's performance.\n fixed_noise = sample_noise(100, opts.noise_size) # # 100 x noise_size x 1 x 1\n\n iter_per_epoch = len(train_iter)\n total_train_iters = opts.train_iters\n\n losses = {\"iteration\": [], \"D_fake_loss\": [], \"D_real_loss\": [], \"G_loss\": []}\n\n #adversarial_loss = torch.nn.BCEWithLogitsLoss()\n gp_weight = 1\n\n try:\n for iteration in range(1, opts.train_iters + 1):\n\n # Reset data_iter for each epoch\n if iteration % iter_per_epoch == 0:\n train_iter = iter(dataloader)\n\n real_images, real_labels = train_iter.next()\n real_images, real_labels = to_var(real_images), to_var(real_labels).long().squeeze()\n\n\n for d_i in range(opts.d_train_iters):\n d_optimizer.zero_grad()\n\n # FILL THIS IN\n # 1. Compute the discriminator loss on real images\n # D_real_loss = ...\n\n # 2. Sample noise\n # noise = ...\n\n # 3. Generate fake images from the noise\n # fake_images = ...\n\n # 4. Compute the discriminator loss on the fake images\n # D_fake_loss = ...\n\n # ---- Gradient Penalty ----\n if opts.gradient_penalty:\n alpha = torch.rand(real_images.shape[0], 1, 1, 1)\n alpha = alpha.expand_as(real_images).cuda()\n interp_images = Variable(alpha * real_images.data + (1 - alpha) * fake_images.data, requires_grad=True).cuda()\n D_interp_output = D(interp_images)\n\n gradients = torch.autograd.grad(outputs=D_interp_output, inputs=interp_images,\n grad_outputs=torch.ones(D_interp_output.size()).cuda(),\n create_graph=True, retain_graph=True)[0]\n gradients = gradients.view(real_images.shape[0], -1)\n gradients_norm = torch.sqrt(torch.sum(gradients ** 2, dim=1) + 1e-12)\n\n gp = gp_weight * gradients_norm.mean()\n else:\n gp = 0.0\n\n # --------------------------\n # 5. Compute the total discriminator loss\n # D_total_loss = ...\n\n D_total_loss.backward()\n d_optimizer.step()\n\n ###########################################\n ### TRAIN THE GENERATOR ###\n ###########################################\n\n g_optimizer.zero_grad()\n\n # FILL THIS IN\n # 1. Sample noise\n # noise = ...\n\n # 2. Generate fake images from the noise\n # fake_images = ...\n\n # 3. 
Compute the generator loss\n # G_loss = ...\n\n G_loss.backward()\n g_optimizer.step()\n\n # Print the log info\n if iteration % opts.log_step == 0:\n losses['iteration'].append(iteration)\n losses['D_real_loss'].append(D_real_loss.item())\n losses['D_fake_loss'].append(D_fake_loss.item())\n losses['G_loss'].append(G_loss.item())\n print('Iteration [{:4d}/{:4d}] | D_real_loss: {:6.4f} | D_fake_loss: {:6.4f} | G_loss: {:6.4f}'.format(\n iteration, total_train_iters, D_real_loss.item(), D_fake_loss.item(), G_loss.item()))\n\n # Save the generated samples\n if iteration % opts.sample_every == 0:\n gan_save_samples(G, fixed_noise, iteration, opts)\n\n # Save the model parameters\n if iteration % opts.checkpoint_every == 0:\n gan_checkpoint(iteration, G, D, opts)\n\n except KeyboardInterrupt:\n print('Exiting early from training.')\n return G, D\n\n plt.figure()\n plt.plot(losses['iteration'], losses['D_real_loss'], label='D_real')\n plt.plot(losses['iteration'], losses['D_fake_loss'], label='D_fake')\n plt.plot(losses['iteration'], losses['G_loss'], label='G')\n plt.legend()\n plt.savefig(os.path.join(opts.sample_dir, 'losses.png'))\n plt.close()\n return G, D", "_____no_output_____" ] ], [ [ "# **[Your Task]** Training\n", "_____no_output_____" ], [ "## Download dataset", "_____no_output_____" ] ], [ [ "######################################################################\n# Download Translation datasets\n######################################################################\ndata_fpath = get_file(fname='emojis', \n origin='http://www.cs.toronto.edu/~jba/emojis.tar.gz', \n untar=True)", "_____no_output_____" ] ], [ [ "## Train DCGAN", "_____no_output_____" ] ], [ [ "SEED = 11\n\n# Set the random seed manually for reproducibility.\nnp.random.seed(SEED)\ntorch.manual_seed(SEED)\nif torch.cuda.is_available():\n torch.cuda.manual_seed(SEED)\n\n\nargs = AttrDict()\nargs_dict = {\n 'image_size':32, \n 'g_conv_dim':32, \n 'd_conv_dim':64,\n 'noise_size':100,\n 'num_workers': 0,\n 'train_iters':20000,\n 'X':'Apple', # options: 'Windows' / 'Apple'\n 'Y': None,\n 'lr':0.00003,\n 'beta1':0.5,\n 'beta2':0.999,\n 'batch_size':32, \n 'checkpoint_dir': 'results/checkpoints_gan_gp1_lr3e-5',\n 'sample_dir': 'results/samples_gan_gp1_lr3e-5',\n 'load': None,\n 'log_step':200,\n 'sample_every':200,\n 'checkpoint_every':1000,\n 'spectral_norm': False,\n 'gradient_penalty': True,\n 'least_squares_gan': False,\n 'd_train_iters': 1\n}\nargs.update(args_dict)\n\nprint_opts(args)\nG, D = train(args)\n\ngenerate_gif(\"results/samples_gan_gp1_lr3e-5\")", "_____no_output_____" ] ], [ [ "## Download your output", "_____no_output_____" ] ], [ [ "!zip -r /content/csc413/a4/results/samples.zip /content/csc413/a4/results/samples_gan_gp1_lr3e-5\n\nfrom google.colab import files\nfiles.download(\"/content/csc413/a4/results/samples.zip\")", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
d01554720783f588f57e569dfdfe2809822e869a
12,939
ipynb
Jupyter Notebook
python/fig5_logit_all.ipynb
thomasnicolet/Paper_canteen_dilemma
e1cc51db694934717f4819849c6c40c3011905c7
[ "MIT" ]
null
null
null
python/fig5_logit_all.ipynb
thomasnicolet/Paper_canteen_dilemma
e1cc51db694934717f4819849c6c40c3011905c7
[ "MIT" ]
null
null
null
python/fig5_logit_all.ipynb
thomasnicolet/Paper_canteen_dilemma
e1cc51db694934717f4819849c6c40c3011905c7
[ "MIT" ]
1
2020-11-29T18:12:33.000Z
2020-11-29T18:12:33.000Z
59.902778
7,460
0.747508
[ [ [ "%matplotlib inline\nimport io\nimport os\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as patch\nimport pandas as pd\nimport numpy as np\nimport random\nimport seaborn as sns\nsns.set_style('white')\nsns.set_context('notebook')\nrandom.seed(1)", "c:\\users\\hjl161\\appdata\\local\\programs\\python\\python37\\lib\\site-packages\\statsmodels\\tools\\_testing.py:19: FutureWarning: pandas.util.testing is deprecated. Use the functions in the public API at pandas.testing instead.\n import pandas.util.testing as tm\n" ], [ "datafiles = [\n '../data/MTurk_anonymous.xlsx',\n '../data/DTU1_anonymous.xlsx',\n '../data/DTU2_anonymous.xlsx',\n]", "_____no_output_____" ], [ "df_all = pd.DataFrame()\nfor datafile in datafiles:\n df = pd.DataFrame(pd.read_excel(datafile))\n df_all = df_all.append(df, sort=True)", "_____no_output_____" ] ], [ [ "Do some cleaning and reformatting:", "_____no_output_____" ] ], [ [ "df.drop(df.columns[df.columns.str.contains('unnamed',case = False)], \n axis = 1, inplace = True)\ndf = df[['arrival', 'choice']]\ndf['arrival'].replace({9.0: 8.6, 9.1: 8.7}, inplace=True)\ndf.head()", "_____no_output_____" ], [ "fig, ax = plt.subplots()\nfig.set_size_inches(6.7, 1.2)\nfig = sns.regplot(x='arrival', y='choice', data=df, \n scatter_kws={\"color\": \"white\"}, \n ci=95, n_boot=10000, logistic=True, ax=ax)\nplt.setp(fig.collections[1], alpha=1) # setting translucency of CI to zero\nfig.set(xlim=(8.2, 8.7))\nfig.axis('off')\n#plt.rcParams['figure.figsize']=(6.7,.2)\nplt.rcParams[\"font.family\"] = \"sans-serif\"\nPLOTS_DIR = '../plots'\n\nif not os.path.exists(PLOTS_DIR):\n os.makedirs(PLOTS_DIR)\n\nplt.savefig(os.path.join(PLOTS_DIR, 'fig5_logit_all.png'),\n bbox_inches='tight', transparent=True, dpi=300)\nplt.savefig(os.path.join(PLOTS_DIR, 'fig5_logit_all.pdf'), transparent=True, dpi=300)\nsns.despine()", "_____no_output_____" ] ] ]
[ "code", "markdown", "code" ]
[ [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ] ]
d0155bd456c7bc1a1f8713567df9133ff36bd9bf
28,096
ipynb
Jupyter Notebook
train_result/ml_ee_xxl_data_training_step7.ipynb
cufezhusy/mlXVA
42465d3b087e263a9d991c814279269c48bc5359
[ "MIT" ]
null
null
null
train_result/ml_ee_xxl_data_training_step7.ipynb
cufezhusy/mlXVA
42465d3b087e263a9d991c814279269c48bc5359
[ "MIT" ]
null
null
null
train_result/ml_ee_xxl_data_training_step7.ipynb
cufezhusy/mlXVA
42465d3b087e263a9d991c814279269c48bc5359
[ "MIT" ]
null
null
null
43.425039
205
0.506229
[ [ [ "## Initial Setup", "_____no_output_____" ] ], [ [ "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport itertools\nimport os\nimport math\nimport string\nimport re\nimport numpy as np\nimport pandas as pd\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\nimport helper\nimport pickle\n\nimport keras\n\nfrom keras.models import Sequential,load_model\nfrom keras.layers import Dense, Dropout, Activation, Flatten\nfrom keras.layers import Conv2D, MaxPooling2D,Conv1D,MaxPooling1D\n\nlayers = keras.layers", "Using TensorFlow backend.\n" ] ], [ [ "## Training Parameters\n\nWe'll set the hyperparameters for training our model. If you understand what they mean, feel free to play around - otherwise, we recommend keeping the defaults for your first run 🙂", "_____no_output_____" ] ], [ [ "# Hyperparams if GPU is available\nif tf.test.is_gpu_available():\n print('---- We are using GPU now ----')\n # GPU\n BATCH_SIZE = 512 # Number of examples used in each iteration\n EPOCHS = 80 # Number of passes through entire dataset\n \n# Hyperparams for CPU training\nelse:\n print('---- We are using CPU now ----')\n # CPU\n BATCH_SIZE = 256\n EPOCHS = 100", "---- We are using CPU now ----\n" ] ], [ [ "## Data\n\nThe wine reviews dataset is already attached to your workspace (if you want to attach your own data, [check out our docs](https://docs.floydhub.com/guides/workspace/#attaching-floydhub-datasets)).\n\nLet's take a look at data.", "_____no_output_____" ] ], [ [ "data_path = '/floyd/input/gengduoshuju/' # ADD path/to/dataset\nY= pickle.load( open(os.path.join(data_path,'Y.pks'), \"rb\" ) )\nX= pickle.load( open(os.path.join(data_path,'X.pks'), \"rb\" ) )\nX = X.reshape((X.shape[0],X.shape[1],1))\nprint(\"Size of X :\" + str(X.shape))\nprint(\"Size of Y :\" + str(Y.shape))\nX = X.astype(np.float64)\nX = np.nan_to_num(X)", "Size of X :(412038, 240, 1)\nSize of Y :(412038,)\n" ] ], [ [ "## Data Preprocessing", "_____no_output_____" ] ], [ [ "X_train, X_test, Y_train_orig,Y_test_orig= helper.divide_data(X,Y)\nprint(Y.min())\nprint(Y.max())\nnum_classes = 332\nY_train = keras.utils.to_categorical(Y_train_orig, num_classes)\nY_test = keras.utils.to_categorical(Y_test_orig, num_classes)\nprint(\"number of training examples = \" + str(X_train.shape[0]))\nprint(\"number of test examples = \" + str(X_test.shape[0]))\nprint(\"X_train shape: \" + str(X_train.shape))\nprint(\"Y_train shape: \" + str(Y_train.shape))\nprint(\"X_test shape: \" + str(X_test.shape))\nprint(\"Y_test shape: \" + str(Y_test.shape))", "0.0\n331.0\nnumber of training examples = 403797\nnumber of test examples = 8241\nX_train shape: (403797, 240, 1)\nY_train shape: (403797, 332)\nX_test shape: (8241, 240, 1)\nY_test shape: (8241, 332)\n" ], [ "input_shape = X_train.shape[1:]\nprint(input_shape)", "(240, 1)\n" ] ], [ [ "# Model definition", "_____no_output_____" ], [ "The *Tokens per sentence* plot (see above) is useful for setting the `MAX_LEN` training hyperparameter.", "_____no_output_____" ] ], [ [ "# ===================================================================================\n# Load the model what has already ben trained\n# ===================================================================================\n\nmodel = load_model(r\"floyd_model_xxl_data_ver8.h5\")", "_____no_output_____" ] ], [ [ "# Model Training", "_____no_output_____" ] ], [ [ "opt = keras.optimizers.Adam(lr=0.0001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, 
amsgrad=False)\nmodel.compile(loss='categorical_crossentropy',\n optimizer=opt,\n metrics=['accuracy'])\n\nmodel.summary()\nX_train = X_train.astype('float32')\nX_test = X_test.astype('float32')\n\nmodel.fit(X_train, Y_train,\n batch_size=BATCH_SIZE,\n epochs=EPOCHS,\n validation_data=(X_test, Y_test),\n shuffle=True)\n\nmodel.save(r\"floyd_model_xxl_data_ver9.h5\")\nprint('Training is done!')", "_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nconv1d_1 (Conv1D) (None, 240, 16) 80 \n_________________________________________________________________\nactivation_1 (Activation) (None, 240, 16) 0 \n_________________________________________________________________\nmax_pooling1d_1 (MaxPooling1 (None, 120, 16) 0 \n_________________________________________________________________\nconv1d_2 (Conv1D) (None, 120, 32) 2080 \n_________________________________________________________________\nactivation_2 (Activation) (None, 120, 32) 0 \n_________________________________________________________________\nmax_pooling1d_2 (MaxPooling1 (None, 60, 32) 0 \n_________________________________________________________________\nconv1d_3 (Conv1D) (None, 60, 64) 8256 \n_________________________________________________________________\nactivation_3 (Activation) (None, 60, 64) 0 \n_________________________________________________________________\nmax_pooling1d_3 (MaxPooling1 (None, 30, 64) 0 \n_________________________________________________________________\nconv1d_4 (Conv1D) (None, 30, 64) 16448 \n_________________________________________________________________\nactivation_4 (Activation) (None, 30, 64) 0 \n_________________________________________________________________\nmax_pooling1d_4 (MaxPooling1 (None, 15, 64) 0 \n_________________________________________________________________\nconv1d_5 (Conv1D) (None, 15, 32) 8224 \n_________________________________________________________________\nactivation_5 (Activation) (None, 15, 32) 0 \n_________________________________________________________________\nmax_pooling1d_5 (MaxPooling1 (None, 8, 32) 0 \n_________________________________________________________________\nflatten_1 (Flatten) (None, 256) 0 \n_________________________________________________________________\ndense_1 (Dense) (None, 332) 85324 \n_________________________________________________________________\nactivation_6 (Activation) (None, 332) 0 \n=================================================================\nTotal params: 120,412\nTrainable params: 120,412\nNon-trainable params: 0\n_________________________________________________________________\nTrain on 403797 samples, validate on 8241 samples\nEpoch 1/100\n403797/403797 [==============================] - 80s 197us/step - loss: 0.1270 - acc: 0.9520 - val_loss: 0.1445 - val_acc: 0.9475\nEpoch 2/100\n403797/403797 [==============================] - 78s 193us/step - loss: 0.1261 - acc: 0.9524 - val_loss: 0.1427 - val_acc: 0.9485\nEpoch 3/100\n403797/403797 [==============================] - 78s 193us/step - loss: 0.1267 - acc: 0.9522 - val_loss: 0.1432 - val_acc: 0.9484\nEpoch 4/100\n403797/403797 [==============================] - 78s 194us/step - loss: 0.1293 - acc: 0.9516 - val_loss: 0.1461 - val_acc: 0.9472\nEpoch 5/100\n403797/403797 [==============================] - 78s 193us/step - loss: 0.1217 - acc: 0.9544 - val_loss: 0.1377 - val_acc: 0.9509\nEpoch 6/100\n403797/403797 [==============================] - 78s 192us/step - 
loss: 0.1269 - acc: 0.9527 - val_loss: 0.1720 - val_acc: 0.9379\nEpoch 7/100\n403797/403797 [==============================] - 78s 192us/step - loss: 0.1263 - acc: 0.9526 - val_loss: 0.1432 - val_acc: 0.9453\nEpoch 8/100\n403797/403797 [==============================] - 79s 195us/step - loss: 0.1265 - acc: 0.9527 - val_loss: 0.1417 - val_acc: 0.9495\nEpoch 9/100\n403797/403797 [==============================] - 78s 194us/step - loss: 0.1267 - acc: 0.9524 - val_loss: 0.1412 - val_acc: 0.9470\nEpoch 10/100\n403797/403797 [==============================] - 77s 192us/step - loss: 0.1248 - acc: 0.9531 - val_loss: 0.1595 - val_acc: 0.9414\nEpoch 11/100\n403797/403797 [==============================] - 78s 192us/step - loss: 0.1245 - acc: 0.9531 - val_loss: 0.1502 - val_acc: 0.9461\nEpoch 12/100\n403797/403797 [==============================] - 77s 192us/step - loss: 0.1252 - acc: 0.9530 - val_loss: 0.1338 - val_acc: 0.9498\nEpoch 13/100\n403797/403797 [==============================] - 78s 193us/step - loss: 0.1242 - acc: 0.9536 - val_loss: 0.1682 - val_acc: 0.9398\nEpoch 14/100\n403797/403797 [==============================] - 79s 196us/step - loss: 0.1249 - acc: 0.9532 - val_loss: 0.1441 - val_acc: 0.9488\nEpoch 15/100\n403797/403797 [==============================] - 79s 196us/step - loss: 0.1273 - acc: 0.9524 - val_loss: 0.1328 - val_acc: 0.9513\nEpoch 16/100\n403797/403797 [==============================] - 79s 195us/step - loss: 0.1199 - acc: 0.9551 - val_loss: 0.1508 - val_acc: 0.9466\nEpoch 17/100\n403797/403797 [==============================] - 79s 197us/step - loss: 0.1234 - acc: 0.9538 - val_loss: 0.1425 - val_acc: 0.9469\nEpoch 18/100\n403797/403797 [==============================] - 79s 197us/step - loss: 0.1257 - acc: 0.9528 - val_loss: 0.1497 - val_acc: 0.9467\nEpoch 19/100\n403797/403797 [==============================] - 79s 195us/step - loss: 0.1211 - acc: 0.9541 - val_loss: 0.1484 - val_acc: 0.9442\nEpoch 20/100\n403797/403797 [==============================] - 78s 193us/step - loss: 0.1250 - acc: 0.9530 - val_loss: 0.1347 - val_acc: 0.9502\nEpoch 21/100\n403797/403797 [==============================] - 78s 194us/step - loss: 0.1282 - acc: 0.9522 - val_loss: 0.1386 - val_acc: 0.9504\nEpoch 22/100\n403797/403797 [==============================] - 77s 191us/step - loss: 0.1174 - acc: 0.9554 - val_loss: 0.1496 - val_acc: 0.9464\nEpoch 23/100\n403797/403797 [==============================] - 77s 191us/step - loss: 0.1220 - acc: 0.9541 - val_loss: 0.1403 - val_acc: 0.9478\nEpoch 24/100\n403797/403797 [==============================] - 78s 193us/step - loss: 0.1219 - acc: 0.9542 - val_loss: 0.1309 - val_acc: 0.9529\nEpoch 25/100\n403797/403797 [==============================] - 79s 195us/step - loss: 0.1216 - acc: 0.9544 - val_loss: 0.1484 - val_acc: 0.9450\nEpoch 26/100\n403797/403797 [==============================] - 78s 193us/step - loss: 0.1208 - acc: 0.9541 - val_loss: 0.1455 - val_acc: 0.9456\nEpoch 27/100\n403797/403797 [==============================] - 78s 192us/step - loss: 0.1211 - acc: 0.9544 - val_loss: 0.1474 - val_acc: 0.9447\nEpoch 28/100\n403797/403797 [==============================] - 78s 194us/step - loss: 0.1183 - acc: 0.9555 - val_loss: 0.1374 - val_acc: 0.9487\nEpoch 37/100\n403797/403797 [==============================] - 78s 193us/step - loss: 0.1224 - acc: 0.9540 - val_loss: 0.1818 - val_acc: 0.9357\nEpoch 38/100\n403797/403797 [==============================] - 77s 191us/step - loss: 0.1188 - acc: 0.9551 - val_loss: 0.1339 - val_acc: 0.9510\nEpoch 
39/100\n403797/403797 [==============================] - 77s 191us/step - loss: 0.1184 - acc: 0.9555 - val_loss: 0.1432 - val_acc: 0.9472\nEpoch 40/100\n 50688/403797 [==>...........................] - ETA: 1:08 - loss: 0.1228 - acc: 0.9541" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
d0155dc060599e77d6f956bd2dd160f361891a7c
198,630
ipynb
Jupyter Notebook
experiments/cnn_1/oracle.run1_limited/trials/12/trial.ipynb
stevester94/csc500-notebooks
4c1b04c537fe233a75bed82913d9d84985a89177
[ "MIT" ]
null
null
null
experiments/cnn_1/oracle.run1_limited/trials/12/trial.ipynb
stevester94/csc500-notebooks
4c1b04c537fe233a75bed82913d9d84985a89177
[ "MIT" ]
null
null
null
experiments/cnn_1/oracle.run1_limited/trials/12/trial.ipynb
stevester94/csc500-notebooks
4c1b04c537fe233a75bed82913d9d84985a89177
[ "MIT" ]
null
null
null
115.214617
73,468
0.817525
[ [ [ "import os, json, sys, time, random\nimport numpy as np\nimport torch\nfrom easydict import EasyDict\nfrom math import floor\nfrom easydict import EasyDict\n\nfrom steves_utils.vanilla_train_eval_test_jig import Vanilla_Train_Eval_Test_Jig\n\nfrom steves_utils.torch_utils import get_dataset_metrics, independent_accuracy_assesment\nfrom steves_models.configurable_vanilla import Configurable_Vanilla\nfrom steves_utils.torch_sequential_builder import build_sequential\nfrom steves_utils.lazy_map import Lazy_Map\nfrom steves_utils.sequence_aggregator import Sequence_Aggregator\n\nfrom steves_utils.stratified_dataset.traditional_accessor import Traditional_Accessor_Factory\n\nfrom steves_utils.cnn_do_report import (\n get_loss_curve,\n get_results_table,\n get_parameters_table,\n get_domain_accuracies,\n)\n\nfrom steves_utils.torch_utils import (\n confusion_by_domain_over_dataloader,\n independent_accuracy_assesment\n)\n\nfrom steves_utils.utils_v2 import (\n per_domain_accuracy_from_confusion,\n get_datasets_base_path\n)\n\n# from steves_utils.ptn_do_report import TBD", "_____no_output_____" ], [ "required_parameters = {\n \"experiment_name\",\n \"lr\",\n \"device\",\n \"dataset_seed\",\n \"seed\",\n \"labels\",\n \"domains_target\",\n \"domains_source\",\n \"num_examples_per_domain_per_label_source\",\n \"num_examples_per_domain_per_label_target\",\n \"batch_size\",\n \"n_epoch\",\n \"patience\",\n \"criteria_for_best\",\n \"normalize_source\",\n \"normalize_target\",\n \"x_net\",\n \"NUM_LOGS_PER_EPOCH\",\n \"BEST_MODEL_PATH\",\n \"pickle_name_source\",\n \"pickle_name_target\",\n \"torch_default_dtype\",\n}", "_____no_output_____" ], [ "from steves_utils.ORACLE.utils_v2 import (\n ALL_SERIAL_NUMBERS,\n ALL_DISTANCES_FEET_NARROWED,\n)\n\nstandalone_parameters = {}\nstandalone_parameters[\"experiment_name\"] = \"MANUAL CORES CNN\"\nstandalone_parameters[\"lr\"] = 0.0001\nstandalone_parameters[\"device\"] = \"cuda\"\n\nstandalone_parameters[\"dataset_seed\"] = 1337\nstandalone_parameters[\"seed\"] = 1337\nstandalone_parameters[\"labels\"] = ALL_SERIAL_NUMBERS\n\nstandalone_parameters[\"domains_source\"] = [8,32,50]\nstandalone_parameters[\"domains_target\"] = [14,20,26,38,44,]\n\nstandalone_parameters[\"num_examples_per_domain_per_label_source\"]=-1\nstandalone_parameters[\"num_examples_per_domain_per_label_target\"]=-1\n\nstandalone_parameters[\"pickle_name_source\"] = \"oracle.Run1_framed_2000Examples_stratified_ds.2022A.pkl\"\nstandalone_parameters[\"pickle_name_target\"] = \"oracle.Run2_framed_2000Examples_stratified_ds.2022A.pkl\"\n\nstandalone_parameters[\"torch_default_dtype\"] = \"torch.float32\" \n\nstandalone_parameters[\"batch_size\"]=128\n\nstandalone_parameters[\"n_epoch\"] = 3\n\nstandalone_parameters[\"patience\"] = 10\n\nstandalone_parameters[\"criteria_for_best\"] = \"target_accuracy\"\nstandalone_parameters[\"normalize_source\"] = False\nstandalone_parameters[\"normalize_target\"] = False\n\nstandalone_parameters[\"x_net\"] = [\n {\"class\": \"nnReshape\", \"kargs\": {\"shape\":[-1, 1, 2, 256]}},\n {\"class\": \"Conv2d\", \"kargs\": { \"in_channels\":1, \"out_channels\":256, \"kernel_size\":(1,7), \"bias\":False, \"padding\":(0,3), },},\n {\"class\": \"ReLU\", \"kargs\": {\"inplace\": True}},\n {\"class\": \"BatchNorm2d\", \"kargs\": {\"num_features\":256}},\n\n {\"class\": \"Conv2d\", \"kargs\": { \"in_channels\":256, \"out_channels\":80, \"kernel_size\":(2,7), \"bias\":True, \"padding\":(0,3), },},\n {\"class\": \"ReLU\", \"kargs\": {\"inplace\": True}},\n {\"class\": 
\"BatchNorm2d\", \"kargs\": {\"num_features\":80}},\n {\"class\": \"Flatten\", \"kargs\": {}},\n\n {\"class\": \"Linear\", \"kargs\": {\"in_features\": 80*256, \"out_features\": 256}}, # 80 units per IQ pair\n {\"class\": \"ReLU\", \"kargs\": {\"inplace\": True}},\n {\"class\": \"BatchNorm1d\", \"kargs\": {\"num_features\":256}},\n\n {\"class\": \"Linear\", \"kargs\": {\"in_features\": 256, \"out_features\": len(standalone_parameters[\"labels\"])}},\n]\n\nstandalone_parameters[\"NUM_LOGS_PER_EPOCH\"] = 10\nstandalone_parameters[\"BEST_MODEL_PATH\"] = \"./best_model.pth\"", "_____no_output_____" ], [ "# Parameters\nparameters = {\n \"experiment_name\": \"cnn_1:oracle.run1_limited\",\n \"labels\": [\n \"3123D52\",\n \"3123D65\",\n \"3123D79\",\n \"3123D80\",\n \"3123D54\",\n \"3123D70\",\n \"3123D7B\",\n \"3123D89\",\n \"3123D58\",\n \"3123D76\",\n \"3123D7D\",\n \"3123EFE\",\n \"3123D64\",\n \"3123D78\",\n \"3123D7E\",\n \"3124E4A\",\n ],\n \"domains_source\": [8, 32, 50],\n \"domains_target\": [14, 20, 26, 38, 44],\n \"pickle_name_source\": \"oracle.Run1_10kExamples_stratified_ds.2022A.pkl\",\n \"pickle_name_target\": \"oracle.Run1_10kExamples_stratified_ds.2022A.pkl\",\n \"device\": \"cuda\",\n \"lr\": 0.0001,\n \"batch_size\": 128,\n \"normalize_source\": False,\n \"normalize_target\": False,\n \"num_examples_per_domain_per_label_source\": 2000,\n \"num_examples_per_domain_per_label_target\": 2000,\n \"torch_default_dtype\": \"torch.float32\",\n \"n_epoch\": 50,\n \"patience\": 3,\n \"criteria_for_best\": \"target_accuracy\",\n \"x_net\": [\n {\"class\": \"nnReshape\", \"kargs\": {\"shape\": [-1, 1, 2, 256]}},\n {\n \"class\": \"Conv2d\",\n \"kargs\": {\n \"in_channels\": 1,\n \"out_channels\": 256,\n \"kernel_size\": [1, 7],\n \"bias\": False,\n \"padding\": [0, 3],\n },\n },\n {\"class\": \"ReLU\", \"kargs\": {\"inplace\": True}},\n {\"class\": \"BatchNorm2d\", \"kargs\": {\"num_features\": 256}},\n {\n \"class\": \"Conv2d\",\n \"kargs\": {\n \"in_channels\": 256,\n \"out_channels\": 80,\n \"kernel_size\": [2, 7],\n \"bias\": True,\n \"padding\": [0, 3],\n },\n },\n {\"class\": \"ReLU\", \"kargs\": {\"inplace\": True}},\n {\"class\": \"BatchNorm2d\", \"kargs\": {\"num_features\": 80}},\n {\"class\": \"Flatten\", \"kargs\": {}},\n {\"class\": \"Linear\", \"kargs\": {\"in_features\": 20480, \"out_features\": 256}},\n {\"class\": \"ReLU\", \"kargs\": {\"inplace\": True}},\n {\"class\": \"BatchNorm1d\", \"kargs\": {\"num_features\": 256}},\n {\"class\": \"Linear\", \"kargs\": {\"in_features\": 256, \"out_features\": 16}},\n ],\n \"NUM_LOGS_PER_EPOCH\": 10,\n \"BEST_MODEL_PATH\": \"./best_model.pth\",\n \"dataset_seed\": 7,\n \"seed\": 7,\n}\n", "_____no_output_____" ], [ "# Set this to True if you want to run this template directly\nSTANDALONE = False\nif STANDALONE:\n print(\"parameters not injected, running with standalone_parameters\")\n parameters = standalone_parameters\n\nif not 'parameters' in locals() and not 'parameters' in globals():\n raise Exception(\"Parameter injection failed\")\n\n#Use an easy dict for all the parameters\np = EasyDict(parameters)\n\nsupplied_keys = set(p.keys())\n\nif supplied_keys != required_parameters:\n print(\"Parameters are incorrect\")\n if len(supplied_keys - required_parameters)>0: print(\"Shouldn't have:\", str(supplied_keys - required_parameters))\n if len(required_parameters - supplied_keys)>0: print(\"Need to have:\", str(required_parameters - supplied_keys))\n raise RuntimeError(\"Parameters are incorrect\")\n\n", "_____no_output_____" ], [ 
"###################################\n# Set the RNGs and make it all deterministic\n###################################\nnp.random.seed(p.seed)\nrandom.seed(p.seed)\ntorch.manual_seed(p.seed)\n\ntorch.use_deterministic_algorithms(True) ", "_____no_output_____" ], [ "torch.set_default_dtype(eval(p.torch_default_dtype))", "_____no_output_____" ], [ "###################################\n# Build the network(s)\n# Note: It's critical to do this AFTER setting the RNG\n###################################\nx_net = build_sequential(p.x_net)", "_____no_output_____" ], [ "start_time_secs = time.time()", "_____no_output_____" ], [ "def wrap_in_dataloader(p, ds):\n return torch.utils.data.DataLoader(\n ds,\n batch_size=p.batch_size,\n shuffle=True,\n num_workers=1,\n persistent_workers=True,\n prefetch_factor=50,\n pin_memory=True\n )\n\ntaf_source = Traditional_Accessor_Factory(\n labels=p.labels,\n domains=p.domains_source,\n num_examples_per_domain_per_label=p.num_examples_per_domain_per_label_source,\n pickle_path=os.path.join(get_datasets_base_path(), p.pickle_name_source),\n seed=p.dataset_seed\n)\ntrain_original_source, val_original_source, test_original_source = \\\n taf_source.get_train(), taf_source.get_val(), taf_source.get_test()\n\n\ntaf_target = Traditional_Accessor_Factory(\n labels=p.labels,\n domains=p.domains_target,\n num_examples_per_domain_per_label=p.num_examples_per_domain_per_label_source,\n pickle_path=os.path.join(get_datasets_base_path(), p.pickle_name_target),\n seed=p.dataset_seed\n)\ntrain_original_target, val_original_target, test_original_target = \\\n taf_target.get_train(), taf_target.get_val(), taf_target.get_test()\n\n\n# For CNN We only use X and Y. And we only train on the source.\n# Properly form the data using a transform lambda and Lazy_Map. 
Finally wrap them in a dataloader\n\ntransform_lambda = lambda ex: ex[:2] # Strip the tuple to just (x,y)\n\n\ntrain_processed_source = wrap_in_dataloader(\n p,\n Lazy_Map(train_original_source, transform_lambda)\n)\nval_processed_source = wrap_in_dataloader(\n p,\n Lazy_Map(val_original_source, transform_lambda)\n)\ntest_processed_source = wrap_in_dataloader(\n p,\n Lazy_Map(test_original_source, transform_lambda)\n)\n\ntrain_processed_target = wrap_in_dataloader(\n p,\n Lazy_Map(train_original_target, transform_lambda)\n)\nval_processed_target = wrap_in_dataloader(\n p,\n Lazy_Map(val_original_target, transform_lambda)\n)\ntest_processed_target = wrap_in_dataloader(\n p,\n Lazy_Map(test_original_target, transform_lambda)\n)\n\n\n\ndatasets = EasyDict({\n \"source\": {\n \"original\": {\"train\":train_original_source, \"val\":val_original_source, \"test\":test_original_source},\n \"processed\": {\"train\":train_processed_source, \"val\":val_processed_source, \"test\":test_processed_source}\n },\n \"target\": {\n \"original\": {\"train\":train_original_target, \"val\":val_original_target, \"test\":test_original_target},\n \"processed\": {\"train\":train_processed_target, \"val\":val_processed_target, \"test\":test_processed_target}\n },\n})", "_____no_output_____" ], [ "ep = next(iter(test_processed_target))\nep[0].dtype", "_____no_output_____" ], [ "model = Configurable_Vanilla(\n x_net=x_net,\n label_loss_object=torch.nn.NLLLoss(),\n learning_rate=p.lr\n)", "_____no_output_____" ], [ "jig = Vanilla_Train_Eval_Test_Jig(\n model=model,\n path_to_best_model=p.BEST_MODEL_PATH,\n device=p.device,\n label_loss_object=torch.nn.NLLLoss(),\n)\n\njig.train(\n train_iterable=datasets.source.processed.train,\n source_val_iterable=datasets.source.processed.val,\n target_val_iterable=datasets.target.processed.val,\n patience=p.patience,\n num_epochs=p.n_epoch,\n num_logs_per_epoch=p.NUM_LOGS_PER_EPOCH,\n criteria_for_best=p.criteria_for_best\n)", "epoch: 1, [batch: 1 / 525], examples_per_second: 709.1515, train_label_loss: 2.7689, \n" ], [ "total_experiment_time_secs = time.time() - start_time_secs", "_____no_output_____" ], [ "source_test_label_accuracy, source_test_label_loss = jig.test(datasets.source.processed.test)\ntarget_test_label_accuracy, target_test_label_loss = jig.test(datasets.target.processed.test)\n\nsource_val_label_accuracy, source_val_label_loss = jig.test(datasets.source.processed.val)\ntarget_val_label_accuracy, target_val_label_loss = jig.test(datasets.target.processed.val)\n\nhistory = jig.get_history()\n\ntotal_epochs_trained = len(history[\"epoch_indices\"])\n\nval_dl = wrap_in_dataloader(p, Sequence_Aggregator((datasets.source.original.val, datasets.target.original.val)))\n\nconfusion = confusion_by_domain_over_dataloader(model, p.device, val_dl, forward_uses_domain=False)\nper_domain_accuracy = per_domain_accuracy_from_confusion(confusion)\n\n# Add a key to per_domain_accuracy for if it was a source domain\nfor domain, accuracy in per_domain_accuracy.items():\n per_domain_accuracy[domain] = {\n \"accuracy\": accuracy,\n \"source?\": domain in p.domains_source\n }\n\n# Do an independent accuracy assesment JUST TO BE SURE!\n# _source_test_label_accuracy = independent_accuracy_assesment(model, datasets.source.processed.test, p.device)\n# _target_test_label_accuracy = independent_accuracy_assesment(model, datasets.target.processed.test, p.device)\n# _source_val_label_accuracy = independent_accuracy_assesment(model, datasets.source.processed.val, p.device)\n# 
_target_val_label_accuracy = independent_accuracy_assesment(model, datasets.target.processed.val, p.device)\n\n# assert(_source_test_label_accuracy == source_test_label_accuracy)\n# assert(_target_test_label_accuracy == target_test_label_accuracy)\n# assert(_source_val_label_accuracy == source_val_label_accuracy)\n# assert(_target_val_label_accuracy == target_val_label_accuracy)\n\n###################################\n# Write out the results\n###################################\n\nexperiment = {\n \"experiment_name\": p.experiment_name,\n \"parameters\": p,\n \"results\": {\n \"source_test_label_accuracy\": source_test_label_accuracy,\n \"source_test_label_loss\": source_test_label_loss,\n \"target_test_label_accuracy\": target_test_label_accuracy,\n \"target_test_label_loss\": target_test_label_loss,\n \"source_val_label_accuracy\": source_val_label_accuracy,\n \"source_val_label_loss\": source_val_label_loss,\n \"target_val_label_accuracy\": target_val_label_accuracy,\n \"target_val_label_loss\": target_val_label_loss,\n \"total_epochs_trained\": total_epochs_trained,\n \"total_experiment_time_secs\": total_experiment_time_secs,\n \"confusion\": confusion,\n \"per_domain_accuracy\": per_domain_accuracy,\n },\n \"history\": history,\n \"dataset_metrics\": get_dataset_metrics(datasets, \"cnn\"),\n}", "_____no_output_____" ], [ "get_loss_curve(experiment)", "_____no_output_____" ], [ "get_results_table(experiment)", "_____no_output_____" ], [ "get_domain_accuracies(experiment)", "_____no_output_____" ], [ "print(\"Source Test Label Accuracy:\", experiment[\"results\"][\"source_test_label_accuracy\"], \"Target Test Label Accuracy:\", experiment[\"results\"][\"target_test_label_accuracy\"])\nprint(\"Source Val Label Accuracy:\", experiment[\"results\"][\"source_val_label_accuracy\"], \"Target Val Label Accuracy:\", experiment[\"results\"][\"target_val_label_accuracy\"])", "Source Test Label Accuracy: 0.48659722222222224 Target Test Label Accuracy: 0.08275\nSource Val Label Accuracy: 0.4955555555555556 Target Val Label Accuracy: 0.08216666666666667\n" ], [ "json.dumps(experiment)", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
d01562473e6c80076869317db87fd9c26d77ed6c
20,219
ipynb
Jupyter Notebook
2.1 Scalars, Vectors, Matrices and Tensors/2.1 Scalars Vectors Matrices and Tensors.ipynb
PeterFogh/deepLearningBook-Notes
75d35cc63b8577b4ebe400e9db536138b956f88f
[ "MIT" ]
2
2022-02-06T12:57:28.000Z
2022-02-08T18:23:00.000Z
2.1 Scalars, Vectors, Matrices and Tensors/2.1 Scalars Vectors Matrices and Tensors.ipynb
AarthiKasirajan/deepLearningBook-Notes
75d35cc63b8577b4ebe400e9db536138b956f88f
[ "MIT" ]
null
null
null
2.1 Scalars, Vectors, Matrices and Tensors/2.1 Scalars Vectors Matrices and Tensors.ipynb
AarthiKasirajan/deepLearningBook-Notes
75d35cc63b8577b4ebe400e9db536138b956f88f
[ "MIT" ]
null
null
null
24.567436
562
0.477472
[ [ [ "import numpy as np", "_____no_output_____" ], [ "%%html\n<style>\n.pquote {\n text-align: left;\n margin: 40px 0 40px auto;\n width: 70%;\n font-size: 1.5em;\n font-style: italic;\n display: block;\n line-height: 1.3em;\n color: #5a75a7;\n font-weight: 600;\n border-left: 5px solid rgba(90, 117, 167, .1);\n padding-left: 6px;\n}\n.notes {\n font-style: italic;\n display: block;\n margin: 40px 10%;\n}\n</style>", "_____no_output_____" ] ], [ [ "$$\n\\newcommand\\bs[1]{\\boldsymbol{#1}}\n$$", "_____no_output_____" ], [ "<span class='notes'>\n This content is part of a series following the chapter 2 on linear algebra from the [Deep Learning Book](http://www.deeplearningbook.org/) by Goodfellow, I., Bengio, Y., and Courville, A. (2016). It aims to provide intuitions/drawings/python code on mathematical theories and is constructed as my understanding of these concepts. You can check the syllabus in the [introduction post](https://hadrienj.github.io/posts/Deep-Learning-Book-Series-Introduction/).\n</span>", "_____no_output_____" ], [ "# Introduction\n\nThis is the first post/notebook of a series following the syllabus of the [linear algebra chapter from the Deep Learning Book](http://www.deeplearningbook.org/contents/linear_algebra.html) by Goodfellow et al.. This work is a collection of thoughts/details/developements/examples I made while reading this chapter. It is designed to help you go through their introduction to linear algebra. For more details about this series and the syllabus, please see the [introduction post](https://hadrienj.github.io/posts/Deep-Learning-Book-Series-Introduction/).\n\nThis first chapter is quite light and concerns the basic elements used in linear algebra and their definitions. It also introduces important functions in Python/Numpy that we will use all along this series. It will explain how to create and use vectors and matrices through examples.", "_____no_output_____" ], [ "# 2.1 Scalars, Vectors, Matrices and Tensors\n\nLet's start with some basic definitions:\n\n<img src=\"images/scalar-tensor.png\" width=\"400\" alt=\"scalar-tensor\">\n\n- A scalar is a single number\n- A vector is an array of numbers.\n\n$$\n\\bs{x} =\\begin{bmatrix}\n x_1 \\\\\\\\\n x_2 \\\\\\\\\n \\cdots \\\\\\\\\n x_n\n\\end{bmatrix}\n$$\n\n- A matrix is a 2-D array\n\n$$\n\\bs{A}=\n\\begin{bmatrix}\n A_{1,1} & A_{1,2} & \\cdots & A_{1,n} \\\\\\\\\n A_{2,1} & A_{2,2} & \\cdots & A_{2,n} \\\\\\\\\n \\cdots & \\cdots & \\cdots & \\cdots \\\\\\\\\n A_{m,1} & A_{m,2} & \\cdots & A_{m,n}\n\\end{bmatrix}\n$$\n\n- A tensor is a $n$-dimensional array with $n>2$\n\nWe will follow the conventions used in the [Deep Learning Book](http://www.deeplearningbook.org/):\n\n- scalars are written in lowercase and italics. For instance: $n$\n- vectors are written in lowercase, italics and bold type. For instance: $\\bs{x}$\n- matrices are written in uppercase, italics and bold. For instance: $\\bs{X}$", "_____no_output_____" ], [ "### Example 1.\n\n#### Create a vector with Python and Numpy\n\n*Coding tip*: Unlike the `matrix()` function which necessarily creates $2$-dimensional matrices, you can create $n$-dimensionnal arrays with the `array()` function. The main advantage to use `matrix()` is the useful methods (conjugate transpose, inverse, matrix operations...). We will use the `array()` function in this series.\n\nWe will start by creating a vector. 
This is just a $1$-dimensional array:", "_____no_output_____" ] ], [ [ "x = np.array([1, 2, 3, 4])\nx", "_____no_output_____" ] ], [ [ "### Example 2.\n\n#### Create a (3x2) matrix with nested brackets\n\nThe `array()` function can also create $2$-dimensional arrays with nested brackets:", "_____no_output_____" ] ], [ [ "A = np.array([[1, 2], [3, 4], [5, 6]])\nA", "_____no_output_____" ] ], [ [ "### Shape\n\nThe shape of an array (that is to say its dimensions) tells you the number of values for each dimension. For a $2$-dimensional array it will give you the number of rows and the number of columns. Let's find the shape of our preceding $2$-dimensional array `A`. Since `A` is a Numpy array (it was created with the `array()` function) you can access its shape with:", "_____no_output_____" ] ], [ [ "A.shape", "_____no_output_____" ] ], [ [ "We can see that $\\bs{A}$ has 3 rows and 2 columns.\n\nLet's check the shape of our first vector:", "_____no_output_____" ] ], [ [ "x.shape", "_____no_output_____" ] ], [ [ "As expected, you can see that $\\bs{x}$ has only one dimension. The number corresponds to the length of the array:", "_____no_output_____" ] ], [ [ "len(x)", "_____no_output_____" ] ], [ [ "# Transposition\n\nWith transposition you can convert a row vector to a column vector and vice versa:\n\n<img src=\"images/transposeVector.png\" alt=\"transposeVector\" width=\"200\">\n\nThe transpose $\\bs{A}^{\\text{T}}$ of the matrix $\\bs{A}$ corresponds to the mirrored axes. If the matrix is a square matrix (same number of columns and rows):\n\n<img src=\"images/transposeMatrixSquare.png\" alt=\"transposeMatrixSquare\" width=\"300\">\n\nIf the matrix is not square the idea is the same:\n\n<img src=\"images/transposeMatrix.png\" alt=\"transposeMatrix\" width=\"300\">\n\n\nThe superscript $^\\text{T}$ is used for transposed matrices.\n\n$$\n\\bs{A}=\n\\begin{bmatrix}\n A_{1,1} & A_{1,2} \\\\\\\\\n A_{2,1} & A_{2,2} \\\\\\\\\n A_{3,1} & A_{3,2}\n\\end{bmatrix}\n$$\n\n$$\n\\bs{A}^{\\text{T}}=\n\\begin{bmatrix}\n A_{1,1} & A_{2,1} & A_{3,1} \\\\\\\\\n A_{1,2} & A_{2,2} & A_{3,2}\n\\end{bmatrix}\n$$\n\nThe shape ($m \\times n$) is inverted and becomes ($n \\times m$).\n\n<img src=\"images/transposeMatrixDim.png\" alt=\"transposeMatrixDim\" width=\"300\">", "_____no_output_____" ], [ "### Example 3.\n\n#### Create a matrix A and transpose it", "_____no_output_____" ] ], [ [ "A = np.array([[1, 2], [3, 4], [5, 6]])\nA", "_____no_output_____" ], [ "A_t = A.T\nA_t", "_____no_output_____" ] ], [ [ "We can check the dimensions of the matrices:", "_____no_output_____" ] ], [ [ "A.shape", "_____no_output_____" ], [ "A_t.shape", "_____no_output_____" ] ], [ [ "We can see that the number of columns becomes the number of rows with transposition and vice versa.", "_____no_output_____" ], [ "# Addition\n\n<img src=\"images/additionMatrix.png\" alt=\"additionMatrix\" width=\"300\">\n\nMatrices can be added if they have the same shape:\n\n$$\\bs{A} + \\bs{B} = \\bs{C}$$\n\nEach cell of $\\bs{A}$ is added to the corresponding cell of $\\bs{B}$:\n\n$$\\bs{A}_{i,j} + \\bs{B}_{i,j} = \\bs{C}_{i,j}$$\n\n$i$ is the row index and $j$ the column index.\n\n$$\n\\begin{bmatrix}\n A_{1,1} & A_{1,2} \\\\\\\\\n A_{2,1} & A_{2,2} \\\\\\\\\n A_{3,1} & A_{3,2}\n\\end{bmatrix}+\n\\begin{bmatrix}\n B_{1,1} & B_{1,2} \\\\\\\\\n B_{2,1} & B_{2,2} \\\\\\\\\n B_{3,1} & B_{3,2}\n\\end{bmatrix}=\n\\begin{bmatrix}\n A_{1,1} + B_{1,1} & A_{1,2} + B_{1,2} \\\\\\\\\n A_{2,1} + B_{2,1} & A_{2,2} + B_{2,2} \\\\\\\\\n A_{3,1} + B_{3,1} & 
A_{3,2} + B_{3,2}\n\\end{bmatrix}\n$$\n\nThe shapes of $\\bs{A}$, $\\bs{B}$ and $\\bs{C}$ are identical. Let's check that in an example:", "_____no_output_____" ], [ "### Example 4.\n\n#### Create two matrices A and B and add them\n\nWith Numpy you can add matrices just as you would add vectors or scalars.", "_____no_output_____" ] ], [ [ "A = np.array([[1, 2], [3, 4], [5, 6]])\nA", "_____no_output_____" ], [ "B = np.array([[2, 5], [7, 4], [4, 3]])\nB", "_____no_output_____" ], [ "# Add matrices A and B\nC = A + B\nC", "_____no_output_____" ] ], [ [ "It is also possible to add a scalar to a matrix. This means adding this scalar to each cell of the matrix.\n\n$$\n\\alpha+ \\begin{bmatrix}\n    A_{1,1} & A_{1,2} \\\\\\\\\n    A_{2,1} & A_{2,2} \\\\\\\\\n    A_{3,1} & A_{3,2}\n\\end{bmatrix}=\n\\begin{bmatrix}\n    \\alpha + A_{1,1} & \\alpha + A_{1,2} \\\\\\\\\n    \\alpha + A_{2,1} & \\alpha + A_{2,2} \\\\\\\\\n    \\alpha + A_{3,1} & \\alpha + A_{3,2}\n\\end{bmatrix}\n$$", "_____no_output_____" ], [ "### Example 5.\n\n#### Add a scalar to a matrix", "_____no_output_____" ] ], [ [ "A", "_____no_output_____" ], [ "# Example: Add 4 to the matrix A\nC = A+4\nC", "_____no_output_____" ] ], [ [ "# Broadcasting\n\nNumpy can handle operations on arrays of different shapes. The smaller array will be extended to match the shape of the bigger one. The advantage is that this is done in `C` under the hood (like any vectorized operations in Numpy). Actually, we used broadcasting in Example 5. The scalar was converted into an array of the same shape as $\\bs{A}$.\n\nHere is another generic example:\n\n$$\n\\begin{bmatrix}\n    A_{1,1} & A_{1,2} \\\\\\\\\n    A_{2,1} & A_{2,2} \\\\\\\\\n    A_{3,1} & A_{3,2}\n\\end{bmatrix}+\n\\begin{bmatrix}\n    B_{1,1} \\\\\\\\\n    B_{2,1} \\\\\\\\\n    B_{3,1}\n\\end{bmatrix}\n$$\n\nis equivalent to\n\n$$\n\\begin{bmatrix}\n    A_{1,1} & A_{1,2} \\\\\\\\\n    A_{2,1} & A_{2,2} \\\\\\\\\n    A_{3,1} & A_{3,2}\n\\end{bmatrix}+\n\\begin{bmatrix}\n    B_{1,1} & B_{1,1} \\\\\\\\\n    B_{2,1} & B_{2,1} \\\\\\\\\n    B_{3,1} & B_{3,1}\n\\end{bmatrix}=\n\\begin{bmatrix}\n    A_{1,1} + B_{1,1} & A_{1,2} + B_{1,1} \\\\\\\\\n    A_{2,1} + B_{2,1} & A_{2,2} + B_{2,1} \\\\\\\\\n    A_{3,1} + B_{3,1} & A_{3,2} + B_{3,1}\n\\end{bmatrix}\n$$\n\nwhere the ($3 \\times 1$) matrix is converted to the right shape ($3 \\times 2$) by copying the first column. Numpy will do that automatically if the shapes can match.", "_____no_output_____" ], [ "### Example 6.\n\n#### Add two matrices of different shapes", "_____no_output_____" ] ], [ [ "A = np.array([[1, 2], [3, 4], [5, 6]])\nA", "_____no_output_____" ], [ "B = np.array([[2], [4], [6]])\nB", "_____no_output_____" ], [ "# Broadcasting\nC=A+B\nC", "_____no_output_____" ] ], [ [ "You can find basic operations on matrices simply explained [here](https://www.mathsisfun.com/algebra/matrix-introduction.html).", "_____no_output_____" ], [ "<span class='notes'>\n    Feel free to drop me an email or a comment. The syllabus of this series can be found [in the introduction post](https://hadrienj.github.io/posts/Deep-Learning-Book-Series-Introduction/). 
All the notebooks can be found on [Github](https://github.com/hadrienj/deepLearningBook-Notes).\n</span>", "_____no_output_____" ], [ "# References\n\n- [Broadcasting in Numpy](https://docs.scipy.org/doc/numpy-1.13.0/user/basics.broadcasting.html)\n\n- [Discussion on Arrays and matrices](https://stackoverflow.com/questions/4151128/what-are-the-differences-between-numpy-arrays-and-matrices-which-one-should-i-u)\n\n- [Math is fun - Matrix introduction](https://www.mathsisfun.com/algebra/matrix-introduction.html)", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "code", "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown", "markdown" ] ]
d015697dc71f7cfb7e8ad2a886e554ffae52612a
298,320
ipynb
Jupyter Notebook
examples/filters/reference/distance_transform_lin.ipynb
xu-kai-xu/porespy
9df231bfd4010e3a13efc66585474e148cd08d6c
[ "MIT" ]
null
null
null
examples/filters/reference/distance_transform_lin.ipynb
xu-kai-xu/porespy
9df231bfd4010e3a13efc66585474e148cd08d6c
[ "MIT" ]
null
null
null
examples/filters/reference/distance_transform_lin.ipynb
xu-kai-xu/porespy
9df231bfd4010e3a13efc66585474e148cd08d6c
[ "MIT" ]
null
null
null
1,724.393064
163,792
0.961695
[ [ [ "# `distance_transform_lin`\nA variant of the standard distance transform where the distances are computed along a give axis rather than radially.", "_____no_output_____" ] ], [ [ "import numpy as np\nimport porespy as ps\nimport scipy.ndimage as spim\nimport matplotlib.pyplot as plt", "_____no_output_____" ] ], [ [ "The arguments and their defaults are:", "_____no_output_____" ] ], [ [ "import inspect\ninspect.signature(ps.filters.distance_transform_lin)", "_____no_output_____" ] ], [ [ "## `axis`\nThe axis along which the distances should be computed", "_____no_output_____" ] ], [ [ "fig, ax = plt.subplots(1, 2, figsize=[12, 6])\n\nim = ps.generators.blobs(shape=[500, 500], porosity=0.7)\n\naxis = 0\ndt = ps.filters.distance_transform_lin(im, axis=axis)\n\nax[0].imshow(dt/im)\nax[0].axis(False)\nax[0].set_title(f'axis = {axis}')\n\naxis = 1\ndt = ps.filters.distance_transform_lin(im, axis=axis)\n\nax[1].imshow(dt/im)\nax[1].axis(False)\nax[1].set_title(f'axis = {axis}');", "_____no_output_____" ] ], [ [ "## `mode`\nWhether the distances are comptuted from the start to end, end to start, or both.", "_____no_output_____" ] ], [ [ "fig, ax = plt.subplots(1, 3, figsize=[15, 5])\n\nim = ps.generators.blobs(shape=[500, 500], porosity=0.7)\n\nmode = 'forward'\ndt = ps.filters.distance_transform_lin(im, mode=mode)\n\nax[0].imshow(dt/im)\nax[0].axis(False)\nax[0].set_title(f'mode = {mode}')\n\nmode = 'reverse'\ndt = ps.filters.distance_transform_lin(im, mode=mode)\n\nax[1].imshow(dt/im)\nax[1].axis(False)\nax[1].set_title(f'mode = {mode}')\n\nmode = 'both'\ndt = ps.filters.distance_transform_lin(im, mode=mode)\n\nax[2].imshow(dt/im)\nax[2].axis(False)\nax[2].set_title(f'mode = {mode}');", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
d01578784f0f2740df6a9f6a0dcf62855011a33b
39,364
ipynb
Jupyter Notebook
lab8/finite_pincell_depletion/p_d1p6/.ipynb_checkpoints/finite_pincell-checkpoint.ipynb
stu314159/er362_omc
951578e9fc6279cad7090794f81a221abe1892d0
[ "MIT" ]
null
null
null
lab8/finite_pincell_depletion/p_d1p6/.ipynb_checkpoints/finite_pincell-checkpoint.ipynb
stu314159/er362_omc
951578e9fc6279cad7090794f81a221abe1892d0
[ "MIT" ]
null
null
null
lab8/finite_pincell_depletion/p_d1p6/.ipynb_checkpoints/finite_pincell-checkpoint.ipynb
stu314159/er362_omc
951578e9fc6279cad7090794f81a221abe1892d0
[ "MIT" ]
null
null
null
83.221987
16,072
0.730312
[ [ [ "import openmc\nimport openmc.deplete\n\n%matplotlib inline\nimport numpy as np", "_____no_output_____" ], [ "fuel = openmc.Material(name=\"uo2\")\nfuel.add_element(\"U\", 1, percent_type=\"ao\", enrichment=4.25)\nfuel.add_element(\"O\", 2)\nfuel.set_density(\"g/cc\", 10.4)\n\nclad = openmc.Material(name='clad');\nclad.add_element(\"Zr\",1);\nclad.set_density('g/cc',6.0);\n\nwater = openmc.Material(name='water');\nwater.add_element('O',1);\nwater.add_element('H',2)\nwater.set_density('g/cc',0.712); # high temperature density\nwater.add_s_alpha_beta('c_H_in_H2O');\n\nmaterials = openmc.Materials([fuel,clad,water]);", "_____no_output_____" ], [ "h_core = 300.; \nh_fuel = 200.;\n\nr_fuel = 0.42;\nr_pin = 0.45;\n\nP_D = 1.6;\n\npitch = P_D*2*r_pin;\n\nfuel_temp = 900; # K, guess at fuel temperature\nmod_temp = 600; # K, moderator temperature\n\n# fuel cylinder:\nfuel_cyl = openmc.model.RightCircularCylinder([0.,0.,-h_fuel/2.],\n h_fuel, r_fuel);\n\nfuel.volume = np.pi*(r_fuel**2)*h_fuel;\n\n# pin cylinder\npin_cyl = openmc.model.RightCircularCylinder([0.,0.,-(h_fuel+(r_pin-r_fuel))/2.],\n h_fuel+(r_pin-r_fuel)*2.,r_pin);\n\n# pin cell container\ncore_cell = openmc.model.RectangularParallelepiped(-pitch/2.,pitch/2.,\n -pitch/2.,pitch/2.,\n -h_core/2.,h_core/2.,\n boundary_type='reflective');\n\nfuel_cell = openmc.Cell();\nfuel_cell.region = -fuel_cyl\nfuel_cell.fill = fuel;\nfuel_cell.temperature = fuel_temp;\n\n\nclad_cell = openmc.Cell();\nclad_cell.region = +fuel_cyl & -pin_cyl;\nclad_cell.fill = clad;\n\nmod_cell = openmc.Cell();\nmod_cell.region = +pin_cyl & -core_cell;\nmod_cell.fill = water\n\nroot_univ = openmc.Universe();\nroot_univ.add_cells([fuel_cell,clad_cell,mod_cell]);\n\ngeometry = openmc.Geometry();\ngeometry.root_universe = root_univ;\n\n\n\nmaterials.export_to_xml();\ngeometry.export_to_xml();\n\n", "_____no_output_____" ], [ "settings = openmc.Settings();\nsettings.run_mode = 'eigenvalue';\nsettings.particles = 10000;\nsettings.batches = 100;\nsettings.inactive = 25\n\nbox = openmc.stats.Box(lower_left = (-r_fuel,-r_fuel,-h_fuel/2.),\n upper_right = (r_fuel,r_fuel,h_fuel/2.),\n only_fissionable=True);\nsrc = openmc.Source(space=box);\n\nsettings.source = src;\n\nsettings.temperature['method']='interpolation';\n\nsettings.export_to_xml();", "_____no_output_____" ], [ "root_univ.plot(width=(pitch,pitch));", "_____no_output_____" ], [ "openmc.run();", " %%%%%%%%%%%%%%%\n %%%%%%%%%%%%%%%%%%%%%%%%\n %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n %%%%%%%%%%%%%%%%%%%%%%%%\n %%%%%%%%%%%%%%%%%%%%%%%%\n ############### %%%%%%%%%%%%%%%%%%%%%%%%\n ################## %%%%%%%%%%%%%%%%%%%%%%%\n ################### %%%%%%%%%%%%%%%%%%%%%%%\n #################### %%%%%%%%%%%%%%%%%%%%%%\n ##################### %%%%%%%%%%%%%%%%%%%%%\n ###################### %%%%%%%%%%%%%%%%%%%%\n ####################### %%%%%%%%%%%%%%%%%%\n ####################### %%%%%%%%%%%%%%%%%\n ###################### %%%%%%%%%%%%%%%%%\n #################### %%%%%%%%%%%%%%%%%\n ################# %%%%%%%%%%%%%%%%%\n ############### %%%%%%%%%%%%%%%%\n ############ %%%%%%%%%%%%%%%\n ######## %%%%%%%%%%%%%%\n %%%%%%%%%%%\n\n | The OpenMC Monte Carlo Code\n Copyright | 2011-2021 MIT and OpenMC contributors\n License | https://docs.openmc.org/en/latest/license.html\n Version | 0.12.1\n Git SHA1 | d49e6ccb837685e399efdef8b29e46cdc693ccec\n Date/Time | 2021-05-10 16:43:28\n OpenMP Threads | 8\n\n Reading settings XML 
file...\n Reading cross sections XML file...\n Reading materials XML file...\n Reading geometry XML file...\n Reading U234 from /home/stu/OMC_DATA/endfb71_hdf5/U234.h5\n Reading U235 from /home/stu/OMC_DATA/endfb71_hdf5/U235.h5\n Reading U238 from /home/stu/OMC_DATA/endfb71_hdf5/U238.h5\n Reading U236 from /home/stu/OMC_DATA/endfb71_hdf5/U236.h5\n Reading O16 from /home/stu/OMC_DATA/endfb71_hdf5/O16.h5\n Reading O17 from /home/stu/OMC_DATA/endfb71_hdf5/O17.h5\n Reading Zr90 from /home/stu/OMC_DATA/endfb71_hdf5/Zr90.h5\n Reading Zr91 from /home/stu/OMC_DATA/endfb71_hdf5/Zr91.h5\n Reading Zr92 from /home/stu/OMC_DATA/endfb71_hdf5/Zr92.h5\n Reading Zr94 from /home/stu/OMC_DATA/endfb71_hdf5/Zr94.h5\n Reading Zr96 from /home/stu/OMC_DATA/endfb71_hdf5/Zr96.h5\n Reading H1 from /home/stu/OMC_DATA/endfb71_hdf5/H1.h5\n Reading H2 from /home/stu/OMC_DATA/endfb71_hdf5/H2.h5\n Reading c_H_in_H2O from /home/stu/OMC_DATA/endfb71_hdf5/c_H_in_H2O.h5\n Minimum neutron data temperature: 600.0 K\n Maximum neutron data temperature: 600.0 K\n Preparing distributed cell instances...\n Writing summary.h5 file...\n Maximum neutron transport energy: 20000000.0 eV for U235\n Initializing source particles...\n\n ====================> K EIGENVALUE SIMULATION <====================\n\n Bat./Gen. k Average k\n ========= ======== ====================\n 1/1 1.13330\n 2/1 1.13332\n 3/1 1.12986\n 4/1 1.12473\n 5/1 1.15076\n 6/1 1.11960\n 7/1 1.12288\n 8/1 1.16241\n 9/1 1.12366\n 10/1 1.14383\n 11/1 1.13597\n 12/1 1.11599\n 13/1 1.12917\n 14/1 1.13848\n 15/1 1.12293\n 16/1 1.12045\n 17/1 1.10990\n 18/1 1.11323\n 19/1 1.14220\n 20/1 1.17918\n 21/1 1.15337\n 22/1 1.14134\n 23/1 1.11862\n 24/1 1.11460\n 25/1 1.13786\n 26/1 1.11298\n 27/1 1.14495 1.12896 +/- 0.01598\n 28/1 1.12940 1.12911 +/- 0.00923\n 29/1 1.15678 1.13603 +/- 0.00951\n 30/1 1.13212 1.13525 +/- 0.00741\n 31/1 1.15431 1.13842 +/- 0.00683\n 32/1 1.14129 1.13883 +/- 0.00579\n 33/1 1.13388 1.13821 +/- 0.00505\n 34/1 1.14852 1.13936 +/- 0.00460\n 35/1 1.15359 1.14078 +/- 0.00435\n 36/1 1.13717 1.14045 +/- 0.00395\n 37/1 1.11754 1.13854 +/- 0.00408\n 38/1 1.14473 1.13902 +/- 0.00378\n 39/1 1.13514 1.13874 +/- 0.00351\n 40/1 1.14309 1.13903 +/- 0.00328\n 41/1 1.12419 1.13811 +/- 0.00321\n 42/1 1.16513 1.13969 +/- 0.00341\n 43/1 1.13054 1.13919 +/- 0.00325\n 44/1 1.16097 1.14033 +/- 0.00328\n 45/1 1.11692 1.13916 +/- 0.00333\n 46/1 1.14787 1.13958 +/- 0.00319\n 47/1 1.14340 1.13975 +/- 0.00305\n 48/1 1.14260 1.13987 +/- 0.00292\n 49/1 1.11251 1.13873 +/- 0.00302\n 50/1 1.13420 1.13855 +/- 0.00290\n 51/1 1.13906 1.13857 +/- 0.00278\n 52/1 1.16966 1.13972 +/- 0.00292\n 53/1 1.14142 1.13978 +/- 0.00281\n 54/1 1.12441 1.13925 +/- 0.00276\n 55/1 1.12082 1.13864 +/- 0.00274\n 56/1 1.15516 1.13917 +/- 0.00270\n 57/1 1.12057 1.13859 +/- 0.00268\n 58/1 1.12775 1.13826 +/- 0.00262\n 59/1 1.10942 1.13741 +/- 0.00268\n 60/1 1.15757 1.13799 +/- 0.00266\n 61/1 1.13339 1.13786 +/- 0.00259\n 62/1 1.11602 1.13727 +/- 0.00259\n 63/1 1.15500 1.13774 +/- 0.00256\n 64/1 1.16040 1.13832 +/- 0.00256\n 65/1 1.13148 1.13815 +/- 0.00250\n 66/1 1.11424 1.13757 +/- 0.00251\n 67/1 1.11745 1.13709 +/- 0.00250\n 68/1 1.12179 1.13673 +/- 0.00246\n 69/1 1.13592 1.13671 +/- 0.00241\n 70/1 1.11661 1.13627 +/- 0.00240\n 71/1 1.12463 1.13601 +/- 0.00236\n 72/1 1.11764 1.13562 +/- 0.00234\n 73/1 1.11708 1.13524 +/- 0.00232\n 74/1 1.12956 1.13512 +/- 0.00228\n 75/1 1.15045 1.13543 +/- 0.00225\n 76/1 1.16785 1.13606 +/- 0.00230\n 77/1 1.13094 1.13596 +/- 0.00225\n 78/1 1.16434 1.13650 +/- 0.00228\n 79/1 
1.13344 1.13644 +/- 0.00223\n 80/1 1.14425 1.13658 +/- 0.00220\n 81/1 1.13127 1.13649 +/- 0.00216\n 82/1 1.16333 1.13696 +/- 0.00217\n 83/1 1.13221 1.13688 +/- 0.00214\n 84/1 1.15028 1.13711 +/- 0.00211\n 85/1 1.12838 1.13696 +/- 0.00208\n 86/1 1.14708 1.13713 +/- 0.00205\n 87/1 1.14026 1.13718 +/- 0.00202\n 88/1 1.12071 1.13692 +/- 0.00201\n 89/1 1.11532 1.13658 +/- 0.00200\n 90/1 1.15103 1.13680 +/- 0.00198\n 91/1 1.14755 1.13696 +/- 0.00196\n 92/1 1.13380 1.13692 +/- 0.00193\n 93/1 1.12608 1.13676 +/- 0.00191\n 94/1 1.14933 1.13694 +/- 0.00189\n 95/1 1.13916 1.13697 +/- 0.00186\n 96/1 1.14291 1.13705 +/- 0.00184\n 97/1 1.13837 1.13707 +/- 0.00181\n 98/1 1.17242 1.13756 +/- 0.00185\n 99/1 1.15117 1.13774 +/- 0.00184\n 100/1 1.14802 1.13788 +/- 0.00182\n Creating state point statepoint.100.h5...\n\n =======================> TIMING STATISTICS <=======================\n\n Total time for initialization = 3.9230e+00 seconds\n Reading cross sections = 3.8887e+00 seconds\n Total time in simulation = 3.7627e+01 seconds\n Time in transport only = 3.7478e+01 seconds\n Time in inactive batches = 8.5269e+00 seconds\n Time in active batches = 2.9100e+01 seconds\n Time synchronizing fission bank = 8.1082e-02 seconds\n Sampling source sites = 6.7674e-02 seconds\n SEND/RECV source sites = 1.3101e-02 seconds\n Time accumulating tallies = 1.2880e-04 seconds\n Time writing statepoints = 5.3223e-03 seconds\n Total time for finalization = 1.3530e-06 seconds\n Total time elapsed = 4.1572e+01 seconds\n Calculation Rate (inactive) = 29319.0 particles/second\n Calculation Rate (active) = 25773.4 particles/second\n\n ============================> RESULTS <============================\n\n k-effective (Collision) = 1.13815 +/- 0.00133\n k-effective (Track-length) = 1.13788 +/- 0.00182\n k-effective (Absorption) = 1.13913 +/- 0.00108\n Combined k-effective = 1.13880 +/- 0.00096\n Leakage Fraction = 0.00000 +/- 0.00000\n\n" ], [ "operator = openmc.deplete.Operator(geometry,settings,\"chain_casl_pwr.xml\");\n\npower = 1e4;\ndays = 24*3600;\ntime_steps = [0.1*days,0.1*days,0.3*days,0.5*days,1.*days,30.*days,30.*days,100.*days, 360.*days, 360.*days, 360.*days,360.*days,720.*days,720.*days];\nintegrator = openmc.deplete.PredictorIntegrator(operator,time_steps,power=power);", "_____no_output_____" ], [ "integrator.integrate()", "_____no_output_____" ], [ "results = openmc.deplete.ResultsList.from_hdf5('./depletion_results.h5')", "_____no_output_____" ], [ "time,k = results.get_eigenvalue()\ntime /= (24*60*60);", "_____no_output_____" ], [ "from matplotlib import pyplot", "_____no_output_____" ], [ "pyplot.errorbar(time,k[:,0],yerr=k[:,1]);\npyplot.title('Burnup Result for Pincell')\npyplot.xlabel('Time [d]');\npyplot.ylabel('$k_{eff} \\pm \\sigma$');\npyplot.grid()", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
d01579acb9d0975f35bd146cdc4c4850e2397110
1,484
ipynb
Jupyter Notebook
src/Niederriter_GC_examples.ipynb
robbyyt/quantum-cryptosystems
cb03e1097bcf61ac6785958ac92382aceaf9ac52
[ "MIT" ]
1
2022-01-02T12:57:35.000Z
2022-01-02T12:57:35.000Z
src/.ipynb_checkpoints/Niederriter_GC_examples-checkpoint.ipynb
robbyyt/quantum-cryptosystems
cb03e1097bcf61ac6785958ac92382aceaf9ac52
[ "MIT" ]
null
null
null
src/.ipynb_checkpoints/Niederriter_GC_examples-checkpoint.ipynb
robbyyt/quantum-cryptosystems
cb03e1097bcf61ac6785958ac92382aceaf9ac52
[ "MIT" ]
1
2022-01-02T12:57:43.000Z
2022-01-02T12:57:43.000Z
28
91
0.580863
[ [ [ "# function to create a random message to encrypt\n\ndef GetRandomMessageWithWeight(message_length, message_weight):\n message = matrix(GF(2), 1, message_length)\n rng = range(message_length)\n for i in range(message_weight):\n p = floor(len(rng)*random())\n message[0,rng[p]] = 1 \n rng=[*rng[:p],*rng[p+1:]]\n return message\n\ncrypto = Niederreiter()\nmessage = GetRandomMessageWithWeight(crypto._PublicKey.ncols(),crypto._g.degree())\nencrypted_message = crypto.encrypt(message)\ndecrypted_message = crypto.decrypt(encrypted_message)\nprint('random message:', message.str())\nprint('encrypted message:', encrypted_message.str())\nprint('decrpted message:', decrypted_message.str())\nprint('decryption is: ', message==decrypted_message)", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code" ] ]
d0159709681746a59d62de2748ff78d86cb45cb9
6,019
ipynb
Jupyter Notebook
object-detection-azureml/031_DevAndRegisterModel.ipynb
Bhaskers-Blu-Org2/deploy-MLmodels-on-iotedge
e27f2667347e5349206a66ac29f9919c408c7676
[ "MIT" ]
13
2020-02-18T07:05:21.000Z
2022-03-28T14:23:12.000Z
object-detection-azureml/031_DevAndRegisterModel.ipynb
Bhaskers-Blu-Org2/deploy-MLmodels-on-iotedge
e27f2667347e5349206a66ac29f9919c408c7676
[ "MIT" ]
3
2020-01-28T23:03:09.000Z
2020-11-13T18:24:40.000Z
object-detection-azureml/031_DevAndRegisterModel.ipynb
microsoft/deploy-MLmodels-on-iotedge
e27f2667347e5349206a66ac29f9919c408c7676
[ "MIT" ]
8
2020-02-21T01:40:29.000Z
2022-03-28T13:34:37.000Z
23.420233
237
0.557734
[ [ [ "# Develop and Register Model\nIn this noteook, we will go through the steps to load the MaskRCNN model and call the model to find the top predictions. We will then register the model in ACR using AzureML.\n\n Note: Always make sure you don't have any lingering notebooks running (Shutdown previous notebooks). Otherwise it may cause GPU memory issue.\n", "_____no_output_____" ] ], [ [ "%reload_ext autoreload\n%autoreload 2\n%matplotlib inline", "_____no_output_____" ], [ "import torch\nimport torchvision\nimport numpy as np\nfrom pathlib import *\nfrom PIL import Image\nfrom azureml.core.workspace import Workspace\nfrom azureml.core.model import Model\nfrom dotenv import set_key, find_dotenv\nfrom testing_utilities import get_auth\nimport urllib", "_____no_output_____" ], [ "env_path = find_dotenv(raise_error_if_not_found=True)", "_____no_output_____" ] ], [ [ "### Model\n\nWe load a pretrained [**Mask R-CNN ResNet-50 FPN** object detection model](https://pytorch.org/blog/torchvision03/). This model is trained on subset of COCO train2017, which contains the same 20 categories as those from Pascal VOC.", "_____no_output_____" ] ], [ [ "# use pretrained model: https://pytorch.org/blog/torchvision03/\nmodel = torchvision.models.detection.maskrcnn_resnet50_fpn(pretrained=True)\n\n#device = torch.device(\"cpu\")\ndevice = torch.device(\"cuda\") if torch.cuda.is_available() else torch.device(\"cpu\")\nmodel.to(device)", "_____no_output_____" ], [ "url = \"https://download.pytorch.org/models/maskrcnn_resnet50_fpn_coco-bf2d0c1e.pth\"\nurllib.request.urlretrieve(url, \"maskrcnn_resnet50.pth\")", "_____no_output_____" ], [ "img_path = \"./test_image.jpg\"\nprint(Image.open(img_path).size)\nimg = Image.open(img_path)", "(1920, 1080)\n" ], [ "img = np.array(img.convert(mode='RGB'), dtype = np.float32) \nimg_tensor = torchvision.transforms.functional.to_tensor(img)/255", "_____no_output_____" ], [ "model.eval()\nwith torch.no_grad():\n prediction = model([img_tensor.to(device)])", "_____no_output_____" ], [ "print(prediction)", "_____no_output_____" ] ], [ [ "### Register Model\n\n", "_____no_output_____" ] ], [ [ "# Get workspace\n# Load existing workspace from the config file info.\n\nws = Workspace.from_config(auth=get_auth())\nprint(ws.name, ws.resource_group, ws.location, ws.subscription_id, sep=\"\\n\")", "_____no_output_____" ], [ "\nmodel = Model.register(\n model_path=\"maskrcnn_resnet50.pth\", # this points to a local file\n model_name=\"maskrcnn_resnet50_model\", # this is the name the model is registered as\n tags={\"model\": \"dl\", \"framework\": \"maskrcnn\"},\n description=\"torchvision maskrcnn_resnet50\",\n workspace=ws,\n)", "Registering model maskrcnn_resnet50_model\n" ], [ "print(model.name, model.description, model.version)", "maskrcnn_resnet50_model torchvision maskrcnn_resnet50 1\n" ], [ "\nset_key(env_path, \"model_version\", str(model.version))", "_____no_output_____" ] ], [ [ "Next we will proceed with notebook [032_DevelopModelDriver.ipynb](032_DevelopModelDriver.ipynb).", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ] ]
d0159cb0cd3ad9ab9f4bcea5b579b38adc1fb573
7,436
ipynb
Jupyter Notebook
identification-of-phishing-websites.ipynb
Shubha23/Phishing-Website-Classification
59ce627c06e4aeae0e3b8085956cbd878d99b5ab
[ "Apache-2.0" ]
null
null
null
identification-of-phishing-websites.ipynb
Shubha23/Phishing-Website-Classification
59ce627c06e4aeae0e3b8085956cbd878d99b5ab
[ "Apache-2.0" ]
null
null
null
identification-of-phishing-websites.ipynb
Shubha23/Phishing-Website-Classification
59ce627c06e4aeae0e3b8085956cbd878d99b5ab
[ "Apache-2.0" ]
1
2018-10-19T22:44:52.000Z
2018-10-19T22:44:52.000Z
7,436
7,436
0.742873
[ [ [ "# This Python 3 environment comes with many helpful analytics libraries installed\n# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python\n# For example, here's several helpful packages to load in \n# Input data files are available in the \"../input/\" directory.\n# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory\n\nimport os\nprint(os.listdir(\"../input\"))\n\n# Any results you write to the current directory are saved as output.", "_____no_output_____" ], [ "import numpy as np \nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n# Read the data\ndata = pd.read_csv('../input/Dataset.csv')\n\n# View first and last 5 observations\nprint(data.head())\nprint(data.tail())", "_____no_output_____" ], [ "# Describe statistical information of data\nprint(data.describe())\n# Below stats show that 75 percentile of obseravtions belong to class 1 ", "_____no_output_____" ], [ "# Check column types\nprint(data.info()) \n\n# All comumns are int type, so no change is required", "_____no_output_____" ], [ "# Plot distribution of classes using Histograms\nplt.figure(figsize =(8,8))\nplt.hist(data.Result) \n\n# It shows that benign class have about 1000+ observations than malware", "_____no_output_____" ], [ "# Look for missing values\nprint(data.isnull().sum()) \n\n# No missing values found, so no need to drop or replace any value", "_____no_output_____" ], [ "# Generate correlation matrix\nprint(data.corr())\n\nimport seaborn as sns\nplt.figure(figsize =(8,8))\nsns.heatmap(data.corr()) # Generate heatmap (though very less clarity due to large no. of ftrs", "_____no_output_____" ], [ "print(data.corr()['Result'].sort_values()) # Print correlation with target variable", "_____no_output_____" ], [ "# Remove features having correlation coeff. 
between +/- 0.03\ndata.drop(['Favicon','Iframe','Redirect',\n 'popUpWidnow','RightClick','Submitting_to_email'],axis=1,inplace=True)\nprint(len(data.columns))", "_____no_output_____" ], [ "# Prepare data for models\ny = data['Result'].values\nX = data.drop(['Result'], axis = 1)\n\nfrom sklearn.metrics import accuracy_score,roc_curve,auc, confusion_matrix\nfrom sklearn import preprocessing\nfrom sklearn.model_selection import train_test_split\n\n# Split the data as training and testing data - 70% train size, 30% test size\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.3, random_state = None)", "_____no_output_____" ], [ "#1 Classification using Random Forest Classifier\nfrom sklearn.ensemble import RandomForestClassifier\nrfc = RandomForestClassifier()\nrfc = rfc.fit(X_train,y_train)\nprediction = rfc.predict(X_test)\nprint(\"Accuracy with RF classifier:\",accuracy_score(y_test, prediction)) \nfpr,tpr,thresh = roc_curve(y_test,prediction) \nroc_auc = accuracy_score(y_test,prediction) # Calculate ROC AUC\n\n# Plot ROC curve for Random Forest\nplt.plot(fpr,tpr,'g',label = 'Random Forest')\nplt.legend(\"Random Forest\", loc='lower right')\nplt.legend(loc='lower right')\nprint(\"Conf matrix RF classifier:\",confusion_matrix(y_test,prediction)) # Generate confusion matrix\n\n#2 Classification using logistic regression\nfrom sklearn.linear_model import LogisticRegression\nlogreg = LogisticRegression()\nlogreg = logreg.fit(X_train,y_train)\nprediction = logreg.predict(X_test)\nprint(\"Accuracy with Log Reg:\", accuracy_score(y_test, prediction))\nprint (\"Conf matrix Log Reg:\",confusion_matrix(y_test,prediction))\nfpr,tpr,thresh = roc_curve(y_test,prediction)\nroc_auc = accuracy_score(y_test,prediction)\n\n# Plot ROC curve for Logistic Regression\nplt.plot(fpr,tpr,'orange',label = 'Logistic Regression')\nplt.legend(\"Logistic Regression\", loc='lower right')\nplt.xlabel(\"False positive rate\")\nplt.ylabel(\"True positive rate\")\nplt.legend(loc='lower right')\n\n#3 Classification using SVM\nfrom sklearn.svm import SVC\nsvc_l = SVC(kernel = \"linear\", C = 0.025)\nsvc_l = svc_l.fit(X_train,y_train)\nprediction = svc_l.predict(X_test)\nprint(\"Accuracy with SVM-Linear:\",accuracy_score(y_test, prediction))\nfpr,tpr,thresh = roc_curve(y_test,prediction)\nroc_auc = accuracy_score(y_test,prediction)\n\n# Plot ROC curve for SVM-linear\nplt.plot(fpr,tpr,'b',label = 'SVM')\nplt.legend(\"SVM\", loc ='lower right')\nplt.legend(loc ='lower right')\nprint(\"Conf matrix SVM-linear:\",confusion_matrix(y_test,prediction))\n\nplt.show()\n\n'''\n# -------- Apply Recursive Feature Elimination(RFE) and use reduced feature set for prediction ------------------------\n# Recursive Feature Elimination(RFE) is a technique that takes entire feature set as input and removes features one at \n# a time up to a specified number or until a stopping criteria is met.\n'''\nfrom sklearn.feature_selection import RFE\nrfe = RFE(rfc,27) \nrfe = rfe.fit(X_train, y_train) # Train RF classifier with only 27 features now\npred = rfe.predict(X_test)\n\n# Test accuracy on reduced data\nprint(\"Accuracy by RFClassifier after RFE is applied:\", accuracy_score(y_test,pred))\n\nrfe = RFE(svc_l,27)\nrfe = rfe.fit(X_train, y_train) # Train SVM with only 27 features now\npred = rfe.predict(X_test)\nprint(\"Accuracy by SVM-Linear after RFE is applied:\", accuracy_score(y_test,pred))\n\nrfe = RFE(logreg,27)\nrfe = rfe.fit(X_train, y_train) # Train Logistic-Reg with only 27 features now\npred = 
rfe.predict(X_test)\nprint(\"Accuracy by Logistic Regression after RFE is applied:\", accuracy_score(y_test,pred))", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
d015aa8f04a85e256485a4dea3fc272ada8404f9
28,040
ipynb
Jupyter Notebook
TensorFI_Capsnet.ipynb
MahdiSajedi/TensorFI
df71d82498158c2780f84556c0f571f06181adec
[ "MIT" ]
null
null
null
TensorFI_Capsnet.ipynb
MahdiSajedi/TensorFI
df71d82498158c2780f84556c0f571f06181adec
[ "MIT" ]
null
null
null
TensorFI_Capsnet.ipynb
MahdiSajedi/TensorFI
df71d82498158c2780f84556c0f571f06181adec
[ "MIT" ]
null
null
null
74.376658
304
0.622896
[ [ [ "<a href=\"https://colab.research.google.com/github/MahdiSajedi/TensorFI/blob/master/TensorFI_Capsnet.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ], [ "", "_____no_output_____" ] ], [ [ "!nvidia-smi\n", "Sun Dec 6 06:17:07 2020 \n+-----------------------------------------------------------------------------+\n| NVIDIA-SMI 455.45.01 Driver Version: 418.67 CUDA Version: 10.1 |\n|-------------------------------+----------------------+----------------------+\n| GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC |\n| Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. |\n| | | MIG M. |\n|===============================+======================+======================|\n| 0 Tesla T4 Off | 00000000:00:04.0 Off | 0 |\n| N/A 39C P8 10W / 70W | 0MiB / 15079MiB | 0% Default |\n| | | ERR! |\n+-------------------------------+----------------------+----------------------+\n \n+-----------------------------------------------------------------------------+\n| Processes: |\n| GPU GI CI PID Type Process name GPU Memory |\n| ID ID Usage |\n|=============================================================================|\n| No running processes found |\n+-----------------------------------------------------------------------------+\n" ] ], [ [ "## import `tensorflow version 1` for colab and `os`\n", "_____no_output_____" ] ], [ [ "# set tensorflow version to 1\n%tensorflow_version 1.x\n# if need to install some spesfic version\n# !pip install tensorflow-gpu==1.10.0\n\nimport os\n", "_____no_output_____" ] ], [ [ "## **Download Modified git repo and change dir to `TensorFI`**", "_____no_output_____" ] ], [ [ "!git clone https://github.com/MahdiSajedi/TensorFI.git\nos.chdir('TensorFI')\n!pwd\n%ls", "fatal: destination path 'TensorFI' already exists and is not an empty directory.\n/content/TensorFI/TensorFI\nfaultTypes.py fiLog.py __init__.py modifyGraph.py tensorFI.py\nfiConfig.py fiStats.py injectFault.py printGraph.py\n" ] ], [ [ "## Intstall `TensorFI` pip package\n## Run `capsnet.py` file", "_____no_output_____" ] ], [ [ "!pip install tensorfi\n!python ./Tests/capsnet.py", "Collecting tensorfi\n Downloading https://files.pythonhosted.org/packages/a9/42/785bff81fdc16642c306efcfc416f9c03e64f3d904dc468c38912656c07f/TensorFI-2.0.0-py2-none-any.whl\nRequirement already satisfied: scikit-learn in /usr/local/lib/python2.7/dist-packages (from tensorfi) (0.20.3)\nRequirement already satisfied: tensorflow in /tensorflow-1.15.2/python2.7 (from tensorfi) (1.15.2)\nRequirement already satisfied: pyyaml in /usr/local/lib/python2.7/dist-packages (from tensorfi) (3.13)\nRequirement already satisfied: enum34 in /usr/local/lib/python2.7/dist-packages (from tensorfi) (1.1.6)\nRequirement already satisfied: numpy in /usr/local/lib/python2.7/dist-packages (from tensorfi) (1.16.4)\nRequirement already satisfied: scipy>=0.13.3 in /usr/local/lib/python2.7/dist-packages (from scikit-learn->tensorfi) (1.2.2)\nRequirement already satisfied: gast==0.2.2 in /usr/local/lib/python2.7/dist-packages (from tensorflow->tensorfi) (0.2.2)\nRequirement already satisfied: grpcio>=1.8.6 in /usr/local/lib/python2.7/dist-packages (from tensorflow->tensorfi) (1.15.0)\nRequirement already satisfied: wheel; python_version < \"3\" in /usr/local/lib/python2.7/dist-packages (from tensorflow->tensorfi) (0.36.0)\nRequirement already satisfied: tensorboard<1.16.0,>=1.15.0 in /tensorflow-1.15.2/python2.7 (from tensorflow->tensorfi) 
(1.15.0)\nRequirement already satisfied: protobuf>=3.6.1 in /usr/local/lib/python2.7/dist-packages (from tensorflow->tensorfi) (3.8.0)\nRequirement already satisfied: keras-preprocessing>=1.0.5 in /usr/local/lib/python2.7/dist-packages (from tensorflow->tensorfi) (1.1.0)\nRequirement already satisfied: backports.weakref>=1.0rc1; python_version < \"3.4\" in /usr/local/lib/python2.7/dist-packages (from tensorflow->tensorfi) (1.0.post1)\nCollecting tensorflow-estimator==1.15.1\n\u001b[?25l Downloading https://files.pythonhosted.org/packages/de/62/2ee9cd74c9fa2fa450877847ba560b260f5d0fb70ee0595203082dafcc9d/tensorflow_estimator-1.15.1-py2.py3-none-any.whl (503kB)\n\u001b[K |████████████████████████████████| 512kB 11.7MB/s \n\u001b[?25hRequirement already satisfied: keras-applications>=1.0.8 in /usr/local/lib/python2.7/dist-packages (from tensorflow->tensorfi) (1.0.8)\nRequirement already satisfied: functools32>=3.2.3; python_version < \"3\" in /usr/local/lib/python2.7/dist-packages (from tensorflow->tensorfi) (3.2.3.post2)\nRequirement already satisfied: wrapt>=1.11.1 in /usr/local/lib/python2.7/dist-packages (from tensorflow->tensorfi) (1.11.2)\nRequirement already satisfied: six>=1.10.0 in /usr/local/lib/python2.7/dist-packages (from tensorflow->tensorfi) (1.15.0)\nRequirement already satisfied: absl-py>=0.7.0 in /usr/local/lib/python2.7/dist-packages (from tensorflow->tensorfi) (0.7.1)\nRequirement already satisfied: opt-einsum>=2.3.2 in /usr/local/lib/python2.7/dist-packages (from tensorflow->tensorfi) (2.3.2)\nRequirement already satisfied: termcolor>=1.1.0 in /usr/local/lib/python2.7/dist-packages (from tensorflow->tensorfi) (1.1.0)\nRequirement already satisfied: google-pasta>=0.1.6 in /usr/local/lib/python2.7/dist-packages (from tensorflow->tensorfi) (0.1.7)\nRequirement already satisfied: mock>=2.0.0; python_version < \"3\" in /usr/local/lib/python2.7/dist-packages (from tensorflow->tensorfi) (2.0.0)\nRequirement already satisfied: astor>=0.6.0 in /usr/local/lib/python2.7/dist-packages (from tensorflow->tensorfi) (0.8.1)\nRequirement already satisfied: futures>=2.2.0 in /usr/local/lib/python2.7/dist-packages (from grpcio>=1.8.6->tensorflow->tensorfi) (3.2.0)\nRequirement already satisfied: setuptools>=41.0.0 in /usr/local/lib/python2.7/dist-packages (from tensorboard<1.16.0,>=1.15.0->tensorflow->tensorfi) (44.1.1)\nRequirement already satisfied: werkzeug>=0.11.15 in /usr/local/lib/python2.7/dist-packages (from tensorboard<1.16.0,>=1.15.0->tensorflow->tensorfi) (0.15.5)\nRequirement already satisfied: markdown>=2.6.8 in /usr/local/lib/python2.7/dist-packages (from tensorboard<1.16.0,>=1.15.0->tensorflow->tensorfi) (3.1.1)\nRequirement already satisfied: h5py in /usr/local/lib/python2.7/dist-packages (from keras-applications>=1.0.8->tensorflow->tensorfi) (2.8.0)\nRequirement already satisfied: funcsigs>=1; python_version < \"3.3\" in /usr/local/lib/python2.7/dist-packages (from mock>=2.0.0; python_version < \"3\"->tensorflow->tensorfi) (1.0.2)\nRequirement already satisfied: pbr>=0.11 in /usr/local/lib/python2.7/dist-packages (from mock>=2.0.0; python_version < \"3\"->tensorflow->tensorfi) (5.4.0)\nInstalling collected packages: tensorfi, tensorflow-estimator\n Found existing installation: tensorflow-estimator 1.15.0\n Uninstalling tensorflow-estimator-1.15.0:\n Successfully uninstalled tensorflow-estimator-1.15.0\nSuccessfully installed tensorfi-2.0.0 tensorflow-estimator-1.15.1\nWARNING: Logging before flag parsing goes to stderr.\nW1206 07:11:45.691169 140003042297728 
module_wrapper.py:139] From ./Tests/capsnet.py:10: The name tf.reset_default_graph is deprecated. Please use tf.compat.v1.reset_default_graph instead.\n\nW1206 07:11:45.691445 140003042297728 module_wrapper.py:139] From ./Tests/capsnet.py:11: The name tf.set_random_seed is deprecated. Please use tf.compat.v1.set_random_seed instead.\n\nW1206 07:11:47.000478 140003042297728 deprecation.py:323] From ./Tests/capsnet.py:15: read_data_sets (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\nInstructions for updating:\nPlease use alternatives such as official/mnist/dataset.py from tensorflow/models.\nW1206 07:11:47.000724 140003042297728 deprecation.py:323] From /tensorflow-1.15.2/python2.7/tensorflow_core/contrib/learn/python/learn/datasets/mnist.py:260: maybe_download (from tensorflow.contrib.learn.python.learn.datasets.base) is deprecated and will be removed in a future version.\nInstructions for updating:\nPlease write your own downloading logic.\nW1206 07:11:47.001030 140003042297728 deprecation.py:323] From /tensorflow-1.15.2/python2.7/tensorflow_core/contrib/learn/python/learn/datasets/base.py:252: wrapped_fn (from tensorflow.contrib.learn.python.learn.datasets.base) is deprecated and will be removed in a future version.\nInstructions for updating:\nPlease use urllib or similar directly.\nSuccessfully downloaded train-images-idx3-ubyte.gz 9912422 bytes.\nW1206 07:11:47.076227 140003042297728 deprecation.py:323] From /tensorflow-1.15.2/python2.7/tensorflow_core/contrib/learn/python/learn/datasets/mnist.py:262: extract_images (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\nInstructions for updating:\nPlease use tf.data to implement this functionality.\nExtracting /tmp/data/train-images-idx3-ubyte.gz\nSuccessfully downloaded train-labels-idx1-ubyte.gz 28881 bytes.\nW1206 07:11:47.397264 140003042297728 deprecation.py:323] From /tensorflow-1.15.2/python2.7/tensorflow_core/contrib/learn/python/learn/datasets/mnist.py:267: extract_labels (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\nInstructions for updating:\nPlease use tf.data to implement this functionality.\nExtracting /tmp/data/train-labels-idx1-ubyte.gz\nSuccessfully downloaded t10k-images-idx3-ubyte.gz 1648877 bytes.\nExtracting /tmp/data/t10k-images-idx3-ubyte.gz\nSuccessfully downloaded t10k-labels-idx1-ubyte.gz 4542 bytes.\nExtracting /tmp/data/t10k-labels-idx1-ubyte.gz\nW1206 07:11:47.491441 140003042297728 deprecation.py:323] From /tensorflow-1.15.2/python2.7/tensorflow_core/contrib/learn/python/learn/datasets/mnist.py:290: __init__ (from tensorflow.contrib.learn.python.learn.datasets.mnist) is deprecated and will be removed in a future version.\nInstructions for updating:\nPlease use alternatives such as official/mnist/dataset.py from tensorflow/models.\nW1206 07:11:47.690685 140003042297728 module_wrapper.py:139] From ./Tests/capsnet.py:18: The name tf.placeholder is deprecated. 
Please use tf.compat.v1.placeholder instead.\n\nW1206 07:11:47.694411 140003042297728 deprecation.py:323] From ./Tests/capsnet.py:31: conv2d (from tensorflow.python.layers.convolutional) is deprecated and will be removed in a future version.\nInstructions for updating:\nUse `tf.keras.layers.Conv2D` instead.\nW1206 07:11:47.695725 140003042297728 deprecation.py:323] From /tensorflow-1.15.2/python2.7/tensorflow_core/python/layers/convolutional.py:424: apply (from tensorflow.python.keras.engine.base_layer) is deprecated and will be removed in a future version.\nInstructions for updating:\nPlease use `layer.__call__` method instead.\nW1206 07:11:47.743329 140003042297728 module_wrapper.py:139] From ./Tests/capsnet.py:61: The name tf.random_normal is deprecated. Please use tf.random.normal instead.\n\nW1206 07:11:47.869446 140003042297728 module_wrapper.py:139] From ./Tests/capsnet.py:159: The name tf.placeholder_with_default is deprecated. Please use tf.compat.v1.placeholder_with_default instead.\n\nW1206 07:11:47.879053 140003042297728 deprecation.py:323] From ./Tests/capsnet.py:184: dense (from tensorflow.python.layers.core) is deprecated and will be removed in a future version.\nInstructions for updating:\nUse keras.layers.Dense instead.\nW1206 07:11:47.918373 140003042297728 module_wrapper.py:139] From ./Tests/capsnet.py:205: The name tf.train.AdamOptimizer is deprecated. Please use tf.compat.v1.train.AdamOptimizer instead.\n\nW1206 07:11:47.991389 140003042297728 deprecation.py:323] From /tensorflow-1.15.2/python2.7/tensorflow_core/python/ops/math_grad.py:1424: where (from tensorflow.python.ops.array_ops) is deprecated and will be removed in a future version.\nInstructions for updating:\nUse tf.where in 2.0, which has the same broadcast rule as np.where\nW1206 07:11:48.392817 140003042297728 module_wrapper.py:139] From ./Tests/capsnet.py:209: The name tf.global_variables_initializer is deprecated. Please use tf.compat.v1.global_variables_initializer instead.\n\nW1206 07:11:48.393774 140003042297728 module_wrapper.py:139] From ./Tests/capsnet.py:226: The name tf.Session is deprecated. 
Please use tf.compat.v1.Session instead.\n\n2020-12-06 07:11:48.395217: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcuda.so.1\n2020-12-06 07:11:48.450687: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:983] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n2020-12-06 07:11:48.451306: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1639] Found device 0 with properties: \nname: Tesla T4 major: 7 minor: 5 memoryClockRate(GHz): 1.59\npciBusID: 0000:00:04.0\n2020-12-06 07:11:48.451626: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcudart.so.10.1\n2020-12-06 07:11:48.649872: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcublas.so.10\n2020-12-06 07:11:48.804605: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcufft.so.10\n2020-12-06 07:11:48.824673: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcurand.so.10\n2020-12-06 07:11:49.098178: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcusolver.so.10\n2020-12-06 07:11:49.114419: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcusparse.so.10\n2020-12-06 07:11:49.615314: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcudnn.so.7\n2020-12-06 07:11:49.615567: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:983] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n2020-12-06 07:11:49.616265: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:983] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n2020-12-06 07:11:49.616826: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1767] Adding visible gpu devices: 0\n2020-12-06 07:11:49.625607: I tensorflow/core/platform/profile_utils/cpu_utils.cc:94] CPU Frequency: 2200000000 Hz\n2020-12-06 07:11:49.626908: I tensorflow/compiler/xla/service/service.cc:168] XLA service 0x5644ab3b4140 initialized for platform Host (this does not guarantee that XLA will be used). Devices:\n2020-12-06 07:11:49.626942: I tensorflow/compiler/xla/service/service.cc:176] StreamExecutor device (0): Host, Default Version\n2020-12-06 07:11:49.758105: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:983] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n2020-12-06 07:11:49.758855: I tensorflow/compiler/xla/service/service.cc:168] XLA service 0x5644ab3b5480 initialized for platform CUDA (this does not guarantee that XLA will be used). 
Devices:\n2020-12-06 07:11:49.758887: I tensorflow/compiler/xla/service/service.cc:176] StreamExecutor device (0): Tesla T4, Compute Capability 7.5\n2020-12-06 07:11:49.759698: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:983] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n2020-12-06 07:11:49.760302: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1639] Found device 0 with properties: \nname: Tesla T4 major: 7 minor: 5 memoryClockRate(GHz): 1.59\npciBusID: 0000:00:04.0\n2020-12-06 07:11:49.760384: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcudart.so.10.1\n2020-12-06 07:11:49.760409: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcublas.so.10\n2020-12-06 07:11:49.760436: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcufft.so.10\n2020-12-06 07:11:49.760459: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcurand.so.10\n2020-12-06 07:11:49.760482: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcusolver.so.10\n2020-12-06 07:11:49.760506: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcusparse.so.10\n2020-12-06 07:11:49.760547: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcudnn.so.7\n2020-12-06 07:11:49.760626: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:983] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n2020-12-06 07:11:49.761254: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:983] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n2020-12-06 07:11:49.761829: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1767] Adding visible gpu devices: 0\n2020-12-06 07:11:49.763699: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcudart.so.10.1\n2020-12-06 07:11:49.765094: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1180] Device interconnect StreamExecutor with strength 1 edge matrix:\n2020-12-06 07:11:49.765127: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1186] 0 \n2020-12-06 07:11:49.765146: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1199] 0: N \n2020-12-06 07:11:49.765585: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:983] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n2020-12-06 07:11:49.766304: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:983] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n2020-12-06 07:11:49.766896: W tensorflow/core/common_runtime/gpu/gpu_bfc_allocator.cc:39] Overriding allow_growth setting because the TF_FORCE_GPU_ALLOW_GROWTH environment variable is set. 
Original config value was 0.\n2020-12-06 07:11:49.766940: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1325] Created TensorFlow device (/job:localhost/replica:0/task:0/device:GPU:0 with 14221 MB memory) -> physical GPU (device: 0, name: Tesla T4, pci bus id: 0000:00:04.0, compute capability: 7.5)\n2020-12-06 07:11:57.871417: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcublas.so.10\n2020-12-06 07:11:59.386467: I tensorflow/stream_executor/platform/default/dso_loader.cc:44] Successfully opened dynamic library libcudnn.so.7\nEpoch: 1 Val accuracy: 95.6400% Loss: 0.063592 (improved)\nI1206 07:12:37.201371 140003042297728 tensorFI.py:179] Done setting logLevel to 10\nI1206 07:12:37.201723 140003042297728 tensorFI.py:183] Initializing the injector\nI1206 07:12:37.206099 140003042297728 tensorFI.py:187] Modifying graph in session \nW1206 07:12:37.206728 140003042297728 deprecation.py:323] From /usr/local/lib/python2.7/dist-packages/TensorFI/modifyGraph.py:34: py_func (from tensorflow.python.ops.script_ops) is deprecated and will be removed in a future version.\nInstructions for updating:\ntf.py_func is deprecated in TF V2. Instead, there are two\n options available in V2.\n - tf.py_function takes a python function which manipulates tf eager\n tensors instead of numpy arrays. It's easy to convert a tf eager tensor to\n an ndarray (just call tensor.numpy()) but having access to eager tensors\n means `tf.py_function`s can use accelerators such as GPUs as well as\n being differentiable using a gradient tape.\n - tf.numpy_function maintains the semantics of the deprecated tf.py_func\n (it is not differentiable, and manipulates numpy arrays). It drops the\n stateful argument making all functions stateful.\n \nI1206 07:12:37.682312 140003042297728 tensorFI.py:190] Done modification of graph\nI1206 07:12:37.682498 140003042297728 tensorFI.py:195] Initializing the fault injection parameters\nI1206 07:12:37.682849 140003042297728 injectFault.py:40] Initialized config file : FIConfig: {\n\tfaultTypeScalar : None\n\tfaultTypeTensor : Rand\n\tinjectMap : {<Ops.CONV2D: 'CONV2D'>: 1.0}\n\tfaultSeed : 1000\n\tskipCount : 0\n }\nI1206 07:12:37.682956 140003042297728 tensorFI.py:200] Initializing the fault log\nUnable to open log file faultLogs/NoName-log\nStarting log at 2020-12-06 07:12:37.683075\n\nI1206 07:12:37.683126 140003042297728 tensorFI.py:206] Performing monkey patching\nI1206 07:12:37.683207 140003042297728 tensorFI.py:212] Done with init\nTesting Accuracy: 0.94400007\n\n---------------------------------------\nDone injections\n" ], [ "!pwd", "/content/TensorFI\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ] ]
d015b1f1d57a0e833f5c309f03ed58ef87414929
14,443
ipynb
Jupyter Notebook
Markov Model.ipynb
MeNsaaH/project-web-app
b46d551e856af7ed7c214dd0a25b8d8d6c02e07e
[ "BSD-2-Clause" ]
null
null
null
Markov Model.ipynb
MeNsaaH/project-web-app
b46d551e856af7ed7c214dd0a25b8d8d6c02e07e
[ "BSD-2-Clause" ]
null
null
null
Markov Model.ipynb
MeNsaaH/project-web-app
b46d551e856af7ed7c214dd0a25b8d8d6c02e07e
[ "BSD-2-Clause" ]
null
null
null
24.031614
118
0.413072
[ [ [ "import os\nimport random\nfrom django.conf import settings\nimport numpy as np\nimport pandas as pd", "_____no_output_____" ], [ "definition = [('0', \"Not Flooded\"), ('2', \"Almost Flooded\"), ('3', 'Flooded')]", "_____no_output_____" ], [ "datapath = \"/clean_flood.csv\"", "_____no_output_____" ], [ "data = pd.read_csv(settings.DATASET_DIR + datapath, dtype={'State': str})", "_____no_output_____" ], [ "data[\"WaterLevel\"] = data[\"WaterLevel\"]/data[\"WaterLevel\"].mean() - (np.random.rand() - 0.1)", "_____no_output_____" ], [ "data[\"WaterLevel\"].describe()", "_____no_output_____" ], [ "criteria = [data['WaterLevel'].le(0.75), data['WaterLevel'].between(0.75, 0.98), data['WaterLevel'].ge(0.98)]\nvalues = [0, 1, 2]", "_____no_output_____" ], [ "data['state'] = np.select(criteria, values, 0)", "_____no_output_____" ], [ "data.describe()", "_____no_output_____" ], [ "data[\"state\"] = data[\"state\"].map(lambda x: random.choice([0, 1, 2, 1, 2, 0, 1]))", "_____no_output_____" ], [ "data[\"next_state\"] = data[\"state\"].shift()", "_____no_output_____" ], [ "data.head()", "_____no_output_____" ], [ "states = {\"normal\": 0, \"almost_flooded\":1, \"flooded\": 2}\ntransitions = {\"normal\": {}, \"almost_flooded\":{}, \"flooded\": {}}", "_____no_output_____" ], [ "for i in states.items():\n for j in states.items():\n transitions[i[0]][j[0]] = data[(data[\"state\"] == i[1]) & (data[\"next_state\"] == j[1])].shape[0]\n ", "_____no_output_____" ], [ "transitions", "_____no_output_____" ], [ "df = pd.DataFrame(transitions)", "_____no_output_____" ], [ "df.shape[0]", "_____no_output_____" ], [ "for i in range(df.shape[0]):\n df.iloc[i] = df.iloc[i]/df.iloc[i].sum()", "_____no_output_____" ], [ "transition_matrix = df.values", "_____no_output_____" ], [ "transition_matrix", "_____no_output_____" ], [ "np.atleast_2d(transition_matrix)", "_____no_output_____" ], [ "states = list(states.keys())", "_____no_output_____" ], [ "index_dict = {states[index]: index for index in range(len(states))}", "_____no_output_____" ], [ "state_dict = {index: states[index] for index in range(len(states))}", "_____no_output_____" ], [ "state_dict, index_dict", "_____no_output_____" ], [ "from main.utils.predictor import MarkovChain", "_____no_output_____" ], [ "predictor = MarkovChain(transition_matrix, states)", "_____no_output_____" ], [ "predictor.generate_states('flooded', no=10)", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
d015c8b0b145fe25ccc95dbcf540586989016a1a
8,908
ipynb
Jupyter Notebook
notebooks/Example_3_learningequality.ipynb
learningequality/BasicCrawler
f3839467d7c0f3e53527e6009232f12216847ed1
[ "MIT" ]
4
2018-06-02T07:16:21.000Z
2021-01-17T12:15:22.000Z
notebooks/Example_3_learningequality.ipynb
learningequality/BasicCrawler
f3839467d7c0f3e53527e6009232f12216847ed1
[ "MIT" ]
4
2017-11-20T13:48:38.000Z
2020-07-03T03:09:24.000Z
notebooks/Example_3_learningequality.ipynb
learningequality/BasicCrawler
f3839467d7c0f3e53527e6009232f12216847ed1
[ "MIT" ]
3
2017-11-20T13:39:23.000Z
2020-06-12T12:26:16.000Z
45.917526
308
0.574427
[ [ [ "import re\nfrom basiccrawler.crawler import BasicCrawler\n\n\ncrawler = BasicCrawler(main_source_domain='https://learningequality.org')\ncrawler.IGNORE_URLS.append(re.compile('.*cdn-cgi/l/email-protection.*'))\ncrawler.CRAWLING_STAGE_OUTPUT = 'chefdata/trees/learningequality_web_resource_tree.json'\n\n# TODO: implement a \"skip PDFs\" feature, because oterwise initial crawl 20mins+\nchannel_tree = crawler.crawl(limit=10000)", "Cache entry deserialization failed, entry ignored\nCache entry deserialization failed, entry ignored\nCache entry deserialization failed, entry ignored\nCache entry deserialization failed, entry ignored\nCache entry deserialization failed, entry ignored\nCache entry deserialization failed, entry ignored\nCache entry deserialization failed, entry ignored\nCache entry deserialization failed, entry ignored\nCache entry deserialization failed, entry ignored\nCache entry deserialization failed, entry ignored\nCache entry deserialization failed, entry ignored\nCache entry deserialization failed, entry ignored\nCache entry deserialization failed, entry ignored\nCache entry deserialization failed, entry ignored\nCache entry deserialization failed, entry ignored\nCache entry deserialization failed, entry ignored\nCache entry deserialization failed, entry ignored\nCache entry deserialization failed, entry ignored\n" ], [ "crawler.print_tree(channel_tree, print_depth=3)", " - path: / (PageWebResource) \n children:\n - path: / (PageWebResource) \n - path: /about/ (PageWebResource) \n - path: /about/values/ (PageWebResource) \n - path: /about/team/ (PageWebResource) \n - path: /about/board/ (PageWebResource) \n - path: /about/supporters/ (PageWebResource) \n - path: /about/press/ (PageWebResource) \n - path: /about/jobs/ (PageWebResource) \n - path: /about/internships/ (PageWebResource) \n children:\n - path: /about/jobs/?gh_jid=533166 (PageWebResource) \n children counts: {'PageWebResource': 1}\n - path: /directions/ (PageWebResource) \n - path: /kolibri/ (PageWebResource) \n - path: /download/ (PageWebResource) \n children:\n - path: https://storage.googleapis.com/le-releases/downloads/kolibri/v0.12.3/kolibri-v0.12.3-windows-setup.exe (PageWebResource) \n - path: https://storage.googleapis.com/le-releases/downloads/kolibri/v0.12.3/kolibri_0.12.3-0ubuntu1_all.deb (PageWebResource) \n - path: https://storage.googleapis.com/le-releases/downloads/kolibri/v0.12.3/kolibri-0.12.3.pex (MediaWebResource) \n - path: /documentation/ (PageWebResource) \n children:\n - path: https://drive.google.com/drive/folders/1TNCjAOk24NKZFdK-GYzXSbUhksZ4wu0r?usp=sharing (PageWebResource) \n - path: https://accounts.google.com/ServiceLogin?service=wise&passive=1209600&continue=https://drive.google.com/file/d/1Pachcc-k7CRRu3koyl-IgKvwzmkZVttN/view?usp%3Dsharing&followup=https://drive.google.com/file/d/1Pachcc-k7CRRu3koyl-IgKvwzmkZVttN/view?usp%3Dsharing (PageWebResource) \n - path: /hardware_grant/ (PageWebResource) \n - path: /translate/ (PageWebResource) \n - path: https://blog.learningequality.org/?gi=a88742768739 (PageWebResource) \n - path: /ka-lite/map/add/ (PageWebResource) \n - path: /donate/ (PageWebResource) \n children:\n - path: /static/doc/learning_equality_irs_determination_letter.pdf (MediaWebResource) \n - path: /ka-lite/ (PageWebResource) \n children:\n - path: https://ka-lite.readthedocs.io/en/latest/installguide/install_main.html (PageWebResource) \n - path: https://ka-lite.readthedocs.io/en/latest/ (PageWebResource) \n - path: /ka-lite/infographic/ (PageWebResource) \n - path: 
/ka-lite/map/ (PageWebResource) \n children:\n - path: https://blog.learningequality.org/samuel-morris-scholars-program-an-unforgettable-impact-99f28a5faeff (PageWebResource) \n - path: https://blog.learningequality.org/ka-lite-brings-offline-education-to-idaho-department-of-corrections-90f7a5f1bf4b (PageWebResource) \n - path: https://blog.learningequality.org/the-sparks-familys-overland-travel-from-alaska-to-argentina-dedde75a145 (PageWebResource) \n - path: https://blog.learningequality.org/inspiring-confidence-a-guest-post-from-edward-j-hills-28719ed03196 (PageWebResource) \n - path: https://blog.learningequality.org/ka-lite-in-typhoon-stricken-mangorocoro-c781aa9c4cb4 (PageWebResource) \n - path: https://blog.learningequality.org/deployment-spotlight-ka-lite-in-rural-guatemala-ec6d991ea802 (PageWebResource) \n - path: https://blog.learningequality.org/bringing-ka-lite-to-gitwe-rwanda-38872419b1ac (PageWebResource) \n - path: /media/FUNSEPA_Final_Evaluation_Report_27May2016.pdf (MediaWebResource) \n - path: /media/Rapport-Etude-Cameroun_KL_ENG.pdf (MediaWebResource) \n" ] ] ]
[ "code" ]
[ [ "code", "code" ] ]
d015cc82f0a9203ceebaaf42432a47db6e735af1
83,032
ipynb
Jupyter Notebook
bin.ipynb
CoolTowel/JellyFish
d1239d3cbe8e398c560a13b48c32d39632a78169
[ "MIT" ]
null
null
null
bin.ipynb
CoolTowel/JellyFish
d1239d3cbe8e398c560a13b48c32d39632a78169
[ "MIT" ]
null
null
null
bin.ipynb
CoolTowel/JellyFish
d1239d3cbe8e398c560a13b48c32d39632a78169
[ "MIT" ]
null
null
null
266.983923
40,102
0.800884
[ [ [ "import jerel\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom astropy.io import fits\nfrom astropy.table import Table, Column\nfrom marvin import config\n\nconfig.setRelease('DR16')\nconfig.setDR('DR16')\nconfig.download = True\n\nimport random\n\nimport warnings \nwarnings.filterwarnings(\"ignore\")\n\nwith fits.open('./data/galaxies_sorted.fits') as hdulist:\n plateifu_list = hdulist[1].data['plateifu']", "\u001b[0;34m[INFO]: \u001b[0mNo release version set. Setting default to DR15\n" ], [ "for i in range(23):\n plateifu = plateifu_list[i]\n test_map_ha,snr,er = jerel.my_ha(plateifu=plateifu)\n test_map_hb,snr,er = jerel.my_hb(plateifu=plateifu)\n fig, axs = plt.subplots(1,2)\n axs[0].imshow(np.log10(test_map_ha),origin='lower')\n axs[1].imshow(np.log10(test_map_hb),origin='lower')", "_____no_output_____" ], [ "test_map,snr,er = jerel.sfr(plateifu=plateifu_list[21])\n\nradii_mask = 1*(er < 1.5)\n\nholes = test_map.filled(fill_value=-99)\nholes = holes != -99\nholes = radii_mask-holes\ntest_map[holes==1]=np.nan\n\nplt.imshow(test_map,origin='lower')\nplt.colorbar()", "_____no_output_____" ], [ "from astropy.convolution import Gaussian2DKernel, interpolate_replace_nans\n\n# Load the data from data.astropy.org\n\n\n# We smooth with a Gaussian kernel with x_stddev=1 (and y_stddev=1)\n# It is a 9x9 array\nkernel = Gaussian2DKernel(x_stddev=1)\n\n# create a \"fixed\" image with NaNs replaced by interpolated values\nfixed_image = interpolate_replace_nans(test_map, kernel)\n\nplt.imshow(fixed_image,origin='lower')\nplt.colorbar()", "_____no_output_____" ], [ "test_map,snr,er = jerel.sfr(plateifu=plateifu_list[21])\n\nnp.sum(fixed_image.filled(fill_value=0))", "_____no_output_____" ], [ "(image >0)[35:42,62:70]", "_____no_output_____" ], [ "import jerelh\nsfr_map, snr_map, er_map = jerelh.sfr_noholes(plateifu_list[16], 2, 1.5)\nsfr_map_h, snr_map, er_map = jerelh.sfr(plateifu_list[16], 2, 1.5)\nfig, axs = plt.subplots(1,2)\naxs[0].imshow(np.log10(sfr_map),origin='lower')\naxs[1].imshow(np.log10(sfr_map_h),origin='lower')", "_____no_output_____" ], [ "np.ma.sum(sfr_map)-np.ma.sum(sfr_map_h)", "_____no_output_____" ], [ "%%time\ntt = jerelh.stat_list(plateifu_list, np.zeros(np.shape(plateifu_list), dtype='int'))", "CPU times: user 19.2 s, sys: 262 ms, total: 19.5 s\nWall time: 19.6 s\n" ], [ "t", "_____no_output_____" ], [ "tt", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
d015ccb39916c0b01e50b25ce4d7a7266ac69ed2
17,301
ipynb
Jupyter Notebook
if_else/if_else.ipynb
fontainedeseaux/NSI_premiere
aa5094891e989b7b5c752f14f80cbb36599f8afa
[ "MIT" ]
null
null
null
if_else/if_else.ipynb
fontainedeseaux/NSI_premiere
aa5094891e989b7b5c752f14f80cbb36599f8afa
[ "MIT" ]
null
null
null
if_else/if_else.ipynb
fontainedeseaux/NSI_premiere
aa5094891e989b7b5c752f14f80cbb36599f8afa
[ "MIT" ]
null
null
null
44.821244
390
0.596729
[ [ [ "empty" ] ] ]
[ "empty" ]
[ [ "empty" ] ]
d015d05cf2c677735bc5cddf794f8df944ee2135
636,315
ipynb
Jupyter Notebook
vui_notebook.ipynb
shubhank-saxena/dnn-speech-recognizer
b418bf997013dd00cb168c26dc3c81957d658f13
[ "Apache-2.0" ]
null
null
null
vui_notebook.ipynb
shubhank-saxena/dnn-speech-recognizer
b418bf997013dd00cb168c26dc3c81957d658f13
[ "Apache-2.0" ]
null
null
null
vui_notebook.ipynb
shubhank-saxena/dnn-speech-recognizer
b418bf997013dd00cb168c26dc3c81957d658f13
[ "Apache-2.0" ]
null
null
null
384.480363
267,296
0.857822
[ [ [ "# Artificial Intelligence Nanodegree\n\n## Voice User Interfaces\n\n## Project: Speech Recognition with Neural Networks\n\n---\n\nIn this notebook, some template code has already been provided for you, and you will need to implement additional functionality to successfully complete this project. You will not need to modify the included code beyond what is requested. Sections that begin with **'(IMPLEMENTATION)'** in the header indicate that the following blocks of code will require additional functionality which you must provide. Please be sure to read the instructions carefully! \n\n> **Note**: Once you have completed all of the code implementations, you need to finalize your work by exporting the Jupyter Notebook as an HTML document. Before exporting the notebook to html, all of the code cells need to have been run so that reviewers can see the final implementation and output. You can then export the notebook by using the menu above and navigating to \\n\",\n \"**File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission.\n\nIn addition to implementing code, there will be questions that you must answer which relate to the project and your implementation. Each section where you will answer a question is preceded by a **'Question X'** header. Carefully read each question and provide thorough answers in the following text boxes that begin with **'Answer:'**. Your project submission will be evaluated based on your answers to each of the questions and the implementation you provide.\n\n>**Note:** Code and Markdown cells can be executed using the **Shift + Enter** keyboard shortcut. Markdown cells can be edited by double-clicking the cell to enter edit mode.\n\nThe rubric contains _optional_ \"Stand Out Suggestions\" for enhancing the project beyond the minimum requirements. If you decide to pursue the \"Stand Out Suggestions\", you should include the code in this Jupyter notebook.\n\n---\n\n## Introduction \n\nIn this notebook, you will build a deep neural network that functions as part of an end-to-end automatic speech recognition (ASR) pipeline! Your completed pipeline will accept raw audio as input and return a predicted transcription of the spoken language. The full pipeline is summarized in the figure below.\n\n<img src=\"images/pipeline.png\">\n\n- **STEP 1** is a pre-processing step that converts raw audio to one of two feature representations that are commonly used for ASR. \n- **STEP 2** is an acoustic model which accepts audio features as input and returns a probability distribution over all potential transcriptions. After learning about the basic types of neural networks that are often used for acoustic modeling, you will engage in your own investigations, to design your own acoustic model!\n- **STEP 3** in the pipeline takes the output from the acoustic model and returns a predicted transcription. 
\n\nFeel free to use the links below to navigate the notebook:\n- [The Data](#thedata)\n- [**STEP 1**](#step1): Acoustic Features for Speech Recognition\n- [**STEP 2**](#step2): Deep Neural Networks for Acoustic Modeling\n - [Model 0](#model0): RNN\n - [Model 1](#model1): RNN + TimeDistributed Dense\n - [Model 2](#model2): CNN + RNN + TimeDistributed Dense\n - [Model 3](#model3): Deeper RNN + TimeDistributed Dense\n - [Model 4](#model4): Bidirectional RNN + TimeDistributed Dense\n - [Models 5+](#model5)\n - [Compare the Models](#compare)\n - [Final Model](#final)\n- [**STEP 3**](#step3): Obtain Predictions\n\n<a id='thedata'></a>\n## The Data\n\nWe begin by investigating the dataset that will be used to train and evaluate your pipeline. [LibriSpeech](http://www.danielpovey.com/files/2015_icassp_librispeech.pdf) is a large corpus of English-read speech, designed for training and evaluating models for ASR. The dataset contains 1000 hours of speech derived from audiobooks. We will work with a small subset in this project, since larger-scale data would take a long while to train. However, after completing this project, if you are interested in exploring further, you are encouraged to work with more of the data that is provided [online](http://www.openslr.org/12/).\n\nIn the code cells below, you will use the `vis_train_features` module to visualize a training example. The supplied argument `index=0` tells the module to extract the first example in the training set. (You are welcome to change `index=0` to point to a different training example, if you like, but please **DO NOT** amend any other code in the cell.) The returned variables are:\n- `vis_text` - transcribed text (label) for the training example.\n- `vis_raw_audio` - raw audio waveform for the training example.\n- `vis_mfcc_feature` - mel-frequency cepstral coefficients (MFCCs) for the training example.\n- `vis_spectrogram_feature` - spectrogram for the training example. \n- `vis_audio_path` - the file path to the training example.", "_____no_output_____" ] ], [ [ "from data_generator import vis_train_features\n\n# extract label and audio features for a single training example\nvis_text, vis_raw_audio, vis_mfcc_feature, vis_spectrogram_feature, vis_audio_path = vis_train_features()", "There are 2136 total training examples.\n" ] ], [ [ "The following code cell visualizes the audio waveform for your chosen example, along with the corresponding transcript. You also have the option to play the audio in the notebook!", "_____no_output_____" ] ], [ [ "from IPython.display import Markdown, display\nfrom data_generator import vis_train_features, plot_raw_audio\nfrom IPython.display import Audio\n%matplotlib inline\n\n# plot audio signal\nplot_raw_audio(vis_raw_audio)\n# print length of audio signal\ndisplay(Markdown('**Shape of Audio Signal** : ' + str(vis_raw_audio.shape)))\n# print transcript corresponding to audio clip\ndisplay(Markdown('**Transcript** : ' + str(vis_text)))\n# play the audio file\nAudio(vis_audio_path)", "_____no_output_____" ] ], [ [ "<a id='step1'></a>\n## STEP 1: Acoustic Features for Speech Recognition\n\nFor this project, you won't use the raw audio waveform as input to your model. Instead, we provide code that first performs a pre-processing step to convert the raw audio to a feature representation that has historically proven successful for ASR models. Your acoustic model will accept the feature representation as input.\n\nIn this project, you will explore two possible feature representations. 
_After completing the project_, if you'd like to read more about deep learning architectures that can accept raw audio input, you are encouraged to explore this [research paper](https://pdfs.semanticscholar.org/a566/cd4a8623d661a4931814d9dffc72ecbf63c4.pdf).\n\n### Spectrograms\n\nThe first option for an audio feature representation is the [spectrogram](https://www.youtube.com/watch?v=_FatxGN3vAM). In order to complete this project, you will **not** need to dig deeply into the details of how a spectrogram is calculated; but, if you are curious, the code for calculating the spectrogram was borrowed from [this repository](https://github.com/baidu-research/ba-dls-deepspeech). The implementation appears in the `utils.py` file in your repository.\n\nThe code that we give you returns the spectrogram as a 2D tensor, where the first (_vertical_) dimension indexes time, and the second (_horizontal_) dimension indexes frequency. To speed the convergence of your algorithm, we have also normalized the spectrogram. (You can see this quickly in the visualization below by noting that the mean value hovers around zero, and most entries in the tensor assume values close to zero.)", "_____no_output_____" ] ], [ [ "from data_generator import plot_spectrogram_feature\n\n# plot normalized spectrogram\nplot_spectrogram_feature(vis_spectrogram_feature)\n# print shape of spectrogram\ndisplay(Markdown('**Shape of Spectrogram** : ' + str(vis_spectrogram_feature.shape)))", "_____no_output_____" ] ], [ [ "### Mel-Frequency Cepstral Coefficients (MFCCs)\n\nThe second option for an audio feature representation is [MFCCs](https://en.wikipedia.org/wiki/Mel-frequency_cepstrum). You do **not** need to dig deeply into the details of how MFCCs are calculated, but if you would like more information, you are welcome to peruse the [documentation](https://github.com/jameslyons/python_speech_features) of the `python_speech_features` Python package. Just as with the spectrogram features, the MFCCs are normalized in the supplied code.\n\nThe main idea behind MFCC features is the same as spectrogram features: at each time window, the MFCC feature yields a feature vector that characterizes the sound within the window. Note that the MFCC feature is much lower-dimensional than the spectrogram feature, which could help an acoustic model to avoid overfitting to the training dataset. ", "_____no_output_____" ] ], [ [ "from data_generator import plot_mfcc_feature\n\n# plot normalized MFCC\nplot_mfcc_feature(vis_mfcc_feature)\n# print shape of MFCC\ndisplay(Markdown('**Shape of MFCC** : ' + str(vis_mfcc_feature.shape)))", "_____no_output_____" ] ], [ [ "When you construct your pipeline, you will be able to choose to use either spectrogram or MFCC features. If you would like to see different implementations that make use of MFCCs and/or spectrograms, please check out the links below:\n- This [repository](https://github.com/baidu-research/ba-dls-deepspeech) uses spectrograms.\n- This [repository](https://github.com/mozilla/DeepSpeech) uses MFCCs.\n- This [repository](https://github.com/buriburisuri/speech-to-text-wavenet) also uses MFCCs.\n- This [repository](https://github.com/pannous/tensorflow-speech-recognition/blob/master/speech_data.py) experiments with raw audio, spectrograms, and MFCCs as features.", "_____no_output_____" ], [ "<a id='step2'></a>\n## STEP 2: Deep Neural Networks for Acoustic Modeling\n\nIn this section, you will experiment with various neural network architectures for acoustic modeling. 
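Before turning to the models, it can help to see roughly how the two STEP 1 representations are produced. The sketch below is illustrative only (the project's actual feature code lives in `utils.py` and `data_generator.py`) and assumes `scipy` plus the `python_speech_features` package referenced above:

```python
import numpy as np
from scipy.signal import spectrogram
from python_speech_features import mfcc

def normalized_spectrogram(audio, sample_rate=16000, eps=1e-14):
    # Short-time Fourier magnitudes, transposed so rows index time and
    # columns index frequency, matching the plots above.
    _, _, sxx = spectrogram(audio, fs=sample_rate)
    feats = np.log(sxx.T + eps)  # log compresses the huge dynamic range
    return (feats - feats.mean()) / (feats.std() + eps)  # zero mean, unit variance

def normalized_mfcc(audio, sample_rate=16000, numcep=13, eps=1e-14):
    # Thirteen cepstral coefficients per window, far fewer than 161 frequency bins.
    feats = mfcc(audio, samplerate=sample_rate, numcep=numcep)
    return (feats - feats.mean()) / (feats.std() + eps)
```

Either way, the acoustic model downstream only ever sees a (time, features) array.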
\n\nYou will begin by training five relatively simple architectures. **Model 0** is provided for you. You will write code to implement **Models 1**, **2**, **3**, and **4**. If you would like to experiment further, you are welcome to create and train more models under the **Models 5+** heading. \n\nAll models will be specified in the `sample_models.py` file. After importing the `sample_models` module, you will train your architectures in the notebook.\n\nAfter experimenting with the five simple architectures, you will have the opportunity to compare their performance. Based on your findings, you will construct a deeper architecture that is designed to outperform all of the shallow models.\n\nFor your convenience, we have designed the notebook so that each model can be specified and trained on separate occasions. That is, say you decide to take a break from the notebook after training **Model 1**. Then, you need not re-execute all prior code cells in the notebook before training **Model 2**. You need only re-execute the code cell below, that is marked with **`RUN THIS CODE CELL IF YOU ARE RESUMING THE NOTEBOOK AFTER A BREAK`**, before transitioning to the code cells corresponding to **Model 2**.", "_____no_output_____" ] ], [ [ "#####################################################################\n# RUN THIS CODE CELL IF YOU ARE RESUMING THE NOTEBOOK AFTER A BREAK #\n#####################################################################\n\n# allocate 50% of GPU memory (if you like, feel free to change this)\nfrom keras.backend.tensorflow_backend import set_session\nimport tensorflow as tf \nconfig = tf.ConfigProto()\nconfig.gpu_options.per_process_gpu_memory_fraction = 0.4\nset_session(tf.Session(config=config))\n\n# watch for any changes in the sample_models module, and reload it automatically\n%load_ext autoreload\n%autoreload 2\n# import NN architectures for speech recognition\nfrom sample_models import *\n# import function for training acoustic model\nfrom train_utils import train_model", "Using TensorFlow backend.\n/home/pjordan/anaconda3/envs/dnn-speech-recognizer/lib/python3.5/site-packages/h5py/__init__.py:34: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.\n from ._conv import register_converters as _register_converters\n" ] ], [ [ "<a id='model0'></a>\n### Model 0: RNN\n\nGiven their effectiveness in modeling sequential data, the first acoustic model you will use is an RNN. As shown in the figure below, the RNN we supply to you will take the time sequence of audio features as input.\n\n<img src=\"images/simple_rnn.png\" width=\"50%\">\n\nAt each time step, the speaker pronounces one of 28 possible characters, including each of the 26 letters in the English alphabet, along with a space character (\" \"), and an apostrophe (').\n\nThe output of the RNN at each time step is a vector of probabilities with 29 entries, where the $i$-th entry encodes the probability that the $i$-th character is spoken in the time sequence. (The extra 29th character is an empty \"character\" used to pad training examples within batches containing uneven lengths.) If you would like to peek under the hood at how characters are mapped to indices in the probability vector, look at the `char_map.py` file in the repository. The figure below shows an equivalent, rolled depiction of the RNN that shows the output layer in greater detail. 
\n\n<img src=\"images/simple_rnn_unrolled.png\" width=\"60%\">\n\nThe model has already been specified for you in Keras. To import it, you need only run the code cell below. ", "_____no_output_____" ] ], [ [ "model_0 = simple_rnn_model(input_dim=161) # change to 13 if you would like to use MFCC features", "_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nthe_input (InputLayer) (None, None, 161) 0 \n_________________________________________________________________\nrnn (GRU) (None, None, 29) 16617 \n_________________________________________________________________\nsoftmax (Activation) (None, None, 29) 0 \n=================================================================\nTotal params: 16,617\nTrainable params: 16,617\nNon-trainable params: 0\n_________________________________________________________________\nNone\n" ] ], [ [ "As explored in the lesson, you will train the acoustic model with the [CTC loss](http://www.cs.toronto.edu/~graves/icml_2006.pdf) criterion. Custom loss functions take a bit of hacking in Keras, and so we have implemented the CTC loss function for you, so that you can focus on trying out as many deep learning architectures as possible :). If you'd like to peek at the implementation details, look at the `add_ctc_loss` function within the `train_utils.py` file in the repository.\n\nTo train your architecture, you will use the `train_model` function within the `train_utils` module; it has already been imported in one of the above code cells. The `train_model` function takes three **required** arguments:\n- `input_to_softmax` - a Keras model instance.\n- `pickle_path` - the name of the pickle file where the loss history will be saved.\n- `save_model_path` - the name of the HDF5 file where the model will be saved.\n\nIf we have already supplied values for `input_to_softmax`, `pickle_path`, and `save_model_path`, please **DO NOT** modify these values. \n\nThere are several **optional** arguments that allow you to have more control over the training process. You are welcome to, but not required to, supply your own values for these arguments.\n- `minibatch_size` - the size of the minibatches that are generated while training the model (default: `20`).\n- `spectrogram` - Boolean value dictating whether spectrogram (`True`) or MFCC (`False`) features are used for training (default: `True`).\n- `mfcc_dim` - the size of the feature dimension to use when generating MFCC features (default: `13`).\n- `optimizer` - the Keras optimizer used to train the model (default: `SGD(lr=0.02, decay=1e-6, momentum=0.9, nesterov=True, clipnorm=5)`). \n- `epochs` - the number of epochs to use to train the model (default: `20`). If you choose to modify this parameter, make sure that it is *at least* 20.\n- `verbose` - controls the verbosity of the training output in the `model.fit_generator` method (default: `1`).\n- `sort_by_duration` - Boolean value dictating whether the training and validation sets are sorted by (increasing) duration before the start of the first epoch (default: `False`).\n\nThe `train_model` function defaults to using spectrogram features; if you choose to use these features, note that the acoustic model in `simple_rnn_model` should have `input_dim=161`. Otherwise, if you choose to use MFCC features, the acoustic model should have `input_dim=13`.\n\nWe have chosen to use `GRU` units in the supplied RNN. 
If you would like to experiment with `LSTM` or `SimpleRNN` cells, feel free to do so here. If you change the `GRU` units to `SimpleRNN` cells in `simple_rnn_model`, you may notice that the loss quickly becomes undefined (`nan`) - you are strongly encouraged to check this for yourself! This is due to the [exploding gradients problem](http://www.wildml.com/2015/10/recurrent-neural-networks-tutorial-part-3-backpropagation-through-time-and-vanishing-gradients/). We have already implemented [gradient clipping](https://arxiv.org/pdf/1211.5063.pdf) in your optimizer to help you avoid this issue.\n\n__IMPORTANT NOTE:__ If you notice that your gradient has exploded in any of the models below, feel free to explore more with gradient clipping (the `clipnorm` argument in your optimizer) or swap out any `SimpleRNN` cells for `LSTM` or `GRU` cells. You can also try restarting the kernel to restart the training process.", "_____no_output_____" ] ], [ [ "train_model(input_to_softmax=model_0, \n pickle_path='model_0.pickle', \n save_model_path='model_0.h5',\n optimizer=SGD(lr=0.02, decay=1e-6, momentum=0.9, nesterov=True, clipnorm=1),\n spectrogram=True) # change to False if you would like to use MFCC features", "Epoch 1/20\n106/106 [==============================] - 116s - loss: 962.2045 - val_loss: 746.4123\nEpoch 2/20\n106/106 [==============================] - 111s - loss: 757.1928 - val_loss: 729.0466\nEpoch 3/20\n106/106 [==============================] - 116s - loss: 753.0298 - val_loss: 730.4964\nEpoch 4/20\n106/106 [==============================] - 115s - loss: 750.8956 - val_loss: 721.6433\nEpoch 5/20\n106/106 [==============================] - 115s - loss: 751.6414 - val_loss: 726.6612\nEpoch 6/20\n106/106 [==============================] - 115s - loss: 750.7420 - val_loss: 727.9034\nEpoch 7/20\n106/106 [==============================] - 112s - loss: 750.2763 - val_loss: 729.9839\nEpoch 8/20\n106/106 [==============================] - 116s - loss: 751.5226 - val_loss: 723.4622\nEpoch 9/20\n106/106 [==============================] - 117s - loss: 750.4366 - val_loss: 721.1129\nEpoch 10/20\n106/106 [==============================] - 117s - loss: 751.0709 - val_loss: 733.4978\nEpoch 11/20\n106/106 [==============================] - 116s - loss: 751.7690 - val_loss: 725.5819\nEpoch 12/20\n106/106 [==============================] - 117s - loss: 750.4331 - val_loss: 728.1983\nEpoch 13/20\n106/106 [==============================] - 116s - loss: 750.6872 - val_loss: 721.3921\nEpoch 14/20\n106/106 [==============================] - 117s - loss: 750.7719 - val_loss: 723.6158\nEpoch 15/20\n106/106 [==============================] - 117s - loss: 749.6198 - val_loss: 728.7696\nEpoch 16/20\n106/106 [==============================] - 115s - loss: 750.4491 - val_loss: 723.0323\nEpoch 17/20\n106/106 [==============================] - 116s - loss: 750.8921 - val_loss: 725.6289\nEpoch 18/20\n106/106 [==============================] - 116s - loss: 750.8845 - val_loss: 725.6971\nEpoch 19/20\n106/106 [==============================] - 116s - loss: 750.1892 - val_loss: 722.8667\nEpoch 20/20\n106/106 [==============================] - 117s - loss: 750.7994 - val_loss: 724.6980\n" ] ], [ [ "<a id='model1'></a>\n### (IMPLEMENTATION) Model 1: RNN + TimeDistributed Dense\n\nRead about the [TimeDistributed](https://keras.io/layers/wrappers/) wrapper and the [BatchNormalization](https://keras.io/layers/normalization/) layer in the Keras documentation. 
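In short: `BatchNormalization` standardizes a layer's activations over each minibatch, which tends to speed up and stabilize training, while `TimeDistributed` applies one and the same `Dense` layer independently at every time step. A toy sketch of the two together (the layer sizes here are hypothetical):

```python
from keras.layers import BatchNormalization, Dense, GRU, Input, TimeDistributed
from keras.models import Model

inputs = Input(shape=(None, 161))              # (batch, time, features)
x = GRU(200, return_sequences=True)(inputs)    # one hidden vector per time step
x = BatchNormalization()(x)                    # normalize activations batch-wise
outputs = TimeDistributed(Dense(29, activation='softmax'))(x)  # per-step character scores
toy_model = Model(inputs=inputs, outputs=outputs)
```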
For your next architecture, you will add [batch normalization](https://arxiv.org/pdf/1510.01378.pdf) to the recurrent layer to reduce training times. The `TimeDistributed` layer will be used to find more complex patterns in the dataset. The unrolled snapshot of the architecture is depicted below.\n\n<img src=\"images/rnn_model.png\" width=\"60%\">\n\nThe next figure shows an equivalent, rolled depiction of the RNN that shows the (`TimeDistributed`) dense and output layers in greater detail. \n\n<img src=\"images/rnn_model_unrolled.png\" width=\"60%\">\n\nUse your research to complete the `rnn_model` function within the `sample_models.py` file. The function should specify an architecture that satisfies the following requirements:\n- The first layer of the neural network should be an RNN (`SimpleRNN`, `LSTM`, or `GRU`) that takes the time sequence of audio features as input. We have added `GRU` units for you, but feel free to change `GRU` to `SimpleRNN` or `LSTM`, if you like!\n- Whereas the architecture in `simple_rnn_model` treated the RNN output as the final layer of the model, you will use the output of your RNN as a hidden layer. Use `TimeDistributed` to apply a `Dense` layer to each of the time steps in the RNN output. Ensure that each `Dense` layer has `output_dim` units.\n\nUse the code cell below to load your model into the `model_1` variable. Use a value for `input_dim` that matches your chosen audio features, and feel free to change the values for `units` and `activation` to tweak the behavior of your recurrent layer.", "_____no_output_____" ] ], [ [ "model_1 = rnn_model(input_dim=161, # change to 13 if you would like to use MFCC features\n units=246,\n activation='relu',\n dropout_rate=0.0)", "_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nthe_input (InputLayer) (None, None, 161) 0 \n_________________________________________________________________\nrnn (GRU) (None, None, 246) 301104 \n_________________________________________________________________\nbatch_normalization_10 (Batc (None, None, 246) 984 \n_________________________________________________________________\ntime_distributed_11 (TimeDis (None, None, 29) 7163 \n_________________________________________________________________\nsoftmax (Activation) (None, None, 29) 0 \n=================================================================\nTotal params: 309,251\nTrainable params: 308,759\nNon-trainable params: 492\n_________________________________________________________________\nNone\n" ] ], [ [ "Please execute the code cell below to train the neural network you specified in `input_to_softmax`. After the model has finished training, the model is [saved](https://keras.io/getting-started/faq/#how-can-i-save-a-keras-model) in the HDF5 file `model_1.h5`. The loss history is [saved](https://wiki.python.org/moin/UsingPickle) in `model_1.pickle`. 
You are welcome to tweak any of the optional parameters while calling the `train_model` function, but this is not required.", "_____no_output_____" ] ], [ [ "from keras.optimizers import SGD\n\ntrain_model(input_to_softmax=model_1, \n pickle_path='model_1.pickle', \n save_model_path='model_1.h5',\n optimizer=SGD(lr=0.07693823225442271, decay=1e-6, momentum=0.9, nesterov=True, clipnorm=1),\n spectrogram=True) # change to False if you would like to use MFCC features", "Epoch 1/20\n106/106 [==============================] - 125s - loss: 301.3889 - val_loss: 255.1117\nEpoch 2/20\n106/106 [==============================] - 126s - loss: 208.7791 - val_loss: 195.5662\nEpoch 3/20\n106/106 [==============================] - 126s - loss: 188.6020 - val_loss: 184.3830\nEpoch 4/20\n106/106 [==============================] - 126s - loss: 172.8454 - val_loss: 165.9265\nEpoch 5/20\n106/106 [==============================] - 126s - loss: 159.9952 - val_loss: 160.3791\nEpoch 6/20\n106/106 [==============================] - 126s - loss: 151.2288 - val_loss: 150.3075\nEpoch 7/20\n106/106 [==============================] - 125s - loss: 144.6389 - val_loss: 147.3992\nEpoch 8/20\n106/106 [==============================] - 126s - loss: 139.3690 - val_loss: 143.2048\nEpoch 9/20\n106/106 [==============================] - 124s - loss: 134.5651 - val_loss: 140.9699\nEpoch 10/20\n106/106 [==============================] - 126s - loss: 130.5984 - val_loss: 139.4818\nEpoch 11/20\n106/106 [==============================] - 125s - loss: 127.2223 - val_loss: 134.7147\nEpoch 12/20\n106/106 [==============================] - 125s - loss: 124.1384 - val_loss: 135.1391\nEpoch 13/20\n106/106 [==============================] - 127s - loss: 121.4931 - val_loss: 135.6264\nEpoch 14/20\n106/106 [==============================] - 139s - loss: 119.0370 - val_loss: 132.6101\nEpoch 15/20\n106/106 [==============================] - 149s - loss: 117.5036 - val_loss: 135.2287\nEpoch 16/20\n106/106 [==============================] - 149s - loss: 115.1628 - val_loss: 134.6172\nEpoch 17/20\n106/106 [==============================] - 148s - loss: 114.1567 - val_loss: 133.6147\nEpoch 18/20\n106/106 [==============================] - 149s - loss: 113.1525 - val_loss: 131.8664\nEpoch 19/20\n106/106 [==============================] - 151s - loss: 110.8212 - val_loss: 133.1285\nEpoch 20/20\n106/106 [==============================] - 149s - loss: 109.7723 - val_loss: 133.2252\n" ] ], [ [ "<a id='model2'></a>\n### (IMPLEMENTATION) Model 2: CNN + RNN + TimeDistributed Dense\n\nThe architecture in `cnn_rnn_model` adds an additional level of complexity, by introducing a [1D convolution layer](https://keras.io/layers/convolutional/#conv1d). \n\n<img src=\"images/cnn_rnn_model.png\" width=\"100%\">\n\nThis layer incorporates many arguments that can be (optionally) tuned when calling the `cnn_rnn_model` module. We provide sample starting parameters, which you might find useful if you choose to use spectrogram audio features. \n\nIf you instead want to use MFCC features, these arguments will have to be tuned. Note that the current architecture only supports values of `'same'` or `'valid'` for the `conv_border_mode` argument.\n\nWhen tuning the parameters, be careful not to choose settings that make the convolutional layer overly small. 
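The standard 1D-convolution length arithmetic makes the check easy; the sketch below mirrors Keras's `'same'`/`'valid'` semantics:

```python
def cnn_output_length(input_length, kernel_size, border_mode, stride, dilation=1):
    # Effective kernel size once dilation is accounted for.
    effective_kernel = kernel_size + (kernel_size - 1) * (dilation - 1)
    if border_mode == 'same':
        length = input_length                         # padding preserves the length
    elif border_mode == 'valid':
        length = input_length - effective_kernel + 1  # no padding at the edges
    else:
        raise ValueError('unsupported border mode: ' + border_mode)
    return (length + stride - 1) // stride            # ceiling division by the stride
```

With the sample parameters used below (`kernel_size=5`, `conv_stride=3`, `'valid'` padding), an input of temporal length L shrinks to roughly L/3.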
If the temporal length of the CNN layer is shorter than the length of the transcribed text label, your code will throw an error.\n\nBefore running the code cell below, you must modify the `cnn_rnn_model` function in `sample_models.py`. Please add batch normalization to the recurrent layer, and provide the same `TimeDistributed` layer as before.", "_____no_output_____" ] ], [ [ "model_2 = cnn_rnn_model(input_dim=161, # change to 13 if you would like to use MFCC features\n filters=185,\n kernel_size=5, \n conv_stride=3,\n conv_border_mode='valid',\n units=350,\n dropout_rate=0.5)", "_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nthe_input (InputLayer) (None, None, 161) 0 \n_________________________________________________________________\nconv1d (Conv1D) (None, None, 185) 149110 \n_________________________________________________________________\nbn_conv_1d (BatchNormalizati (None, None, 185) 740 \n_________________________________________________________________\nrnn (GRU) (None, None, 350) 562800 \n_________________________________________________________________\nbatch_normalization_18 (Batc (None, None, 350) 1400 \n_________________________________________________________________\ntime_distributed_18 (TimeDis (None, None, 29) 10179 \n_________________________________________________________________\nsoftmax (Activation) (None, None, 29) 0 \n=================================================================\nTotal params: 724,229\nTrainable params: 723,159\nNon-trainable params: 1,070\n_________________________________________________________________\nNone\n" ] ], [ [ "Please execute the code cell below to train the neural network you specified in `input_to_softmax`. After the model has finished training, the model is [saved](https://keras.io/getting-started/faq/#how-can-i-save-a-keras-model) in the HDF5 file `model_2.h5`. The loss history is [saved](https://wiki.python.org/moin/UsingPickle) in `model_2.pickle`. 
You are welcome to tweak any of the optional parameters while calling the `train_model` function, but this is not required.", "_____no_output_____" ] ], [ [ "from keras.optimizers import SGD\n\ntrain_model(input_to_softmax=model_2, \n pickle_path='model_2.pickle', \n save_model_path='model_2.h5',\n optimizer=SGD(lr=0.05, decay=1e-6, momentum=0.9, nesterov=True, clipnorm=1),\n spectrogram=True) # change to False if you would like to use MFCC features", "Epoch 1/20\n106/106 [==============================] - 47s - loss: 258.7976 - val_loss: 215.1476\nEpoch 2/20\n106/106 [==============================] - 44s - loss: 210.2469 - val_loss: 195.7121\nEpoch 3/20\n106/106 [==============================] - 44s - loss: 194.4411 - val_loss: 176.9136\nEpoch 4/20\n106/106 [==============================] - 44s - loss: 184.4350 - val_loss: 164.3036\nEpoch 5/20\n106/106 [==============================] - 44s - loss: 176.8723 - val_loss: 161.7172\nEpoch 6/20\n106/106 [==============================] - 45s - loss: 171.1767 - val_loss: 155.6394\nEpoch 7/20\n106/106 [==============================] - 44s - loss: 166.1970 - val_loss: 150.5580\nEpoch 8/20\n106/106 [==============================] - 45s - loss: 162.9583 - val_loss: 150.3715\nEpoch 9/20\n106/106 [==============================] - 45s - loss: 159.4488 - val_loss: 146.7499\nEpoch 10/20\n106/106 [==============================] - 44s - loss: 156.4711 - val_loss: 143.3999\nEpoch 11/20\n106/106 [==============================] - 44s - loss: 153.5752 - val_loss: 141.8302\nEpoch 12/20\n106/106 [==============================] - 44s - loss: 151.9115 - val_loss: 141.0765\nEpoch 13/20\n106/106 [==============================] - 45s - loss: 149.8154 - val_loss: 140.0649\nEpoch 14/20\n106/106 [==============================] - 44s - loss: 148.0079 - val_loss: 138.6670\nEpoch 15/20\n106/106 [==============================] - 45s - loss: 146.1044 - val_loss: 138.5527\nEpoch 16/20\n106/106 [==============================] - 45s - loss: 144.4150 - val_loss: 135.1045\nEpoch 17/20\n106/106 [==============================] - 44s - loss: 143.2880 - val_loss: 135.8767\nEpoch 18/20\n106/106 [==============================] - 45s - loss: 141.8172 - val_loss: 134.6186\nEpoch 19/20\n106/106 [==============================] - 44s - loss: 140.8268 - val_loss: 130.9444\nEpoch 20/20\n106/106 [==============================] - 45s - loss: 139.1327 - val_loss: 132.9859\n" ] ], [ [ "<a id='model3'></a>\n### (IMPLEMENTATION) Model 3: Deeper RNN + TimeDistributed Dense\n\nReview the code in `rnn_model`, which makes use of a single recurrent layer. Now, specify an architecture in `deep_rnn_model` that utilizes a variable number `recur_layers` of recurrent layers. The figure below shows the architecture that should be returned if `recur_layers=2`. In the figure, the output sequence of the first recurrent layer is used as input for the next recurrent layer.\n\n<img src=\"images/deep_rnn_model.png\" width=\"80%\">\n\nFeel free to change the supplied values of `units` to whatever you think performs best. You can change the value of `recur_layers`, as long as your final value is greater than 1. 
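The heart of such a model is just a loop over recurrent layers. One way it might look (a sketch, not the graded implementation):

```python
from keras.layers import BatchNormalization, GRU

def stacked_recurrent(inputs, units, recur_layers):
    # Each GRU returns full sequences so that the next layer (and the final
    # TimeDistributed Dense) sees one vector per time step.
    x = inputs
    for i in range(recur_layers):
        x = GRU(units, return_sequences=True, name='gru_{}'.format(i + 1))(x)
        x = BatchNormalization(name='bn_{}'.format(i + 1))(x)
    return x
```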
(As a quick check that you have implemented the additional functionality in `deep_rnn_model` correctly, make sure that the architecture that you specify here is identical to `rnn_model` if `recur_layers=1`.)", "_____no_output_____" ] ], [ [ "model_3 = deep_rnn_model(input_dim=161, # change to 13 if you would like to use MFCC features\n units=290,\n recur_layers=3,\n dropout_rate=0.3035064397585259) ", "WARNING:tensorflow:From /home/pjordan/anaconda3/envs/dnn-speech-recognizer/lib/python3.5/site-packages/keras/backend/tensorflow_backend.py:1190: calling reduce_sum (from tensorflow.python.ops.math_ops) with keep_dims is deprecated and will be removed in a future version.\nInstructions for updating:\nkeep_dims is deprecated, use keepdims instead\nWARNING:tensorflow:From /home/pjordan/anaconda3/envs/dnn-speech-recognizer/lib/python3.5/site-packages/keras/backend/tensorflow_backend.py:1154: calling reduce_max (from tensorflow.python.ops.math_ops) with keep_dims is deprecated and will be removed in a future version.\nInstructions for updating:\nkeep_dims is deprecated, use keepdims instead\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nthe_input (InputLayer) (None, None, 161) 0 \n_________________________________________________________________\ngru_1 (GRU) (None, None, 290) 393240 \n_________________________________________________________________\nbatch_normalization_1 (Batch (None, None, 290) 1160 \n_________________________________________________________________\ngru_2 (GRU) (None, None, 290) 505470 \n_________________________________________________________________\nbatch_normalization_2 (Batch (None, None, 290) 1160 \n_________________________________________________________________\ngru_3 (GRU) (None, None, 290) 505470 \n_________________________________________________________________\nbatch_normalization_3 (Batch (None, None, 290) 1160 \n_________________________________________________________________\ntime_distributed_1 (TimeDist (None, None, 29) 8439 \n_________________________________________________________________\nsoftmax (Activation) (None, None, 29) 0 \n=================================================================\nTotal params: 1,416,099\nTrainable params: 1,414,359\nNon-trainable params: 1,740\n_________________________________________________________________\nNone\n" ] ], [ [ "Please execute the code cell below to train the neural network you specified in `input_to_softmax`. After the model has finished training, the model is [saved](https://keras.io/getting-started/faq/#how-can-i-save-a-keras-model) in the HDF5 file `model_3.h5`. The loss history is [saved](https://wiki.python.org/moin/UsingPickle) in `model_3.pickle`. 
You are welcome to tweak any of the optional parameters while calling the `train_model` function, but this is not required.", "_____no_output_____" ] ], [ [ "from keras.optimizers import SGD\n\ntrain_model(input_to_softmax=model_3, \n pickle_path='model_3.pickle', \n save_model_path='model_3.h5', \n optimizer=SGD(lr=0.0635459438114008, decay=1e-6, momentum=0.9, nesterov=True, clipnorm=1),\n spectrogram=True) # change to False if you would like to use MFCC features", "WARNING:tensorflow:From /home/pjordan/anaconda3/envs/dnn-speech-recognizer/lib/python3.5/site-packages/keras/backend/tensorflow_backend.py:1297: calling reduce_mean (from tensorflow.python.ops.math_ops) with keep_dims is deprecated and will be removed in a future version.\nInstructions for updating:\nkeep_dims is deprecated, use keepdims instead\nWARNING:tensorflow:Variable *= will be deprecated. Use `var.assign(var * other)` if you want assignment to the variable value or `x = x * y` if you want a new python Tensor object.\nEpoch 1/20\n106/106 [==============================] - 346s - loss: 306.0022 - val_loss: 230.8003\nEpoch 2/20\n106/106 [==============================] - 356s - loss: 228.5843 - val_loss: 208.5356\nEpoch 3/20\n106/106 [==============================] - 359s - loss: 221.6973 - val_loss: 202.8349\nEpoch 4/20\n106/106 [==============================] - 359s - loss: 214.3285 - val_loss: 193.8858\nEpoch 5/20\n106/106 [==============================] - 357s - loss: 206.8792 - val_loss: 190.5545\nEpoch 6/20\n106/106 [==============================] - 354s - loss: 197.0360 - val_loss: 180.5237\nEpoch 7/20\n106/106 [==============================] - 359s - loss: 186.1461 - val_loss: 173.6953\nEpoch 8/20\n106/106 [==============================] - 360s - loss: 177.0056 - val_loss: 156.9757\nEpoch 9/20\n106/106 [==============================] - 354s - loss: 170.2661 - val_loss: 152.6200\nEpoch 10/20\n106/106 [==============================] - 361s - loss: 166.5991 - val_loss: 152.2965\nEpoch 11/20\n106/106 [==============================] - 357s - loss: 164.0009 - val_loss: 146.9104\nEpoch 12/20\n106/106 [==============================] - 358s - loss: 160.5796 - val_loss: 142.4707\nEpoch 13/20\n106/106 [==============================] - 357s - loss: 157.5561 - val_loss: 142.0041\nEpoch 14/20\n106/106 [==============================] - 362s - loss: 156.8146 - val_loss: 141.1184\nEpoch 15/20\n106/106 [==============================] - 360s - loss: 154.9966 - val_loss: 140.2970\nEpoch 16/20\n106/106 [==============================] - 360s - loss: 152.4878 - val_loss: 138.7927\nEpoch 17/20\n106/106 [==============================] - 359s - loss: 152.0400 - val_loss: 136.6318\nEpoch 18/20\n106/106 [==============================] - 359s - loss: 149.7010 - val_loss: 137.6884\nEpoch 19/20\n106/106 [==============================] - 354s - loss: 148.1568 - val_loss: 130.7026\nEpoch 20/20\n106/106 [==============================] - 357s - loss: 148.5415 - val_loss: 136.0464\n" ] ], [ [ "<a id='model4'></a>\n### (IMPLEMENTATION) Model 4: Bidirectional RNN + TimeDistributed Dense\n\nRead about the [Bidirectional](https://keras.io/layers/wrappers/) wrapper in the Keras documentation. For your next architecture, you will specify an architecture that uses a single bidirectional RNN layer, before a (`TimeDistributed`) dense layer. 
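In Keras this amounts to little more than wrapping the recurrent layer; the sketch below is consistent with the `model_4` summary printed further down:

```python
from keras.layers import Bidirectional, Dense, GRU, Input, TimeDistributed
from keras.models import Model

inputs = Input(shape=(None, 161))
# The forward and backward passes each emit `units` features per time step;
# with merge_mode='concat' they are concatenated (250 + 250 = 500 below).
x = Bidirectional(GRU(250, return_sequences=True), merge_mode='concat')(inputs)
outputs = TimeDistributed(Dense(29, activation='softmax'))(x)
bidi_model = Model(inputs=inputs, outputs=outputs)
```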
The added value of a bidirectional RNN is described well in [this paper](http://www.cs.toronto.edu/~hinton/absps/DRNN_speech.pdf).\n> One shortcoming of conventional RNNs is that they are only able to make use of previous context. In speech recognition, where whole utterances are transcribed at once, there is no reason not to exploit future context as well. Bidirectional RNNs (BRNNs) do this by processing the data in both directions with two separate hidden layers which are then fed forwards to the same output layer.\n\n<img src=\"images/bidirectional_rnn_model.png\" width=\"80%\">\n\nBefore running the code cell below, you must complete the `bidirectional_rnn_model` function in `sample_models.py`. Feel free to use `SimpleRNN`, `LSTM`, or `GRU` units. When specifying the `Bidirectional` wrapper, use `merge_mode='concat'`.", "_____no_output_____" ] ], [ [ "model_4 = bidirectional_rnn_model(\n input_dim=161, # change to 13 if you would like to use MFCC features\n units=250,\n dropout_rate=0.1)", "_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nthe_input (InputLayer) (None, None, 161) 0 \n_________________________________________________________________\nbidirectional_9 (Bidirection (None, None, 500) 618000 \n_________________________________________________________________\ntime_distributed_28 (TimeDis (None, None, 29) 14529 \n_________________________________________________________________\nsoftmax (Activation) (None, None, 29) 0 \n=================================================================\nTotal params: 632,529\nTrainable params: 632,529\nNon-trainable params: 0\n_________________________________________________________________\nNone\n" ] ], [ [ "Please execute the code cell below to train the neural network you specified in `input_to_softmax`. After the model has finished training, the model is [saved](https://keras.io/getting-started/faq/#how-can-i-save-a-keras-model) in the HDF5 file `model_4.h5`. The loss history is [saved](https://wiki.python.org/moin/UsingPickle) in `model_4.pickle`. 
You are welcome to tweak any of the optional parameters while calling the `train_model` function, but this is not required.", "_____no_output_____" ] ], [ [ "train_model(input_to_softmax=model_4, \n pickle_path='model_4.pickle', \n save_model_path='model_4.h5', \n optimizer=SGD(lr=0.06, decay=1e-6, momentum=0.9, nesterov=True, clipnorm=1),\n spectrogram=True) # change to False if you would like to use MFCC features", "Epoch 1/20\n106/106 [==============================] - 205s - loss: 275.6266 - val_loss: 226.8717\nEpoch 2/20\n106/106 [==============================] - 205s - loss: 213.2997 - val_loss: 201.3109\nEpoch 3/20\n106/106 [==============================] - 204s - loss: 200.7651 - val_loss: 186.7573\nEpoch 4/20\n106/106 [==============================] - 205s - loss: 193.3435 - val_loss: 182.8960\nEpoch 5/20\n106/106 [==============================] - 204s - loss: 187.6618 - val_loss: 173.7006\nEpoch 6/20\n106/106 [==============================] - 204s - loss: 182.4469 - val_loss: 177.4735\nEpoch 7/20\n106/106 [==============================] - 204s - loss: 177.6839 - val_loss: 169.6660\nEpoch 8/20\n106/106 [==============================] - 204s - loss: 173.7626 - val_loss: 169.5262\nEpoch 9/20\n106/106 [==============================] - 205s - loss: 169.5368 - val_loss: 162.4727\nEpoch 10/20\n106/106 [==============================] - 204s - loss: 166.1426 - val_loss: 161.0329\nEpoch 11/20\n106/106 [==============================] - 205s - loss: 162.1614 - val_loss: 159.1479\nEpoch 12/20\n106/106 [==============================] - 205s - loss: 159.1850 - val_loss: 154.9204\nEpoch 13/20\n106/106 [==============================] - 204s - loss: 156.0412 - val_loss: 149.9123\nEpoch 14/20\n106/106 [==============================] - 204s - loss: 153.1229 - val_loss: 151.7496\nEpoch 15/20\n106/106 [==============================] - 204s - loss: 150.3786 - val_loss: 147.4174\nEpoch 16/20\n106/106 [==============================] - 205s - loss: 148.0250 - val_loss: 148.3927\nEpoch 17/20\n106/106 [==============================] - 203s - loss: 145.1999 - val_loss: 142.2009\nEpoch 18/20\n106/106 [==============================] - 205s - loss: 143.4235 - val_loss: 142.9599\nEpoch 19/20\n106/106 [==============================] - 205s - loss: 140.9955 - val_loss: 141.4220\nEpoch 20/20\n106/106 [==============================] - 204s - loss: 139.4114 - val_loss: 138.3626\n" ] ], [ [ "<a id='model5'></a>\n### (OPTIONAL IMPLEMENTATION) Models 5+\n\nIf you would like to try out more architectures than the ones above, please use the code cell below. 
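One such architecture, the 2D-convolutional front end trained as `model_5` below, can be reconstructed from its printed summary roughly as follows (a sketch; the actual `cnn2d_rnn_model` may differ in details):

```python
from keras import backend as K
from keras.layers import (Conv2D, Dense, Flatten, GRU, Input, Lambda,
                          MaxPooling2D, TimeDistributed)
from keras.models import Model

inputs = Input(shape=(None, 161))                        # (batch, time, freq)
x = Lambda(lambda t: K.expand_dims(t, axis=-1))(inputs)  # add a channel axis
x = Conv2D(50, (11, 11), strides=(1, 1), padding='same',
           activation='relu')(x)                         # convolve over time AND frequency
x = MaxPooling2D(pool_size=(1, 5))(x)                    # pool frequency only: 161 -> 32
x = TimeDistributed(Flatten())(x)                        # (batch, time, 32 * 50 = 1600)
x = GRU(200, return_sequences=True)(x)
outputs = TimeDistributed(Dense(29, activation='softmax'))(x)
cnn2d_model = Model(inputs=inputs, outputs=outputs)
```

Pooling only along the frequency axis keeps the time axis intact, which the CTC loss requires.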
Please continue to follow the same convention for saving the models; for the $i$-th sample model, please save the loss at **`model_i.pickle`** and saving the trained model at **`model_i.h5`**.", "_____no_output_____" ] ], [ [ "model_5 = cnn2d_rnn_model(\n input_dim=161, # change to 13 if you would like to use MFCC features\n filters=50,\n kernel_size=(11,11), \n conv_stride=1,\n conv_border_mode='same',\n pool_size=(1,5),\n units=200,\n dropout_rate=0.1)", "_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nthe_input (InputLayer) (None, None, 161) 0 \n_________________________________________________________________\nlambda_8 (Lambda) (None, None, 161, 1) 0 \n_________________________________________________________________\nconv2d_5 (Conv2D) (None, None, 161, 50) 6100 \n_________________________________________________________________\nmax_pooling2d_5 (MaxPooling2 (None, None, 32, 50) 0 \n_________________________________________________________________\ntime_distributed_11 (TimeDis (None, None, 1600) 0 \n_________________________________________________________________\ngru_8 (GRU) (None, None, 200) 1080600 \n_________________________________________________________________\ntime_distributed_12 (TimeDis (None, None, 29) 5829 \n_________________________________________________________________\nsoftmax (Activation) (None, None, 29) 0 \n=================================================================\nTotal params: 1,092,529\nTrainable params: 1,092,529\nNon-trainable params: 0\n_________________________________________________________________\nNone\n" ], [ "from keras.optimizers import SGD\n\ntrain_model(input_to_softmax=model_5, \n pickle_path='model_5.pickle', \n save_model_path='model_5.h5', \n optimizer=SGD(lr=0.06, decay=1e-6, momentum=0.9, nesterov=True, clipnorm=1),\n spectrogram=True) # change to False if you would like to use MFCC features", "Epoch 1/20\n106/106 [==============================] - 137s - loss: 285.0588 - val_loss: 228.7582\nEpoch 2/20\n106/106 [==============================] - 129s - loss: 230.2834 - val_loss: 213.1584\nEpoch 3/20\n106/106 [==============================] - 126s - loss: 213.9887 - val_loss: 194.7103\nEpoch 4/20\n106/106 [==============================] - 126s - loss: 197.2486 - val_loss: 179.5294\nEpoch 5/20\n106/106 [==============================] - 126s - loss: 180.5510 - val_loss: 166.0413\nEpoch 6/20\n106/106 [==============================] - 125s - loss: 166.6758 - val_loss: 153.4104\nEpoch 7/20\n106/106 [==============================] - 125s - loss: 157.2719 - val_loss: 144.9292\nEpoch 8/20\n106/106 [==============================] - 126s - loss: 150.0972 - val_loss: 142.1533\nEpoch 9/20\n106/106 [==============================] - 125s - loss: 143.9420 - val_loss: 138.1702\nEpoch 10/20\n106/106 [==============================] - 124s - loss: 138.9901 - val_loss: 132.9487\nEpoch 11/20\n106/106 [==============================] - 125s - loss: 135.0339 - val_loss: 131.0782\nEpoch 12/20\n106/106 [==============================] - 125s - loss: 131.4873 - val_loss: 129.4672\nEpoch 13/20\n106/106 [==============================] - 124s - loss: 128.0020 - val_loss: 128.9729\nEpoch 14/20\n106/106 [==============================] - 124s - loss: 125.3787 - val_loss: 126.8662\nEpoch 15/20\n106/106 [==============================] - 124s - loss: 122.5167 - val_loss: 122.6902\nEpoch 16/20\n106/106 [==============================] - 
124s - loss: 119.9851 - val_loss: 123.2564\nEpoch 17/20\n106/106 [==============================] - 125s - loss: 117.8803 - val_loss: 121.1491\nEpoch 18/20\n106/106 [==============================] - 124s - loss: 115.6461 - val_loss: 121.6285\nEpoch 19/20\n106/106 [==============================] - 124s - loss: 113.2564 - val_loss: 119.9097\nEpoch 20/20\n106/106 [==============================] - 125s - loss: 111.4150 - val_loss: 118.3596\n" ] ], [ [ "<a id='compare'></a>\n### Compare the Models\n\nExecute the code cell below to evaluate the performance of the drafted deep learning models. The training and validation loss are plotted for each model.", "_____no_output_____" ] ], [ [ "from glob import glob\nimport numpy as np\nimport _pickle as pickle\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n%matplotlib inline\nsns.set_style(style='white')\n\n# obtain the paths for the saved model history\nall_pickles = sorted(glob(\"results/*.pickle\"))\n# extract the name of each model\nmodel_names = [item[8:-7] for item in all_pickles]\n# extract the loss history for each model\nvalid_loss = [pickle.load( open( i, "rb" ) )['val_loss'] for i in all_pickles]\ntrain_loss = [pickle.load( open( i, "rb" ) )['loss'] for i in all_pickles]\n# save the number of epochs used to train each model\nnum_epochs = [len(valid_loss[i]) for i in range(len(valid_loss))]\n\nfig = plt.figure(figsize=(16,5))\n\n# plot the training loss vs. epoch for each model\nax1 = fig.add_subplot(121)\nfor i in range(len(all_pickles)):\n ax1.plot(np.linspace(1, num_epochs[i], num_epochs[i]), \n train_loss[i], label=model_names[i])\n# clean up the plot\nax1.legend() \nax1.set_xlim([1, max(num_epochs)])\nplt.xlabel('Epoch')\nplt.ylabel('Training Loss')\n\n# plot the validation loss vs. epoch for each model\nax2 = fig.add_subplot(122)\nfor i in range(len(all_pickles)):\n ax2.plot(np.linspace(1, num_epochs[i], num_epochs[i]), \n valid_loss[i], label=model_names[i])\n# clean up the plot\nax2.legend() \nax2.set_xlim([1, max(num_epochs)])\nplt.xlabel('Epoch')\nplt.ylabel('Validation Loss')\nplt.show()", "_____no_output_____" ] ], [ [ "##### __Question 1:__ Use the plot above to analyze the performance of each of the attempted architectures. Which performs best? Provide an explanation regarding why you think some models perform better than others. \n\n__Answer:__\n\nThe following table gives the model performance in ascending order of (best) validation loss.\n\n| Rank | Model | Description | Best Loss | \n| -- | -- | -- | -- |\n| 1 | 5 | 2D CNN + RNN + TimeDistributed Dense | 118.3596 |\n| 2 | 3 | Deeper RNN + TimeDistributed Dense | 130.7026 | \n| 3 | 2 | CNN + RNN + TimeDistributed Dense | 130.9444 | \n| 4 | 1 | RNN + TimeDistributed Dense | 131.8664 | \n| 5 | 4 | Bidirectional RNN + TimeDistributed Dense | 138.3626 |\n| 6 | 0 | RNN | 721.1129 | \n\n\nAll of the time distributed models perform well, indicating that the time series gives valuable signal (as expected). The models that preprocessed the input with CNNs performed well, but were prone to overfitting. The network with the two-dimensional convolutional layer performed best, indicating that the convolutional layer can produce features beyond what a time series model alone can infer. In particular, the frequency dimension has informative patterns that can be mined. Deeper recurrent layers do not seem to add much to performance, as evidenced by the model 3 to model 1 comparison, within the 20-epoch evaluation. 
Models 3 and 4, with sufficient dropout rates, do seem like they are not prone to overfitting and may perform better with more epochs than the models with convolutional layers. The latter two models both use recurrent layers that are less prone to gradient explosions, possibly why they take longer to train.\n\nThe final model combines the best convolutional layer with the bidirectional RNN with time distributed dense layers.", "_____no_output_____" ], [ "<a id='final'></a>\n### (IMPLEMENTATION) Final Model\n\nNow that you've tried out many sample models, use what you've learned to draft your own architecture! While your final acoustic model should not be identical to any of the architectures explored above, you are welcome to merely combine the explored layers above into a deeper architecture. It is **NOT** necessary to include new layer types that were not explored in the notebook.\n\nHowever, if you would like some ideas for even more layer types, check out these ideas for some additional, optional extensions to your model:\n\n- If you notice your model is overfitting to the training dataset, consider adding **dropout**! To add dropout to [recurrent layers](https://faroit.github.io/keras-docs/1.0.2/layers/recurrent/), pay special attention to the `dropout_W` and `dropout_U` arguments. This [paper](http://arxiv.org/abs/1512.05287) may also provide some interesting theoretical background.\n- If you choose to include a convolutional layer in your model, you may get better results by working with **dilated convolutions**. If you choose to use dilated convolutions, make sure that you are able to accurately calculate the length of the acoustic model's output in the `model.output_length` lambda function. You can read more about dilated convolutions in Google's [WaveNet paper](https://arxiv.org/abs/1609.03499). For an example of a speech-to-text system that makes use of dilated convolutions, check out this GitHub [repository](https://github.com/buriburisuri/speech-to-text-wavenet). You can work with dilated convolutions [in Keras](https://keras.io/layers/convolutional/) by paying special attention to the `padding` argument when you specify a convolutional layer.\n- If your model makes use of convolutional layers, why not also experiment with adding **max pooling**? Check out [this paper](https://arxiv.org/pdf/1701.02720.pdf) for example architecture that makes use of max pooling in an acoustic model.\n- So far, you have experimented with a single bidirectional RNN layer. Consider stacking the bidirectional layers, to produce a [deep bidirectional RNN](https://www.cs.toronto.edu/~graves/asru_2013.pdf)!\n\nAll models that you specify in this repository should have `output_length` defined as an attribute. This attribute is a lambda function that maps the (temporal) length of the input acoustic features to the (temporal) length of the output softmax layer. This function is used in the computation of CTC loss; to see this, look at the `add_ctc_loss` function in `train_utils.py`. To see where the `output_length` attribute is defined for the models in the code, take a look at the `sample_models.py` file. 
You will notice this line of code within most models:\n```\nmodel.output_length = lambda x: x\n```\nThe acoustic model that incorporates a convolutional layer (`cnn_rnn_model`) has a line that is a bit different:\n```\nmodel.output_length = lambda x: cnn_output_length(\n x, kernel_size, conv_border_mode, conv_stride)\n```\n\nIn the case of models that use purely recurrent layers, the lambda function is the identity function, as the recurrent layers do not modify the (temporal) length of their input tensors. However, convolutional layers are more complicated and require a specialized function (`cnn_output_length` in `sample_models.py`) to determine the temporal length of their output.\n\nYou will have to add the `output_length` attribute to your final model before running the code cell below. Feel free to use the `cnn_output_length` function, if it suits your model. ", "_____no_output_____" ] ], [ [ "# specify the model\nmodel_end = final_model(\n input_dim=161,\n filters=50,\n kernel_size=(11,11),\n conv_stride=1,\n conv_border_mode='same',\n pool_size=(1,5),\n units=200,\n recur_layers=1,\n dropout_rate=0.5)", "WARNING:tensorflow:From /home/pjordan/anaconda3/envs/dnn-speech-recognizer/lib/python3.5/site-packages/keras/backend/tensorflow_backend.py:1208: calling reduce_prod (from tensorflow.python.ops.math_ops) with keep_dims is deprecated and will be removed in a future version.\nInstructions for updating:\nkeep_dims is deprecated, use keepdims instead\n_________________________________________________________________\nLayer (type) Output Shape Param # \n=================================================================\nthe_input (InputLayer) (None, None, 161) 0 \n_________________________________________________________________\nlambda_17 (Lambda) (None, None, 161, 1) 0 \n_________________________________________________________________\nconv2d_1 (Conv2D) (None, None, 161, 50) 6100 \n_________________________________________________________________\nmax_pooling2d_1 (MaxPooling2 (None, None, 32, 50) 0 \n_________________________________________________________________\ntime_distributed_19 (TimeDis (None, None, 1600) 0 \n_________________________________________________________________\nbidirectional_1 (Bidirection (None, None, 400) 2161200 \n_________________________________________________________________\nbatch_normalization_19 (Batc (None, None, 400) 1600 \n_________________________________________________________________\ntime_distributed_20 (TimeDis (None, None, 29) 11629 \n_________________________________________________________________\nsoftmax (Activation) (None, None, 29) 0 \n=================================================================\nTotal params: 2,180,529\nTrainable params: 2,179,729\nNon-trainable params: 800\n_________________________________________________________________\nNone\n" ] ], [ [ "Please execute the code cell below to train the neural network you specified in `input_to_softmax`. After the model has finished training, the model is [saved](https://keras.io/getting-started/faq/#how-can-i-save-a-keras-model) in the HDF5 file `model_end.h5`. The loss history is [saved](https://wiki.python.org/moin/UsingPickle) in `model_end.pickle`. 
You are welcome to tweak any of the optional parameters while calling the `train_model` function, but this is not required.", "_____no_output_____" ] ], [ [ "from keras.optimizers import SGD\n\ntrain_model(input_to_softmax=model_end, \n pickle_path='model_end.pickle', \n save_model_path='model_end.h5',\n optimizer=SGD(lr=0.05, decay=1e-6, momentum=0.9, nesterov=True, clipnorm=1),\n spectrogram=True) # change to False if you would like to use MFCC features", "Epoch 1/20\n106/106 [==============================] - 248s - loss: 335.9858 - val_loss: 255.5860\nEpoch 2/20\n106/106 [==============================] - 240s - loss: 242.4996 - val_loss: 238.2656\nEpoch 3/20\n106/106 [==============================] - 239s - loss: 222.3218 - val_loss: 197.3325\nEpoch 4/20\n106/106 [==============================] - 241s - loss: 200.9018 - val_loss: 185.4125\nEpoch 5/20\n106/106 [==============================] - 239s - loss: 187.2262 - val_loss: 171.6594\nEpoch 6/20\n106/106 [==============================] - 236s - loss: 175.8966 - val_loss: 157.7085\nEpoch 7/20\n106/106 [==============================] - 241s - loss: 167.2219 - val_loss: 154.9972\nEpoch 8/20\n106/106 [==============================] - 236s - loss: 161.3043 - val_loss: 151.5892\nEpoch 9/20\n106/106 [==============================] - 237s - loss: 156.4006 - val_loss: 145.9787\nEpoch 10/20\n106/106 [==============================] - 238s - loss: 152.1061 - val_loss: 141.4593\nEpoch 11/20\n106/106 [==============================] - 238s - loss: 148.1522 - val_loss: 139.1478\nEpoch 12/20\n106/106 [==============================] - 238s - loss: 145.1965 - val_loss: 136.7189\nEpoch 13/20\n106/106 [==============================] - 239s - loss: 142.2492 - val_loss: 134.3185\nEpoch 14/20\n106/106 [==============================] - 237s - loss: 140.1469 - val_loss: 131.4872\nEpoch 15/20\n106/106 [==============================] - 238s - loss: 137.7255 - val_loss: 129.9063\nEpoch 16/20\n106/106 [==============================] - 236s - loss: 135.6871 - val_loss: 129.2205\nEpoch 17/20\n106/106 [==============================] - 239s - loss: 133.5798 - val_loss: 127.0699\nEpoch 18/20\n106/106 [==============================] - 239s - loss: 131.8900 - val_loss: 125.6219\nEpoch 19/20\n106/106 [==============================] - 237s - loss: 130.2181 - val_loss: 126.2355\nEpoch 20/20\n106/106 [==============================] - 237s - loss: 128.9155 - val_loss: 126.0262\n" ] ], [ [ "__Question 2:__ Describe your final model architecture and your reasoning at each step. \n\n__Answer:__\n\nThe final architecture included a two-dimensional convolutional layer followed by a max-pooling layer. The output of the max pooling layer fed into a bi-directional GRU layer, which in turn fed a time-distributed dense layer. In total, the network has 2,179,729 trainable parameters.\n\nThe 2D convolutional and max pooling layers are used to transform the time and frequency matrix into a time and feature matrix input, hopefully producing meaningful distillations of common waveforms. As in the base models in the previous section, the bidirectional GRU allows more flexibility by processing in both directions in time. The latter does not appear to add much improvement over a GRU with comparable parameters.", "_____no_output_____" ], [ "<a id='step3'></a>\n## STEP 3: Obtain Predictions\n\nWe have written a function for you to decode the predictions of your acoustic model. 
To use the function, please execute the code cell below.", "_____no_output_____" ] ], [ [ "import numpy as np\nfrom data_generator import AudioGenerator\nfrom keras import backend as K\nfrom utils import int_sequence_to_text\nfrom IPython.display import Audio\n\ndef get_predictions(index, partition, input_to_softmax, model_path):\n \"\"\" Print a model's decoded predictions\n Params:\n index (int): The example you would like to visualize\n partition (str): One of 'train' or 'validation'\n input_to_softmax (Model): The acoustic model\n model_path (str): Path to saved acoustic model's weights\n \"\"\"\n # load the train and test data\n data_gen = AudioGenerator()\n data_gen.load_train_data()\n data_gen.load_validation_data()\n \n # obtain the true transcription and the audio features \n if partition == 'validation':\n transcr = data_gen.valid_texts[index]\n audio_path = data_gen.valid_audio_paths[index]\n data_point = data_gen.normalize(data_gen.featurize(audio_path))\n elif partition == 'train':\n transcr = data_gen.train_texts[index]\n audio_path = data_gen.train_audio_paths[index]\n data_point = data_gen.normalize(data_gen.featurize(audio_path))\n else:\n raise Exception('Invalid partition! Must be \"train\" or \"validation\"')\n \n # obtain and decode the acoustic model's predictions\n input_to_softmax.load_weights(model_path)\n prediction = input_to_softmax.predict(np.expand_dims(data_point, axis=0))\n output_length = [input_to_softmax.output_length(data_point.shape[0])] \n pred_ints = (K.eval(K.ctc_decode(\n prediction, output_length)[0][0])+1).flatten().tolist()\n \n # play the audio file, and display the true and predicted transcriptions\n print('-'*80)\n Audio(audio_path)\n print('True transcription:\\n' + '\\n' + transcr)\n print('-'*80)\n print('Predicted transcription:\\n' + '\\n' + ''.join(int_sequence_to_text(pred_ints)))\n print('-'*80)", "_____no_output_____" ] ], [ [ "Use the code cell below to obtain the transcription predicted by your final model for the first example in the training dataset.", "_____no_output_____" ] ], [ [ "get_predictions(index=0, \n partition='train',\n input_to_softmax=model_end, \n model_path='results/model_end.h5')", "--------------------------------------------------------------------------------\nTrue transcription:\n\nhe was young no spear had touched him no poison lurked in his wine\n--------------------------------------------------------------------------------\nPredicted transcription:\n\nhe was o no sperhd thtm no pis on mork din iso\n--------------------------------------------------------------------------------\n" ] ], [ [ "Use the next code cell to visualize the model's prediction for the first example in the validation dataset.", "_____no_output_____" ] ], [ [ "get_predictions(index=0, \n partition='validation',\n input_to_softmax=model_end, \n model_path='results/model_end.h5')", "--------------------------------------------------------------------------------\nTrue transcription:\n\no life of this our spring\n--------------------------------------------------------------------------------\nPredicted transcription:\n\n bo f an dhes rbrn\n--------------------------------------------------------------------------------\n" ] ], [ [ "One standard way to improve the results of the decoder is to incorporate a language model. We won't pursue this in the notebook, but you are welcome to do so as an _optional extension_. 
\n\nIf you are interested in creating models that provide improved transcriptions, you are encouraged to download [more data](http://www.openslr.org/12/) and train bigger, deeper models. But beware - the model will likely take a long while to train. For instance, training this [state-of-the-art](https://arxiv.org/pdf/1512.02595v1.pdf) model would take 3-6 weeks on a single GPU!", "_____no_output_____" ] ], [ [ "!!python -m nbconvert *.ipynb", "_____no_output_____" ], [ "!!zip submission.zip vui_notebook.ipynb report.html sample_models.py results/*", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ] ]
d015fd72eb32a3eda85e671e9885e7df17a457b7
15,774
ipynb
Jupyter Notebook
demo/BERT/inference.ipynb
malithj/TensorRT
48605d4b5673df89110cf41249ad007259d7c34a
[ "Apache-2.0" ]
5,249
2019-06-17T17:20:34.000Z
2022-03-31T17:56:05.000Z
demo/BERT/inference.ipynb
zhuyujie1993/TensorRT
c2668947ea9ba4c73eb1182c162101f09ff250fd
[ "Apache-2.0" ]
1,721
2019-06-17T18:13:29.000Z
2022-03-31T16:09:53.000Z
demo/BERT/inference.ipynb
zhuyujie1993/TensorRT
c2668947ea9ba4c73eb1182c162101f09ff250fd
[ "Apache-2.0" ]
1,414
2019-06-18T04:01:17.000Z
2022-03-31T09:16:53.000Z
44.061453
1,339
0.629961
[ [ [ "# Copyright 2021 NVIDIA Corporation. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================", "_____no_output_____" ] ], [ [ "<img src=\"https://upload.wikimedia.org/wikipedia/en/6/6d/Nvidia_image_logo.svg\" style=\"width: 90px; float: right;\">\n\n# QA Inference on BERT using TensorRT", "_____no_output_____" ], [ "## 1. Overview\n\nBidirectional Embedding Representations from Transformers (BERT), is a method of pre-training language representations which obtains state-of-the-art results on a wide array of Natural Language Processing (NLP) tasks. \n\nThe original paper can be found here: https://arxiv.org/abs/1810.04805.\n", "_____no_output_____" ], [ "### 1.a Learning objectives\n\nThis notebook demonstrates:\n- Inference on Question Answering (QA) task with BERT Base/Large model\n- The use fine-tuned NVIDIA BERT models\n- Use of BERT model with TRT", "_____no_output_____" ], [ "## 2. Requirements\n\nPlease refer to the ReadMe file", "_____no_output_____" ], [ "## 3. BERT Inference: Question Answering\n\nWe can run inference on a fine-tuned BERT model for tasks like Question Answering.\n\nHere we use a BERT model fine-tuned on a [SQuaD 2.0 Dataset](https://rajpurkar.github.io/SQuAD-explorer/) which contains 100,000+ question-answer pairs on 500+ articles combined with over 50,000 new, unanswerable questions.", "_____no_output_____" ], [ "### 3.a Paragraph and Queries\n\nThe paragraph and the questions can be customized by changing the text below. Note that when using models with small sequence lengths, you should use a shorter paragraph:", "_____no_output_____" ], [ "#### Paragraph:", "_____no_output_____" ] ], [ [ "paragraph_text = \"The Apollo program, also known as Project Apollo, was the third United States human spaceflight program carried out by the National Aeronautics and Space Administration (NASA), which accomplished landing the first humans on the Moon from 1969 to 1972. First conceived during Dwight D. Eisenhower's administration as a three-man spacecraft to follow the one-man Project Mercury which put the first Americans in space, Apollo was later dedicated to President John F. Kennedy's national goal of landing a man on the Moon and returning him safely to the Earth by the end of the 1960s, which he proposed in a May 25, 1961, address to Congress. Project Mercury was followed by the two-man Project Gemini. The first manned flight of Apollo was in 1968. Apollo ran from 1961 to 1972, and was supported by the two-man Gemini program which ran concurrently with it from 1962 to 1966. Gemini missions developed some of the space travel techniques that were necessary for the success of the Apollo missions. Apollo used Saturn family rockets as launch vehicles. 
Apollo/Saturn vehicles were also used for an Apollo Applications Program, which consisted of Skylab, a space station that supported three manned missions in 1973-74, and the Apollo-Soyuz Test Project, a joint Earth orbit mission with the Soviet Union in 1975.\"\n\n# Short paragraph version for BERT models with max sequence length of 128\nshort_paragraph_text = \"The Apollo program was the third United States human spaceflight program. First conceived as a three-man spacecraft to follow the one-man Project Mercury which put the first Americans in space, Apollo was dedicated to President John F. Kennedy's national goal of landing a man on the Moon. The first manned flight of Apollo was in 1968. Apollo ran from 1961 to 1972 followed by the Apollo-Soyuz Test Project a joint Earth orbit mission with the Soviet Union in 1975.\"", "_____no_output_____" ] ], [ [ "#### Question:", "_____no_output_____" ] ], [ [ "question_text = \"What project put the first Americans into space?\"\n#question_text = \"What year did the first manned Apollo flight occur?\"\n#question_text = \"What President is credited with the original notion of putting Americans in space?\"\n#question_text = \"Who did the U.S. collaborate with on an Earth orbit mission in 1975?\"", "_____no_output_____" ] ], [ [ "In this example we ask our BERT model questions related to the following paragraph:\n\n**The Apollo Program**\n_\"The Apollo program, also known as Project Apollo, was the third United States human spaceflight program carried out by the National Aeronautics and Space Administration (NASA), which accomplished landing the first humans on the Moon from 1969 to 1972. First conceived during Dwight D. Eisenhower's administration as a three-man spacecraft to follow the one-man Project Mercury which put the first Americans in space, Apollo was later dedicated to President John F. Kennedy's national goal of landing a man on the Moon and returning him safely to the Earth by the end of the 1960s, which he proposed in a May 25, 1961, address to Congress. Project Mercury was followed by the two-man Project Gemini. The first manned flight of Apollo was in 1968. Apollo ran from 1961 to 1972, and was supported by the two-man Gemini program which ran concurrently with it from 1962 to 1966. Gemini missions developed some of the space travel techniques that were necessary for the success of the Apollo missions. Apollo used Saturn family rockets as launch vehicles. Apollo/Saturn vehicles were also used for an Apollo Applications Program, which consisted of Skylab, a space station that supported three manned missions in 1973-74, and the Apollo-Soyuz Test Project, a joint Earth orbit mission with the Soviet Union in 1975.\"_\n\nThe questions and relative answers expected are shown below:\n\n - **Q1:** \"What project put the first Americans into space?\" \n - **A1:** \"Project Mercury\"\n - **Q2:** \"What program was created to carry out these projects and missions?\"\n - **A2:** \"The Apollo program\"\n - **Q3:** \"What year did the first manned Apollo flight occur?\"\n - **A3:** \"1968\"\n - **Q4:** \"What President is credited with the original notion of putting Americans in space?\"\n - **A4:** \"John F. Kennedy\"\n - **Q5:** \"Who did the U.S. 
collaborate with on an Earth orbit mission in 1975?\"\n - **A5:** \"Soviet Union\"\n - **Q6:** \"How long did Project Apollo run?\"\n - **A6:** \"1961 to 1972\"\n - **Q7:** \"What program helped develop space travel techniques that Project Apollo used?\"\n - **A7:** \"Gemini Mission\"\n - **Q8:** \"What space station supported three manned missions in 1973-1974?\"\n - **A8:** \"Skylab\"", "_____no_output_____" ], [ "## Data Preprocessing\nLet's convert the paragraph and the question to BERT input with the help of the tokenizer:", "_____no_output_____" ] ], [ [ "import helpers.data_processing as dp\nimport helpers.tokenization as tokenization\n\ntokenizer = tokenization.FullTokenizer(vocab_file=\"/workspace/TensorRT/demo/BERT/models/fine-tuned/bert_tf_ckpt_large_qa_squad2_amp_128_v19.03.1/vocab.txt\", do_lower_case=True)\n\n# The maximum number of tokens for the question. Questions longer than this will be truncated to this length.\nmax_query_length = 64\n\n# When splitting up a long document into chunks, how much stride to take between chunks.\ndoc_stride = 128\n\n# The maximum total input sequence length after WordPiece tokenization. \n# Sequences longer than this will be truncated, and sequences shorter \nmax_seq_length = 128\n\n# Extract tokens from the paragraph\ndoc_tokens = dp.convert_doc_tokens(short_paragraph_text)\n\n# Extract features from the paragraph and question\nfeatures = dp.convert_example_to_features(doc_tokens, question_text, tokenizer, max_seq_length, doc_stride, max_query_length)\n", "_____no_output_____" ] ], [ [ "## TensorRT Inference", "_____no_output_____" ] ], [ [ "import tensorrt as trt\nTRT_LOGGER = trt.Logger(trt.Logger.INFO)", "_____no_output_____" ], [ "import ctypes\nimport os\n\nctypes.CDLL(\"libnvinfer_plugin.so\", mode=ctypes.RTLD_GLOBAL)", "_____no_output_____" ], [ "import pycuda.driver as cuda\nimport pycuda.autoinit\nimport collections\nimport numpy as np\nimport time\n\n# Load the BERT-Large Engine\nwith open(\"/workspace/TensorRT/demo/BERT/engines/bert_large_128.engine\", \"rb\") as f, \\\n trt.Runtime(TRT_LOGGER) as runtime, \\\n runtime.deserialize_cuda_engine(f.read()) as engine, \\\n engine.create_execution_context() as context:\n\n # We always use batch size 1.\n input_shape = (1, max_seq_length)\n input_nbytes = trt.volume(input_shape) * trt.int32.itemsize\n \n # Allocate device memory for inputs.\n d_inputs = [cuda.mem_alloc(input_nbytes) for binding in range(3)]\n # Create a stream in which to copy inputs/outputs and run inference.\n stream = cuda.Stream()\n\n # Specify input shapes. These must be within the min/max bounds of the active profile (0th profile in this case)\n # Note that input shapes can be specified on a per-inference basis, but in this case, we only have a single shape.\n for binding in range(3):\n context.set_binding_shape(binding, input_shape)\n assert context.all_binding_shapes_specified\n\n # Allocate output buffer by querying the size from the context. 
This may be different for different input shapes.\n h_output = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.float32)\n d_output = cuda.mem_alloc(h_output.nbytes)\n\n print(\"\\nRunning Inference...\")\n\n _NetworkOutput = collections.namedtuple( # pylint: disable=invalid-name\n \"NetworkOutput\",\n [\"start_logits\", \"end_logits\", \"feature_index\"])\n networkOutputs = []\n\n eval_time_elapsed = 0\n for feature_index, feature in enumerate(features):\n # Copy inputs\n input_ids = cuda.register_host_memory(np.ascontiguousarray(feature.input_ids.ravel()))\n segment_ids = cuda.register_host_memory(np.ascontiguousarray(feature.segment_ids.ravel()))\n input_mask = cuda.register_host_memory(np.ascontiguousarray(feature.input_mask.ravel()))\n\n eval_start_time = time.time()\n cuda.memcpy_htod_async(d_inputs[0], input_ids, stream)\n cuda.memcpy_htod_async(d_inputs[1], segment_ids, stream)\n cuda.memcpy_htod_async(d_inputs[2], input_mask, stream)\n\n # Run inference\n context.execute_async_v2(bindings=[int(d_inp) for d_inp in d_inputs] + [int(d_output)], stream_handle=stream.handle)\n # Synchronize the stream\n stream.synchronize()\n eval_time_elapsed += (time.time() - eval_start_time)\n\n # Transfer predictions back from GPU\n cuda.memcpy_dtoh_async(h_output, d_output, stream)\n stream.synchronize()\n\n for index, batch in enumerate(h_output):\n # Data Post-processing\n networkOutputs.append(_NetworkOutput(\n start_logits = np.array(batch.squeeze()[:, 0]),\n end_logits = np.array(batch.squeeze()[:, 1]),\n feature_index = feature_index\n ))\n\n eval_time_elapsed /= len(features)\n \n print(\"-----------------------------\")\n print(\"Running Inference at {:.3f} Sentences/Sec\".format(1.0/eval_time_elapsed))\n print(\"-----------------------------\")", "_____no_output_____" ] ], [ [ "## Data Post-Processing", "_____no_output_____" ], [ "Now that we have the inference results let's extract the actual answer to our question", "_____no_output_____" ] ], [ [ " # The total number of n-best predictions to generate in the nbest_predictions.json output file\n n_best_size = 20\n\n # The maximum length of an answer that can be generated. This is needed \n # because the start and end predictions are not conditioned on one another\n max_answer_length = 30\n\n prediction, nbest_json, scores_diff_json = dp.get_predictions(doc_tokens, features,\n networkOutputs, n_best_size, max_answer_length)\n \n for index, output in enumerate(networkOutputs):\n print(\"Processing output\")\n print(\"Answer: '{}'\".format(prediction))\n print(\"with prob: {:.3f}%\".format(nbest_json[0]['probability'] * 100.0))", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown", "markdown" ], [ "code" ] ]
d0160a0c63e287e7d097703eaadd8b0f5b6be103
1,883
ipynb
Jupyter Notebook
Chapter04/Exercise4.03/Exercise4_03.ipynb
ibmdev/The-Machine-Learning-Workshop
9c6e3c978b09b8a6ff1d95f0a6fd2001de96d8b4
[ "MIT" ]
21
2020-03-17T17:22:44.000Z
2022-03-08T04:38:23.000Z
Chapter04/Exercise4.03/Exercise4_03.ipynb
ibmdev/The-Machine-Learning-Workshop
9c6e3c978b09b8a6ff1d95f0a6fd2001de96d8b4
[ "MIT" ]
null
null
null
Chapter04/Exercise4.03/Exercise4_03.ipynb
ibmdev/The-Machine-Learning-Workshop
9c6e3c978b09b8a6ff1d95f0a6fd2001de96d8b4
[ "MIT" ]
41
2020-03-05T13:25:28.000Z
2022-01-31T17:13:20.000Z
19.412371
86
0.49761
[ [ [ "import pandas as pd\nfrom sklearn.svm import SVC", "_____no_output_____" ], [ "data = pd.read_csv(\"fertility_Diagnosis.csv\", header=None)", "_____no_output_____" ], [ "X = data.iloc[:,:9]\nY = data.iloc[:,9]", "_____no_output_____" ], [ "model = SVC()\nmodel.fit(X,Y)", "_____no_output_____" ], [ "pred = model.predict([[-0.33,0.69,0,1,1,0,0.8,0,0.88]])\nprint(pred)", "['N']\n" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code" ] ]
d01612a01da71a1ffb80c565b40805bda12d9801
137,086
ipynb
Jupyter Notebook
bonston_housing_project/Regularized Regression.ipynb
taareek/machine_learning
e9e7cf3636a3adf8572e69346c08e65cfcdb1100
[ "MIT" ]
null
null
null
bonston_housing_project/Regularized Regression.ipynb
taareek/machine_learning
e9e7cf3636a3adf8572e69346c08e65cfcdb1100
[ "MIT" ]
null
null
null
bonston_housing_project/Regularized Regression.ipynb
taareek/machine_learning
e9e7cf3636a3adf8572e69346c08e65cfcdb1100
[ "MIT" ]
null
null
null
302.618102
59,812
0.928176
[ [ [ "import numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn import linear_model", "_____no_output_____" ], [ "# X is the 10 X 10 Hilbert Matrix\nX = 1. / (np.arange(1, 11) + np.arange(0,10)[:, np.newaxis])\ny = np.ones(10)", "_____no_output_____" ], [ "print(X.shape)\nX", "(10, 10)\n" ], [ "# Compute paths \nn_alphas = 200\nalphas = np.logspace(-10, -2, n_alphas)\n\ncoefs = []\nfor a in alphas:\n ridge = linear_model.Ridge(alpha= a, fit_intercept= False)\n# print(ridge)\n ridge.fit(X,y)\n coefs.append(ridge.coef_)", "_____no_output_____" ], [ "# Display Results\nplt.figure(figsize=(10,8))\nax = plt.gca()\nax.plot(alphas, coefs)\nax.set_xscale('log')\nax.set_xlim(ax.get_xlim()[::-1]) # reverse axis\nplt.xlabel('alpha')\nplt.ylabel('weights')\nplt.title(\"Ridge coefficient as a function of the regularization\")\nplt.axis('tight')\nplt.show()", "_____no_output_____" ] ], [ [ "# Outliers Impact", "_____no_output_____" ] ], [ [ "import numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns \nsns.set_style('whitegrid')\n%matplotlib inline\nimport pandas as pd", "_____no_output_____" ] ], [ [ "## Linear Regression ", "_____no_output_____" ] ], [ [ "from sklearn.linear_model import LinearRegression ", "_____no_output_____" ], [ "np.random.seed(42)\nn_samples = 100\nrng = np.random.randn(n_samples) * 10\nprint(\"Feeature shape: \", rng.shape)\ny_gen = 0.5 * rng + 2 * np.random.randn(n_samples)\nprint(\"\\nTarget shape: \", y_gen.shape)\n\nlr = LinearRegression()\nlr.fit(rng.reshape(-1, 1), y_gen)\nmodel_pred = lr.predict(rng.reshape(-1, 1))\n\n# plotting \nplt.figure(figsize= (10, 8));\nplt.scatter(rng, y_gen);\nplt.plot(rng, model_pred);\nprint(\"Coefficient Estimate: \", lr.coef_);", "Feeature shape: (100,)\n\nTarget shape: (100,)\nCoefficient Estimate: [0.47134857]\n" ], [ "idx= rng.argmax()\ny_gen[idx] = 200", "_____no_output_____" ], [ "plt.figure(figsize=(10, 8));\nplt.scatter(rng, y_gen);\n\no_lr = LinearRegression(normalize= True)\no_lr.fit(rng.reshape(-1, 1), y_gen)\no_model_pred = o_lr.predict(rng.reshape(-1, 1))\n\nplt.scatter(rng, y_gen);\nplt.plot(rng, o_model_pred)\nprint(\"Coefficient Estimate: \", o_lr.coef_)", "Coefficient Estimate: [0.92796845]\n" ] ], [ [ "## Ridge Regression ", "_____no_output_____" ] ], [ [ "from sklearn.linear_model import Ridge", "_____no_output_____" ], [ "ridge_mod = Ridge(alpha= 1, normalize= True)\nridge_mod.fit(rng.reshape(-1, 1), y_gen)\nridge_mod_pred = ridge_mod.predict(rng.reshape(-1,1))\n\nplt.figure(figsize=(10,8))\nplt.scatter(rng, y_gen);\nplt.plot(rng, ridge_mod_pred);\nprint(\"Coefficient of Estimation: \", ridge_mod.coef_)", "Coefficient of Estimation: [0.46398423]\n" ], [ "# ridge_mod_pred", "_____no_output_____" ] ], [ [ "# Lasso Regression", "_____no_output_____" ] ], [ [ "from sklearn.linear_model import Lasso", "_____no_output_____" ], [ "# define model \nlasso_mod = Lasso(alpha= 0.4, normalize= True)\nlasso_mod.fit(rng.reshape(-1, 1), y_gen) # (features, target)\nlasso_mod_pred = lasso_mod.predict(rng.reshape(-1,1)) # (features)\n\n# plotting\nplt.figure(figsize=(10, 8));\nplt.scatter(rng, y_gen); # (features, target)\nplt.plot(rng, lasso_mod_pred); # (features, prediction)\nprint(\"Coefficient Estimation: \", lasso_mod.coef_) # coefficent change by the rate of alpha ", "Coefficient Estimation: [0.48530263]\n" ] ], [ [ "# Elastic Net Regression", "_____no_output_____" ] ], [ [ "from sklearn.linear_model import ElasticNet", "_____no_output_____" ], [ "# defining model and prediction \nelnet_mod = 
ElasticNet(alpha= 0.02, normalize= True)\nelnet_mod.fit(rng.reshape(-1, 1), y_gen)\nelnet_pred = elnet_mod.predict(rng.reshape(-1,1))\n\n# plotting \nplt.figure(figsize=(10, 8));\nplt.scatter(rng, y_gen);\nplt.plot(rng, elnet_pred);\nprint(\"Coefficient Estimation: \", elnet_mod.coef_)", "Coefficient Estimation: [0.4584509]\n" ] ] ]
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code" ] ]
d01628dbbffcd0981b6e9e8890badf7e5c4d6cd1
9,664
ipynb
Jupyter Notebook
how-to-guides/add-column-using-expression.ipynb
Bhaskers-Blu-Org2/AMLDataPrepDocs
61d1dc7e206d9032a6e5b5304598526c0516b5bb
[ "MIT" ]
null
null
null
how-to-guides/add-column-using-expression.ipynb
Bhaskers-Blu-Org2/AMLDataPrepDocs
61d1dc7e206d9032a6e5b5304598526c0516b5bb
[ "MIT" ]
null
null
null
how-to-guides/add-column-using-expression.ipynb
Bhaskers-Blu-Org2/AMLDataPrepDocs
61d1dc7e206d9032a6e5b5304598526c0516b5bb
[ "MIT" ]
1
2020-07-30T12:35:49.000Z
2020-07-30T12:35:49.000Z
30.582278
512
0.548841
[ [ [ "![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/work-with-data/dataprep/how-to-guides/add-column-using-expression.png)", "_____no_output_____" ], [ "# Add Column using Expression\n", "_____no_output_____" ], [ "With Azure ML Data Prep you can add a new column to data with `Dataflow.add_column` by using a Data Prep expression to calculate the value from existing columns. This is similar to using Python to create a [new script column](./custom-python-transforms.ipynb#New-Script-Column) except the Data Prep expressions are more limited and will execute faster. The expressions used are the same as for [filtering rows](./filtering.ipynb#Filtering-rows) and hence have the same functions and operators available.\n<p>\nHere we add additional columns. First we get input data.", "_____no_output_____" ] ], [ [ "import azureml.dataprep as dprep", "_____no_output_____" ], [ "# loading data\ndflow = dprep.auto_read_file('../data/crime-spring.csv')\ndflow.head(5)", "_____no_output_____" ] ], [ [ "#### `substring(start, length)`\nAdd a new column \"Case Category\" using the `substring(start, length)` expression to extract the prefix from the \"Case Number\" column.", "_____no_output_____" ] ], [ [ "case_category = dflow.add_column(new_column_name='Case Category',\n prior_column='Case Number',\n expression=dflow['Case Number'].substring(0, 2))\ncase_category.head(5)", "_____no_output_____" ] ], [ [ "#### `substring(start)`\nAdd a new column \"Case Id\" using the `substring(start)` expression to extract just the number from \"Case Number\" column and then convert it to numeric.", "_____no_output_____" ] ], [ [ "case_id = dflow.add_column(new_column_name='Case Id',\n prior_column='Case Number',\n expression=dflow['Case Number'].substring(2))\ncase_id = case_id.to_number('Case Id')\ncase_id.head(5)", "_____no_output_____" ] ], [ [ "#### `length()`\nUsing the length() expression, add a new numeric column \"Length\", which contains the length of the string in \"Primary Type\".", "_____no_output_____" ] ], [ [ "dflow_length = dflow.add_column(new_column_name='Length',\n prior_column='Primary Type',\n expression=dflow['Primary Type'].length())\ndflow_length.head(5)", "_____no_output_____" ] ], [ [ "#### `to_upper()`\nUsing the to_upper() expression, add a new numeric column \"Upper Case\", which contains the string in \"Primary Type\" in upper case.", "_____no_output_____" ] ], [ [ "dflow_to_upper = dflow.add_column(new_column_name='Upper Case',\n prior_column='Primary Type',\n expression=dflow['Primary Type'].to_upper())\ndflow_to_upper.head(5)", "_____no_output_____" ] ], [ [ "#### `to_lower()`\nUsing the to_lower() expression, add a new numeric column \"Lower Case\", which contains the string in \"Primary Type\" in lower case.", "_____no_output_____" ] ], [ [ "dflow_to_lower = dflow.add_column(new_column_name='Lower Case',\n prior_column='Primary Type',\n expression=dflow['Primary Type'].to_lower())\ndflow_to_lower.head(5)", "_____no_output_____" ] ], [ [ "#### `col(column1) + col(column2)`\nAdd a new column \"Total\" to show the result of adding the values in the \"FBI Code\" column to the \"Community Area\" column.", "_____no_output_____" ] ], [ [ "dflow_total = dflow.add_column(new_column_name='Total',\n prior_column='FBI Code',\n expression=dflow['Community Area']+dflow['FBI Code'])\ndflow_total.head(5)", "_____no_output_____" ] ], [ [ "#### `col(column1) - col(column2)`\nAdd a new column \"Subtract\" to show the result of subtracting the 
values in the \"FBI Code\" column from the \"Community Area\" column.", "_____no_output_____" ] ], [ [ "dflow_diff = dflow.add_column(new_column_name='Difference',\n prior_column='FBI Code',\n expression=dflow['Community Area']-dflow['FBI Code'])\ndflow_diff.head(5)", "_____no_output_____" ] ], [ [ "#### `col(column1) * col(column2)`\nAdd a new column \"Product\" to show the result of multiplying the values in the \"FBI Code\" column to the \"Community Area\" column.", "_____no_output_____" ] ], [ [ "dflow_prod = dflow.add_column(new_column_name='Product',\n prior_column='FBI Code',\n expression=dflow['Community Area']*dflow['FBI Code'])\ndflow_prod.head(5)", "_____no_output_____" ] ], [ [ "#### `col(column1) / col(column2)`\nAdd a new column \"True Quotient\" to show the result of true (decimal) division of the values in \"Community Area\" column by the \"FBI Code\" column.", "_____no_output_____" ] ], [ [ "dflow_true_div = dflow.add_column(new_column_name='True Quotient',\n prior_column='FBI Code',\n expression=dflow['Community Area']/dflow['FBI Code'])\ndflow_true_div.head(5)", "_____no_output_____" ] ], [ [ "#### `col(column1) // col(column2)`\nAdd a new column \"Floor Quotient\" to show the result of floor (integer) division of the values in \"Community Area\" column by the \"FBI Code\" column.", "_____no_output_____" ] ], [ [ "dflow_floor_div = dflow.add_column(new_column_name='Floor Quotient',\n prior_column='FBI Code',\n expression=dflow['Community Area']//dflow['FBI Code'])\ndflow_floor_div.head(5)", "_____no_output_____" ] ], [ [ "#### `col(column1) % col(column2)`\nAdd a new column \"Mod\" to show the result of applying the modulo operation on the \"FBI Code\" column and the \"Community Area\" column.", "_____no_output_____" ] ], [ [ "dflow_mod = dflow.add_column(new_column_name='Mod',\n prior_column='FBI Code',\n expression=dflow['Community Area']%dflow['FBI Code'])\ndflow_mod.head(5)", "_____no_output_____" ] ], [ [ "#### `col(column1) ** col(column2)`\nAdd a new column \"Power\" to show the result of applying the exponentiation operation when the base is the \"Community Area\" column and the exponent is \"FBI Code\" column.", "_____no_output_____" ] ], [ [ "dflow_pow = dflow.add_column(new_column_name='Power',\n prior_column='FBI Code',\n expression=dflow['Community Area']**dflow['FBI Code'])\ndflow_pow.head(5)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
d0162d2dff95b7fdf586d6fd4416c90da1d52c5a
336,054
ipynb
Jupyter Notebook
scripts/object_identification_basic.ipynb
hhelmbre/qdbvcella
59c80050e75be089d9228c74086b14e1e0bbcd59
[ "MIT" ]
null
null
null
scripts/object_identification_basic.ipynb
hhelmbre/qdbvcella
59c80050e75be089d9228c74086b14e1e0bbcd59
[ "MIT" ]
null
null
null
scripts/object_identification_basic.ipynb
hhelmbre/qdbvcella
59c80050e75be089d9228c74086b14e1e0bbcd59
[ "MIT" ]
null
null
null
733.742358
258,576
0.954308
[ [ [ "# Purpose: A basic object identification package for the lab to use", "_____no_output_____" ], [ "*Step 1: import packages*", "_____no_output_____" ] ], [ [ "import os.path as op\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n#Sci-kit Image Imports\nfrom skimage import io\nfrom skimage import filters\nfrom skimage.feature import canny\nfrom skimage import measure\n\nfrom scipy import ndimage as ndi\n\n%matplotlib inline\n\nimport warnings\nwarnings.filterwarnings('ignore')", "_____no_output_____" ] ], [ [ "*Step 2: User Inputs*", "_____no_output_____" ] ], [ [ "file_location = '../../31.2_DG_quant.tif'\nplot_name = 'practice2.png'\n\nchannel_1_color = 'Blue'\nchannel_2_color = 'Green'", "_____no_output_____" ] ], [ [ "*Step 3: Read the image into the notebook*", "_____no_output_____" ] ], [ [ "#Read in the file\nim = io.imread(file_location)\n\n#Convert image to numpy array\nimarray = np.array(im)\n\n#Checking the image shape\nimarray.shape", "_____no_output_____" ] ], [ [ "*Step 4: Color Split*", "_____no_output_____" ] ], [ [ "channel_1 = im[0, :, :]\nchannel_2 = im[1, :, :]", "_____no_output_____" ] ], [ [ "*Step 5: Visualization Check*", "_____no_output_____" ] ], [ [ "fig = plt.figure()\n\nax1 = fig.add_subplot(2,2,1)\nax1.set_title(channel_1_color)\nax1.imshow(channel_1, cmap='gray')\n\nax2 = fig.add_subplot(2,2,2)\nax2.set_title(channel_2_color)\nax2.imshow(channel_2, cmap='gray')\n\nfig.set_size_inches(10.5, 10.5, forward=True)", "_____no_output_____" ] ], [ [ "*Step 6: Apply a Threshold*", "_____no_output_____" ] ], [ [ "threshold_local = filters.threshold_otsu(channel_1)\nbinary_c1 = channel_1 > threshold_local\n\nthreshold_local = filters.threshold_otsu(channel_2)\nbinary_c2 = channel_2 > threshold_local", "_____no_output_____" ], [ "fig = plt.figure()\n\nax1 = fig.add_subplot(2,2,1)\nax1.set_title(str(channel_1_color + ' Threshold'))\nax1.imshow(binary_c1, cmap='gray')\n\nax2 = fig.add_subplot(2,2,2)\nax2.set_title(str(channel_2_color + ' Threshold'))\nax2.imshow(binary_c2, cmap='gray')\n\nfig.set_size_inches(10.5, 10.5, forward=True)", "_____no_output_____" ] ], [ [ "*Step 7: Fill in Objects*", "_____no_output_____" ] ], [ [ "filled_c1 = ndi.binary_fill_holes(binary_c1)\nfilled_c2 = ndi.binary_fill_holes(binary_c2)", "_____no_output_____" ] ], [ [ "*Step 8: Visualization Check*", "_____no_output_____" ] ], [ [ "fig = plt.figure()\n\nax1 = fig.add_subplot(2,2,1)\nax1.set_title(str(channel_1_color + ' Filled'))\nax1.imshow(filled_c1, cmap='gray')\n\nax2 = fig.add_subplot(2,2,2)\nax2.set_title(str(channel_2_color + ' Filled'))\nax2.imshow(filled_c2, cmap='gray')\n\nfig.set_size_inches(10.5, 10.5, forward=True)", "_____no_output_____" ] ], [ [ "*Step 9: Labeling Objects*", "_____no_output_____" ] ], [ [ "label_objects1, nb_labels1 = ndi.label(filled_c1)\nsizes1 = np.bincount(label_objects1.ravel())\nmask_sizes1 = sizes1 > 100\nmask_sizes1[0] = 0\ncells_cleaned_c1 = mask_sizes1[label_objects1]", "_____no_output_____" ], [ "label_objects2, nb_labels2 = ndi.label(filled_c2)\nsizes2 = np.bincount(label_objects2.ravel())\nmask_sizes2 = sizes2 > 100\nmask_sizes2[0] = 0\ncells_cleaned_c2 = mask_sizes2[label_objects2]", "_____no_output_____" ], [ "labeled_c1, _ = ndi.label(cells_cleaned_c1)\nlabeled_c2, _ = ndi.label(cells_cleaned_c2)", "_____no_output_____" ] ], [ [ "*Step 10: Visualization Check*", "_____no_output_____" ] ], [ [ "fig = plt.figure()\n\nax1 = fig.add_subplot(2,2,1)\nax1.set_title(str(channel_1_color + ' 
Labeled'))\nax1.imshow(labeled_c1)\n\nax2 = fig.add_subplot(2,2,2)\nax2.set_title(str(channel_2_color + ' Labeled'))\nax2.imshow(labeled_c2)\n\nfig.set_size_inches(10.5, 10.5, forward=True)", "_____no_output_____" ] ], [ [ "*Step 11: Get Region Props*", "_____no_output_____" ] ], [ [ "regionprops_c1 = measure.regionprops(labeled_c1)\nregionprops_c2 = measure.regionprops(labeled_c2)", "_____no_output_____" ], [ "df = pd.DataFrame(columns=['centroid x', 'centroid y','equiv_diam'])\nk = 1\nfor props in regionprops_c1:\n #Get the properties that I need for areas\n #Add them into a pandas dataframe that has the same number of rows as objects detected\n #\n centroid = props.centroid\n centroid_x = centroid[0]\n centroid_y = centroid[1]\n equiv_diam = props.equivalent_diameter\n df.loc[k] = [centroid_x, centroid_y, equiv_diam]\n k = k + 1", "_____no_output_____" ], [ "df2 = pd.DataFrame(columns=['centroid x', 'centroid y','equiv_diam'])\nk = 1\nfor props in regionprops_c2:\n #Get the properties that I need for areas\n #Add them into a pandas dataframe that has the same number of rows as objects detected\n #\n centroid = props.centroid\n centroid_x = centroid[0]\n centroid_y = centroid[1]\n equiv_diam = props.equivalent_diameter\n df2.loc[k] = [centroid_x, centroid_y, equiv_diam]\n k = k + 1", "_____no_output_____" ], [ "count_c1 = df.shape[0]\nprint('Count ' + channel_1_color + ': ' + str(count_c1))\n\ncount_c2 = df2.shape[0]\nprint('Count ' + channel_2_color + ': ' + str(count_c2))", "Count Blue: 114\nCount Green: 16\n" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code" ] ]
d0164704124e3e4ee555d78c0bf800fe07db3c86
7,234
ipynb
Jupyter Notebook
DigitalBiomarkers-HumanActivityRecognition/00_source/.ipynb_checkpoints/20_sensor_concat-checkpoint.ipynb
Big-Ideas-Lab/DBDP
99357dac197ceeb8c240ead804dd2c8bd3e3fc93
[ "Apache-2.0" ]
20
2020-01-27T16:32:25.000Z
2021-05-27T15:06:29.000Z
DigitalBiomarkers-HumanActivityRecognition/00_source/.ipynb_checkpoints/20_sensor_concat-checkpoint.ipynb
chopeter27/DBDP
99357dac197ceeb8c240ead804dd2c8bd3e3fc93
[ "Apache-2.0" ]
11
2020-01-27T16:22:09.000Z
2020-07-29T20:11:22.000Z
DigitalBiomarkers-HumanActivityRecognition/00_source/.ipynb_checkpoints/20_sensor_concat-checkpoint.ipynb
chopeter27/DBDP
99357dac197ceeb8c240ead804dd2c8bd3e3fc93
[ "Apache-2.0" ]
16
2019-04-05T15:01:46.000Z
2021-07-07T05:42:27.000Z
31.04721
246
0.4389
[ [ [ "## E4 Sensor Concatenation", "_____no_output_____" ], [ "This sensor concatenation file compiles all .csv files of subjects by sensor type. A column is added with the \"Subject_ID\" and arranges the data in order of ascending ID number. The output of this function is a csv file. ", "_____no_output_____" ], [ "***\n\n##### **Input:** Properly formatted .csv files from the E4FileFormatter (DBDP preprocessing folder)\n\n##### **Output:** Each .csv file will consist of only one type of sensor data. A column for subject ID has been added. Data will be organized numerically, by subject ID. Headers will be based on the column names input into the function. \n***", "_____no_output_____" ] ], [ [ "import pandas as pd\nimport glob\nimport os\n\nos.chdir('../00_source')", "_____no_output_____" ] ], [ [ "## Import & Concatenate Sensor Data of Choice\n**Functions:**\n* $\\underline{data\\_concat()}$ - reads all files in data directory (00_source) and concatenates those of one sensor type. Adds subject ID column to resulting .csv file\n > <span style=\"color:blue\">data</span> = data type to be concatenated as a string <br>\n > <span style=\"color:blue\">cols</span> = column names in resulting dataframe as a list <br>\n > <span style=\"color:blue\">file_name</span> = output .csv file name as a string <br>\n", "_____no_output_____" ] ], [ [ "# Select files of specific data and concat to one dataframe\n\ndef data_concat(data, cols, file_name):\n \"\"\"\n data = data type to be concatenated as a string\n cols = column names in resulting dataframe as a list\n file_name = output csv file name as a string\n \"\"\"\n all_filenames = [i for i in glob.glob(f'*{data}.csv')]\n all_filenames = sorted(all_filenames)\n df = pd.concat([pd.read_csv(f, header=None).assign(Subject_ID=os.path.basename(f))\n for f in all_filenames])\n df['Subject_ID'] = df['Subject_ID'].str[:6]\n df.columns = cols\n df.to_csv(f\"../20_Intermediate_files/{file_name}.csv\", index = False)\n return df\n\n\ncols = ['Time', 'TEMP', 'Subject_ID']\n\ndata_concat(\"TEMP\", cols, \"20_Temp_Combined\")", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
d0164f5817cfe2f3b5b6c9b805ab01d0a4a123b3
1,543
ipynb
Jupyter Notebook
jupyterexample/StudyPandas2.ipynb
newrey/QUANTAXIS
1104ead9248ae1a0cd201dd0cd9c662828a01a58
[ "MIT" ]
6
2018-05-28T05:44:19.000Z
2019-03-31T14:38:26.000Z
jupyterexample/StudyPandas2.ipynb
newrey/QUANTAXIS
1104ead9248ae1a0cd201dd0cd9c662828a01a58
[ "MIT" ]
2
2018-05-18T19:54:34.000Z
2018-05-28T16:45:07.000Z
jupyterexample/StudyPandas2.ipynb
newrey/QUANTAXIS
1104ead9248ae1a0cd201dd0cd9c662828a01a58
[ "MIT" ]
3
2018-06-13T02:57:41.000Z
2022-02-12T15:03:29.000Z
17.337079
49
0.50162
[ [ [ "import numpy as np\nimport pandas as pd\nfrom pandas_datareader import data as wb\nimport matplotlib.pyplot as plt", "_____no_output_____" ], [ "tickers = ['^GSPC']", "_____no_output_____" ], [ "ind_data = pd.DataFrame()", "_____no_output_____" ] ], [ [ "variance\n", "_____no_output_____" ] ], [ [ "print(ind_data.info())", "<class 'pandas.core.frame.DataFrame'>\nIndex: 0 entries\nEmpty DataFrameNone\n" ] ] ]
[ "code", "markdown", "code" ]
[ [ "code", "code", "code" ], [ "markdown" ], [ "code" ] ]
d016627ff8fe655546e649bd56766c38afd627c1
163,992
ipynb
Jupyter Notebook
examples/2D/2_training.ipynb
feberhardt/stardist
881968c57e8482e51c8a1fbc67239bbf6e960623
[ "BSD-3-Clause" ]
null
null
null
examples/2D/2_training.ipynb
feberhardt/stardist
881968c57e8482e51c8a1fbc67239bbf6e960623
[ "BSD-3-Clause" ]
null
null
null
examples/2D/2_training.ipynb
feberhardt/stardist
881968c57e8482e51c8a1fbc67239bbf6e960623
[ "BSD-3-Clause" ]
null
null
null
286.198953
144,576
0.91249
[ [ [ "from __future__ import print_function, unicode_literals, absolute_import, division\nimport sys\nimport numpy as np\nimport matplotlib.pyplot as plt\n%matplotlib inline\n%config InlineBackend.figure_format = 'retina'\n\nfrom glob import glob\nfrom tqdm import tqdm\nfrom tifffile import imread\nfrom csbdeep.utils import Path, normalize\n\nfrom stardist import fill_label_holes, random_label_cmap, calculate_extents, gputools_available\nfrom stardist.models import Config2D, StarDist2D, StarDistData2D\n\nnp.random.seed(42)\nlbl_cmap = random_label_cmap()", "Using TensorFlow backend.\n" ] ], [ [ "# Data\n\nWe assume that data has already been downloaded via notebook [1_data.ipynb](1_data.ipynb). \n\n<div class=\"alert alert-block alert-info\">\nTraining data (for input `X` with associated label masks `Y`) can be provided via lists of numpy arrays, where each image can have a different size. Alternatively, a single numpy array can also be used if all images have the same size. \nInput images can either be two-dimensional (single-channel) or three-dimensional (multi-channel) arrays, where the channel axis comes last. Label images need to be integer-valued.\n</div>", "_____no_output_____" ] ], [ [ "X = sorted(glob('data/dsb2018/train/images/*.tif'))\nY = sorted(glob('data/dsb2018/train/masks/*.tif'))\nassert all(Path(x).name==Path(y).name for x,y in zip(X,Y))", "_____no_output_____" ], [ "X = list(map(imread,X))\nY = list(map(imread,Y))\nn_channel = 1 if X[0].ndim == 2 else X[0].shape[-1]", "_____no_output_____" ] ], [ [ "Normalize images and fill small label holes.", "_____no_output_____" ] ], [ [ "axis_norm = (0,1) # normalize channels independently\n# axis_norm = (0,1,2) # normalize channels jointly\nif n_channel > 1:\n print(\"Normalizing image channels %s.\" % ('jointly' if axis_norm is None or 2 in axis_norm else 'independently'))\n sys.stdout.flush()\n\nX = [normalize(x,1,99.8,axis=axis_norm) for x in tqdm(X)]\nY = [fill_label_holes(y) for y in tqdm(Y)]", "100%|██████████| 447/447 [00:01<00:00, 462.35it/s]\n100%|██████████| 447/447 [00:04<00:00, 111.61it/s]\n" ] ], [ [ "Split into train and validation datasets.", "_____no_output_____" ] ], [ [ "assert len(X) > 1, \"not enough training data\"\nrng = np.random.RandomState(42)\nind = rng.permutation(len(X))\nn_val = max(1, int(round(0.15 * len(ind))))\nind_train, ind_val = ind[:-n_val], ind[-n_val:]\nX_val, Y_val = [X[i] for i in ind_val] , [Y[i] for i in ind_val]\nX_trn, Y_trn = [X[i] for i in ind_train], [Y[i] for i in ind_train] \nprint('number of images: %3d' % len(X))\nprint('- training: %3d' % len(X_trn))\nprint('- validation: %3d' % len(X_val))", "number of images: 447\n- training: 380\n- validation: 67\n" ] ], [ [ "Training data consists of pairs of input image and label instances.", "_____no_output_____" ] ], [ [ "i = min(9, len(X)-1)\nimg, lbl = X[i], Y[i]\nassert img.ndim in (2,3)\nimg = img if img.ndim==2 else img[...,:3]\nplt.figure(figsize=(16,10))\nplt.subplot(121); plt.imshow(img,cmap='gray'); plt.axis('off'); plt.title('Raw image')\nplt.subplot(122); plt.imshow(lbl,cmap=lbl_cmap); plt.axis('off'); plt.title('GT labels')\nNone;", "_____no_output_____" ] ], [ [ "# Configuration\n\nA `StarDist2D` model is specified via a `Config2D` object.", "_____no_output_____" ] ], [ [ "print(Config2D.__doc__)", "Configuration for a :class:`StarDist2D` model.\n\n Parameters\n ----------\n axes : str or None\n Axes of the input images.\n n_rays : int\n Number of radial directions for the star-convex polygon.\n Recommended to use a power of 
2 (default: 32).\n n_channel_in : int\n Number of channels of given input image (default: 1).\n grid : (int,int)\n Subsampling factors (must be powers of 2) for each of the axes.\n Model will predict on a subsampled grid for increased efficiency and larger field of view.\n backbone : str\n Name of the neural network architecture to be used as backbone.\n kwargs : dict\n Overwrite (or add) configuration attributes (see below).\n\n\n Attributes\n ----------\n unet_n_depth : int\n Number of U-Net resolution levels (down/up-sampling layers).\n unet_kernel_size : (int,int)\n Convolution kernel size for all (U-Net) convolution layers.\n unet_n_filter_base : int\n Number of convolution kernels (feature channels) for first U-Net layer.\n Doubled after each down-sampling layer.\n unet_pool : (int,int)\n Maxpooling size for all (U-Net) convolution layers.\n net_conv_after_unet : int\n Number of filters of the extra convolution layer after U-Net (0 to disable).\n unet_* : *\n Additional parameters for U-net backbone.\n train_shape_completion : bool\n Train model to predict complete shapes for partially visible objects at image boundary.\n train_completion_crop : int\n If 'train_shape_completion' is set to True, specify number of pixels to crop at boundary of training patches.\n Should be chosen based on (largest) object sizes.\n train_patch_size : (int,int)\n Size of patches to be cropped from provided training images.\n train_background_reg : float\n Regularizer to encourage distance predictions on background regions to be 0.\n train_dist_loss : str\n Training loss for star-convex polygon distances ('mse' or 'mae').\n train_loss_weights : tuple of float\n Weights for losses relating to (probability, distance)\n train_epochs : int\n Number of training epochs.\n train_steps_per_epoch : int\n Number of parameter update steps per epoch.\n train_learning_rate : float\n Learning rate for training.\n train_batch_size : int\n Batch size for training.\n train_n_val_patches : int\n Number of patches to be extracted from validation images (``None`` = one patch per image).\n train_tensorboard : bool\n Enable TensorBoard for monitoring training progress.\n train_reduce_lr : dict\n Parameter :class:`dict` of ReduceLROnPlateau_ callback; set to ``None`` to disable.\n use_gpu : bool\n Indicate that the data generator should use OpenCL to do computations on the GPU.\n\n .. 
_ReduceLROnPlateau: https://keras.io/callbacks/#reducelronplateau\n \n" ], [ "# 32 is a good default choice (see 1_data.ipynb)\nn_rays = 32\n\n# Use OpenCL-based computations for data generator during training (requires 'gputools')\nuse_gpu = False and gputools_available()\n\n# Predict on subsampled grid for increased efficiency and larger field of view\ngrid = (2,2)\n\nconf = Config2D (\n n_rays = n_rays,\n grid = grid,\n use_gpu = use_gpu,\n n_channel_in = n_channel,\n)\nprint(conf)\nvars(conf)", "Config2D(axes='YXC', backbone='unet', grid=(2, 2), n_channel_in=1, n_channel_out=33, n_dim=2, n_rays=32, net_conv_after_unet=128, net_input_shape=(None, None, 1), net_mask_shape=(None, None, 1), train_background_reg=0.0001, train_batch_size=4, train_checkpoint='weights_best.h5', train_checkpoint_epoch='weights_now.h5', train_checkpoint_last='weights_last.h5', train_completion_crop=32, train_dist_loss='mae', train_epochs=400, train_learning_rate=0.0003, train_loss_weights=(1, 0.2), train_n_val_patches=None, train_patch_size=(256, 256), train_reduce_lr={'factor': 0.5, 'patience': 40, 'min_delta': 0}, train_shape_completion=False, train_steps_per_epoch=100, train_tensorboard=True, unet_activation='relu', unet_batch_norm=False, unet_dropout=0.0, unet_kernel_size=(3, 3), unet_last_activation='relu', unet_n_conv_per_depth=2, unet_n_depth=3, unet_n_filter_base=32, unet_pool=(2, 2), unet_prefix='', use_gpu=False)\n" ], [ "if use_gpu:\n from csbdeep.utils.tf import limit_gpu_memory\n # adjust as necessary: limit GPU memory to be used by TensorFlow to leave some to OpenCL-based computations\n limit_gpu_memory(0.8)", "_____no_output_____" ] ], [ [ "**Note:** The trained `StarDist2D` model will *not* predict completed shapes for partially visible objects at the image boundary if `train_shape_completion=False` (which is the default option).", "_____no_output_____" ] ], [ [ "model = StarDist2D(conf, name='stardist', basedir='models')", "Using default values: prob_thresh=0.5, nms_thresh=0.4.\n" ] ], [ [ "Check if the neural network has a large enough field of view to see up to the boundary of most objects.", "_____no_output_____" ] ], [ [ "median_size = calculate_extents(list(Y), np.median)\nfov = np.array(model._axes_tile_overlap('YX'))\nif any(median_size > fov):\n print(\"WARNING: median object size larger than field of view of the neural network.\")", "_____no_output_____" ] ], [ [ "# Training", "_____no_output_____" ], [ "You can define a function/callable that applies augmentation to each batch of the data generator.", "_____no_output_____" ] ], [ [ "augmenter = None\n\n# def augmenter(X_batch, Y_batch):\n# \"\"\"Augmentation for data batch.\n# X_batch is a list of input images (length at most batch_size)\n# Y_batch is the corresponding list of ground-truth label images\n# \"\"\"\n# # ...\n# return X_batch, Y_batch", "_____no_output_____" ] ], [ [ "We recommend to monitor the progress during training with [TensorBoard](https://www.tensorflow.org/programmers_guide/summaries_and_tensorboard). 
You can start it in the shell from the current working directory like this:\n\n $ tensorboard --logdir=.\n\nThen connect to [http://localhost:6006/](http://localhost:6006/) with your browser.\n", "_____no_output_____" ] ], [ [ "quick_demo = True\n\nif quick_demo:\n print (\n \"NOTE: This is only for a quick demonstration!\\n\"\n \" Please set the variable 'quick_demo = False' for proper (long) training.\",\n file=sys.stderr, flush=True\n )\n model.train(X_trn, Y_trn, validation_data=(X_val,Y_val), augmenter=augmenter,\n epochs=2, steps_per_epoch=10)\n\n print(\"====> Stopping training and loading previously trained demo model from disk.\", file=sys.stderr, flush=True)\n model = StarDist2D(None, name='2D_demo', basedir='../../models/examples')\n model.basedir = None # to prevent files of the demo model from being overwritten (not needed for your model)\nelse:\n model.train(X_trn, Y_trn, validation_data=(X_val,Y_val), augmenter=augmenter)\nNone;", "NOTE: This is only for a quick demonstration!\n Please set the variable 'quick_demo = False' for proper (long) training.\n" ] ], [ [ "# Threshold optimization", "_____no_output_____" ], [ "While the default values for the probability and non-maximum suppression thresholds already yield good results in many cases, we still recommend adapting the thresholds to your data. The optimized threshold values are saved to disk and will be automatically loaded with the model.", "_____no_output_____" ] ], [ [ "model.optimize_thresholds(X_val, Y_val)", "NMS threshold = 0.3: 80%|████████ | 16/20 [00:46<00:17, 4.42s/it, 0.485 -> 0.796]\nNMS threshold = 0.4: 80%|████████ | 16/20 [00:46<00:17, 4.45s/it, 0.485 -> 0.796]\nNMS threshold = 0.5: 80%|████████ | 16/20 [00:50<00:18, 4.63s/it, 0.485 -> 0.796]\n" ] ] ]
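[ [ "For the `augmenter = None` cell above, a minimal working example that matches the commented signature (a generic sketch, not the official stardist recipe, which also includes rotations): flip image and mask identically so they stay aligned, and jitter the intensity of the image only.", "_____no_output_____" ] ], [ [ "def augmenter(X_batch, Y_batch):\n    \"\"\"Flip-and-jitter augmentation; X_batch/Y_batch are lists as documented above.\"\"\"\n    X_aug, Y_aug = [], []\n    for x, y in zip(X_batch, Y_batch):\n        # apply the same random flips to image and mask\n        if np.random.rand() < 0.5:\n            x, y = np.flip(x, axis=0), np.flip(y, axis=0)\n        if np.random.rand() < 0.5:\n            x, y = np.flip(x, axis=1), np.flip(y, axis=1)\n        # multiplicative intensity jitter on the image only\n        x = x * np.random.uniform(0.8, 1.2)\n        X_aug.append(x)\n        Y_aug.append(y)\n    return X_aug, Y_aug", "_____no_output_____" ] ]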
[ "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ] ]
d0167d67bd424e1864d40e613030fd3b8a01b10d
6,305
ipynb
Jupyter Notebook
Datasets/Vectors/landsat_wrs2_grid.ipynb
YuePanEdward/earthengine-py-notebooks
cade6a81dd4dbbfb1b9b37aaf6955de42226cfc5
[ "MIT" ]
1
2020-11-16T08:00:11.000Z
2020-11-16T08:00:11.000Z
Datasets/Vectors/landsat_wrs2_grid.ipynb
mllzl/earthengine-py-notebooks
cade6a81dd4dbbfb1b9b37aaf6955de42226cfc5
[ "MIT" ]
null
null
null
Datasets/Vectors/landsat_wrs2_grid.ipynb
mllzl/earthengine-py-notebooks
cade6a81dd4dbbfb1b9b37aaf6955de42226cfc5
[ "MIT" ]
null
null
null
45.359712
1,031
0.602696
[ [ [ "<table class=\"ee-notebook-buttons\" align=\"left\">\n <td><a target=\"_blank\" href=\"https://github.com/giswqs/earthengine-py-notebooks/tree/master/Datasets/Vectors/landsat_wrs2_grid.ipynb\"><img width=32px src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" /> View source on GitHub</a></td>\n <td><a target=\"_blank\" href=\"https://nbviewer.jupyter.org/github/giswqs/earthengine-py-notebooks/blob/master/Datasets/Vectors/landsat_wrs2_grid.ipynb\"><img width=26px src=\"https://upload.wikimedia.org/wikipedia/commons/thumb/3/38/Jupyter_logo.svg/883px-Jupyter_logo.svg.png\" />Notebook Viewer</a></td>\n <td><a target=\"_blank\" href=\"https://mybinder.org/v2/gh/giswqs/earthengine-py-notebooks/master?filepath=Datasets/Vectors/landsat_wrs2_grid.ipynb\"><img width=58px src=\"https://mybinder.org/static/images/logo_social.png\" />Run in binder</a></td>\n <td><a target=\"_blank\" href=\"https://colab.research.google.com/github/giswqs/earthengine-py-notebooks/blob/master/Datasets/Vectors/landsat_wrs2_grid.ipynb\"><img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" /> Run in Google Colab</a></td>\n</table>", "_____no_output_____" ], [ "## Install Earth Engine API and geemap\nInstall the [Earth Engine Python API](https://developers.google.com/earth-engine/python_install) and [geemap](https://github.com/giswqs/geemap). The **geemap** Python package is built upon the [ipyleaflet](https://github.com/jupyter-widgets/ipyleaflet) and [folium](https://github.com/python-visualization/folium) packages and implements several methods for interacting with Earth Engine data layers, such as `Map.addLayer()`, `Map.setCenter()`, and `Map.centerObject()`.\nThe following script checks if the geemap package has been installed. If not, it will install geemap, which automatically installs its [dependencies](https://github.com/giswqs/geemap#dependencies), including earthengine-api, folium, and ipyleaflet.\n\n**Important note**: A key difference between folium and ipyleaflet is that ipyleaflet is built upon ipywidgets and allows bidirectional communication between the front-end and the backend enabling the use of the map to capture user input, while folium is meant for displaying static data only ([source](https://blog.jupyter.org/interactive-gis-in-jupyter-with-ipyleaflet-52f9657fa7a)). Note that [Google Colab](https://colab.research.google.com/) currently does not support ipyleaflet ([source](https://github.com/googlecolab/colabtools/issues/60#issuecomment-596225619)). Therefore, if you are using geemap with Google Colab, you should use [`import geemap.eefolium`](https://github.com/giswqs/geemap/blob/master/geemap/eefolium.py). If you are using geemap with [binder](https://mybinder.org/) or a local Jupyter notebook server, you can use [`import geemap`](https://github.com/giswqs/geemap/blob/master/geemap/geemap.py), which provides more functionalities for capturing user input (e.g., mouse-clicking and moving).", "_____no_output_____" ] ], [ [ "# Installs geemap package\nimport subprocess\n\ntry:\n import geemap\nexcept ImportError:\n print('geemap package not installed. 
Installing ...')\n subprocess.check_call([\"python\", '-m', 'pip', 'install', 'geemap'])\n\n# Checks whether this notebook is running on Google Colab\ntry:\n import google.colab\n import geemap.eefolium as emap\nexcept:\n import geemap as emap\n\n# Authenticates and initializes Earth Engine\nimport ee\n\ntry:\n ee.Initialize()\nexcept Exception as e:\n ee.Authenticate()\n ee.Initialize() ", "_____no_output_____" ] ], [ [ "## Create an interactive map \nThe default basemap is `Google Satellite`. [Additional basemaps](https://github.com/giswqs/geemap/blob/master/geemap/geemap.py#L13) can be added using the `Map.add_basemap()` function. ", "_____no_output_____" ] ], [ [ "Map = emap.Map(center=[40,-100], zoom=4)\nMap.add_basemap('ROADMAP') # Add Google Map\nMap", "_____no_output_____" ] ], [ [ "## Add Earth Engine Python script ", "_____no_output_____" ] ], [ [ "# Add Earth Engine dataset\ndataset = ee.FeatureCollection('projects/google/wrs2_descending')\n\nempty = ee.Image().byte()\n\nMap.setCenter(-78, 36, 8)\nMap.addLayer(empty.paint(dataset, 0, 2), {}, 'Landsat WRS-2 grid')", "_____no_output_____" ] ], [ [ "## Display Earth Engine data layers ", "_____no_output_____" ] ], [ [ "Map.addLayerControl() # This line is not needed for ipyleaflet-based Map.\nMap", "_____no_output_____" ] ] ]
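[ [ "A small extension of the script above (the `PATH` and `ROW` property names are assumptions about the WRS-2 table schema, so treat this as a sketch): filter the grid down to one Landsat scene footprint and paint it as a second layer.", "_____no_output_____" ] ], [ [ "# Hypothetical: outline a single WRS-2 scene (path 15, row 35) on top of the grid\nscene = dataset.filter(ee.Filter.And(ee.Filter.eq('PATH', 15), ee.Filter.eq('ROW', 35)))\nMap.addLayer(empty.paint(scene, 0, 4), {'palette': ['red']}, 'Scene 15/35')", "_____no_output_____" ] ]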
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
d016934527e29c87c0112891aa293b0b295b5160
3,737
ipynb
Jupyter Notebook
dynamicProgramming/lgstPalSubstring.ipynb
NoCodeProgram/CodingTest
4756f29c40722dc00fe62632e9c21aefa83da136
[ "MIT" ]
3
2021-07-17T00:00:09.000Z
2022-02-25T06:22:24.000Z
dynamicProgramming/lgstPalSubstring.ipynb
NoCodeProgram/CodingTest
4756f29c40722dc00fe62632e9c21aefa83da136
[ "MIT" ]
null
null
null
dynamicProgramming/lgstPalSubstring.ipynb
NoCodeProgram/CodingTest
4756f29c40722dc00fe62632e9c21aefa83da136
[ "MIT" ]
3
2021-07-17T00:00:09.000Z
2022-01-18T03:21:12.000Z
28.097744
256
0.453305
[ [ [ "<a href=\"https://colab.research.google.com/github/NoCodeProgram/CodingTest/blob/main/dynamicProgramming/lgstPalSubstring.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ], [ "Title : Longest Palindromic Substring\n\nChapter : Dynamic Programming\n\nLink : [YouTube](https://youtu.be/LYHFaO1lgYM)\n\nChapterLink : [PlayList](https://youtube.com/playlist?list=PLDV-cCQnUlIa0owhTLK-VT994Qh6XTy4v)\n\n문제: 주어진 string s에서, 가장 긴 palindromic substring을 return하여라", "_____no_output_____" ] ], [ [ "def longestPalindrome(s: str) -> str:\n str_length = len(s)\n dp_table = [[0] * str_length for i in range(str_length)]\n \n for idx in range (str_length):\n dp_table[idx][idx] = 1\n \n for idx in range (str_length -1):\n start_char = s[idx]\n end_char = s[idx+1]\n if start_char == end_char:\n dp_table[idx][idx+1] = 2\n\n for idx in range (2, str_length):\n row = 0\n col = idx\n while col < str_length: \n start_char = s[row]\n end_char = s[col]\n prev_count = dp_table[row+1][col-1]\n if start_char == end_char and prev_count != 0:\n dp_table[row][col] = prev_count + 2 \n row += 1\n col += 1\n \n \n max_length = 0\n start_idx = 0\n end_idx = 0\n for row in range (str_length):\n for col in range (str_length):\n crnt_length = dp_table[row][col]\n if max_length < crnt_length:\n max_length = crnt_length\n start_idx = row\n end_idx = col\n \n sub_str = s[start_idx:end_idx+1]\n\n return sub_str", "_____no_output_____" ], [ "print(longestPalindrome(s='baabc'))", "baab\n" ], [ "", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code", "code", "code" ] ]
d016a6cd9ed36aa228d93c2ceae9077caf1381c3
8,087
ipynb
Jupyter Notebook
code/class_6/3. RNN_classification_multi.ipynb
chuckgu/nabi
aa3e3886896334512658b00a491c618b2a63fe63
[ "Apache-2.0" ]
1
2017-11-14T10:21:30.000Z
2017-11-14T10:21:30.000Z
code/class_6/3. RNN_classification_multi.ipynb
chuckgu/nabi
aa3e3886896334512658b00a491c618b2a63fe63
[ "Apache-2.0" ]
null
null
null
code/class_6/3. RNN_classification_multi.ipynb
chuckgu/nabi
aa3e3886896334512658b00a491c618b2a63fe63
[ "Apache-2.0" ]
6
2020-01-23T12:02:17.000Z
2021-03-15T16:49:58.000Z
34.266949
126
0.51997
[ [ [ "import tensorflow as tf\nfrom tensorflow.examples.tutorials.mnist import input_data\n\n# set random seed for comparing the two result calculations\ntf.set_random_seed(1)\n\n# this is data\nmnist = input_data.read_data_sets('MNIST_data', one_hot=True)\n\n", "Extracting MNIST_data/train-images-idx3-ubyte.gz\nExtracting MNIST_data/train-labels-idx1-ubyte.gz\nExtracting MNIST_data/t10k-images-idx3-ubyte.gz\nExtracting MNIST_data/t10k-labels-idx1-ubyte.gz\n" ], [ "# hyperparameters\nlr = 0.001\ntraining_iters = 100000\nbatch_size = 128\n\nn_inputs = 28 # MNIST data input (img shape: 28*28)\nn_steps = 28 # time steps\nn_hidden_units = 128 # neurons in hidden layer\nn_classes = 10 # MNIST classes (0-9 digits)\nnum_layers=2\n\n# tf Graph input\nx = tf.placeholder(tf.float32, [None, n_steps, n_inputs])\ny = tf.placeholder(tf.float32, [None, n_classes])\n\n# Define weights\nweights = {\n # (28, 128)\n 'in': tf.Variable(tf.random_normal([n_inputs, n_hidden_units])),\n # (128, 10)\n 'out': tf.Variable(tf.random_normal([n_hidden_units, n_classes]))\n}\nbiases = {\n # (128, )\n 'in': tf.Variable(tf.constant(0.1, shape=[n_hidden_units, ])),\n # (10, )\n 'out': tf.Variable(tf.constant(0.1, shape=[n_classes, ]))\n}\nprint (\"parameters ready\")", "parameters ready\n" ], [ "# tf Graph input\nx = tf.placeholder(tf.float32, [None, n_steps, n_inputs])\ny = tf.placeholder(tf.float32, [None, n_classes])\n\n# Define weights\nweights = {\n # (28, 128)\n 'in': tf.Variable(tf.random_normal([n_inputs, n_hidden_units])),\n # (128, 10)\n 'out': tf.Variable(tf.random_normal([n_hidden_units, n_classes]))\n}\nbiases = {\n # (128, )\n 'in': tf.Variable(tf.constant(0.1, shape=[n_hidden_units, ])),\n # (10, )\n 'out': tf.Variable(tf.constant(0.1, shape=[n_classes, ]))\n}\n\ndef RNN(X, weights, biases):\n # hidden layer for input to cell\n ########################################\n\n # transpose the inputs shape from\n # X ==> (128 batch * 28 steps, 28 inputs)\n X = tf.reshape(X, [-1, n_inputs])\n\n # into hidden\n # X_in = (128 batch * 28 steps, 128 hidden)\n X_in = tf.matmul(X, weights['in']) + biases['in']\n # X_in ==> (128 batch, 28 steps, 128 hidden)\n X_in = tf.reshape(X_in, [-1, n_steps, n_hidden_units])\n\n # cell\n ##########################################\n\n # basic LSTM Cell.\n if int((tf.__version__).split('.')[1]) < 12 and int((tf.__version__).split('.')[0]) < 1:\n cell = tf.nn.rnn_cell.BasicLSTMCell(n_hidden_units, forget_bias=1.0, state_is_tuple=True)\n\n cell = tf.nn.rnn_cell.DropoutWrapper(cell, output_keep_prob=0.5)\n cell = tf.nn.rnn_cell.MultiRNNCell([cell] * num_layers)\n \n else:\n cell = tf.contrib.rnn.BasicLSTMCell(n_hidden_units)\n\n cell = tf.contrib.rnn.DropoutWrapper(cell, output_keep_prob=0.5)\n cell = tf.contrib.rnn.MultiRNNCell([cell] * num_layers) \n \n # lstm cell is divided into two parts (c_state, h_state)\n init_state = cell.zero_state(batch_size, dtype=tf.float32)\n\n # You have 2 options for following step.\n # 1: tf.nn.rnn(cell, inputs);\n # 2: tf.nn.dynamic_rnn(cell, inputs).\n # If use option 1, you have to modified the shape of X_in, go and check out this:\n # https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/3_NeuralNetworks/recurrent_network.py\n # In here, we go for option 2.\n # dynamic_rnn receive Tensor (batch, steps, inputs) or (steps, batch, inputs) as X_in.\n # Make sure the time_major is changed accordingly.\n outputs, final_state = tf.nn.dynamic_rnn(cell, X_in, initial_state=init_state, time_major=False)\n\n # hidden layer for output as the 
final results\n #############################################\n # results = tf.matmul(final_state[1], weights['out']) + biases['out']\n\n # # or\n # unpack to list [(batch, outputs)..] * steps\n if int((tf.__version__).split('.')[1]) < 12 and int((tf.__version__).split('.')[0]) < 1:\n outputs = tf.unpack(tf.transpose(outputs, [1, 0, 2])) # the last per-step output feeds the output layer\n else:\n outputs = tf.unstack(tf.transpose(outputs, [1,0,2]))\n results = tf.matmul(outputs[-1], weights['out']) + biases['out'] # shape = (128, 10)\n\n return results\n\npred = RNN(x, weights, biases)\ncost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))\ntrain_op = tf.train.AdamOptimizer(lr).minimize(cost)\n\ncorrect_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))\naccuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\n\nprint (\"Network ready\")\n", "Network ready\n" ], [ "with tf.Session() as sess:\n # tf.initialize_all_variables() is no longer valid as of\n # 2017-03-02 when using tensorflow >= 0.12\n if int((tf.__version__).split('.')[1]) < 12 and int((tf.__version__).split('.')[0]) < 1:\n init = tf.initialize_all_variables()\n else:\n init = tf.global_variables_initializer()\n sess.run(init)\n step = 0\n while step * batch_size < training_iters:\n batch_xs, batch_ys = mnist.train.next_batch(batch_size)\n batch_xs = batch_xs.reshape([batch_size, n_steps, n_inputs])\n _, acc, loss = sess.run([train_op, accuracy, cost], feed_dict={\n x: batch_xs,\n y: batch_ys,\n })\n if step % 20 == 0:\n print (\"Iter \" + str(step*batch_size) + \", Minibatch Loss= \" + \\\n \"{:.6f}\".format(loss) + \", Training Accuracy= \" + \\\n \"{:.5f}\".format(acc))\n step += 1", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code" ] ]
d016a99a5445405237fefa228d8bcf19fb237ac4
40,316
ipynb
Jupyter Notebook
jupyter/model_comparison.ipynb
mseinstein/Proofcheck
aa87c99b0e655ed777657a5b85ad91cd686dec47
[ "MIT" ]
null
null
null
jupyter/model_comparison.ipynb
mseinstein/Proofcheck
aa87c99b0e655ed777657a5b85ad91cd686dec47
[ "MIT" ]
null
null
null
jupyter/model_comparison.ipynb
mseinstein/Proofcheck
aa87c99b0e655ed777657a5b85ad91cd686dec47
[ "MIT" ]
null
null
null
44.449835
2,835
0.367968
[ [ [ "# Create Temporary Datasets for Analysis\n\nSimulate the proofcheck dataset until you get access to it", "_____no_output_____" ] ], [ [ "import pandas as pd\nmov_meta = pd.read_csv('movie_metadata.csv')", "_____no_output_____" ], [ "mov_meta.head()", "_____no_output_____" ], [ "# For the sake of simplicity only look at colmns with numeric data\nmov_meta_nrw=mov_meta._get_numeric_data()\nmov_meta_nrw.head()", "_____no_output_____" ], [ "# variable of interest is gross and to make data similar to proofcheck will create binary variable\n# 1 if movie gross is greater than budget and 0 if not\nmov_meta_nrw['gross_bin'] = mov_meta_nrw['gross']>mov_meta_nrw['budget']\nmov_meta_nrw.drop(['gross','budget'], axis=1,inplace=True) \nmov_meta_nrw.head()", "_____no_output_____" ], [ "xtrain = mov_meta_nrw.iloc[:,:-1]\nytrain = mov_meta_nrw.iloc[:,-1]", "_____no_output_____" ], [ "from sklearn.linear_model import LogisticRegression\nlr = LogisticRegression()", "_____no_output_____" ], [ "lr_fit = lr.fit(xtrain,ytrain)", "_____no_output_____" ], [ "type(ytrain)", "_____no_output_____" ], [ "from sklearn import datasets\ndataset = datasets.load_iris()\ndataset", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
d016aada388005bcbc6ca65c81979973a8747f3f
607,795
ipynb
Jupyter Notebook
notebooks/11_InferenceEyes.ipynb
vladimir-chernykh/facestyle-gan
a62563be0679ce5796f26adaf8e9f559670f9fd9
[ "MIT" ]
1
2021-12-10T10:10:53.000Z
2021-12-10T10:10:53.000Z
notebooks/11_InferenceEyes.ipynb
vladimir-chernykh/facestyle-gan
a62563be0679ce5796f26adaf8e9f559670f9fd9
[ "MIT" ]
1
2021-09-23T19:36:02.000Z
2021-10-04T19:26:04.000Z
notebooks/11_InferenceEyes.ipynb
vladimir-chernykh/facestyle-gan
a62563be0679ce5796f26adaf8e9f559670f9fd9
[ "MIT" ]
1
2021-10-02T05:54:09.000Z
2021-10-02T05:54:09.000Z
749.438964
221,460
0.94667
[ [ [ "This notebook shows:\n* How to launch the [**StarGANv1**](https://arxiv.org/abs/1711.09020) model for inference\n* Example of results for both\n * attrubutes **detection**\n * new face **generation** with desired attributes\n\nHere I use [**PyTorch** implementation](https://github.com/yunjey/stargan) of the StarGANv1 model.\n\n[StarGANv1](https://arxiv.org/abs/1711.09020) was chosen because:\n* It provides an ability to generate images **contitionally**. One can control the \"amount\" of each desired feature via input vector.\n* It can **train (relatively) fast** on (relatively) small resources.\n\nThe model is pretty old though and has its own drawbacks:\n* It works well only with small resolution images (~128).\n* For bigger images the artifacts are inavoidable. They sometimes happen even for 128x128 images.\n\nThe obvious improvement is to use newer model, e.g., [StarGANv2](https://arxiv.org/abs/1912.01865) which was released in April 2020. It generates much better images at much higher resolution. But it requires both huge resoruces and lots of time to train.\n\nPrior to running this notebook please download the pretrained models:\n```\n../scripts/get_models.sh\n```", "_____no_output_____" ], [ "# Imports", "_____no_output_____" ], [ "Imort necessary libraries", "_____no_output_____" ] ], [ [ "import os\nimport sys\nos.environ[\"KMP_DUPLICATE_LIB_OK\"] = \"True\"\nsys.path.extend([\"../code/\", \"../stargan/\"])\n\nimport torch\nimport torchvision.transforms as T\n\nfrom PIL import Image\nimport matplotlib.pyplot as plt\n\nfrom config import get_config\n\nfrom solver import Solver", "_____no_output_____" ] ], [ [ "# Load model", "_____no_output_____" ], [ "Let's first load the config for the model. It is mostly default except for the:\n\n* model checkpoint path\n* style classes, their order and number\n\nNote that in the original StarGANv1 model 5 classes are used: `[Black_Hair Blond_Hair Brown_Hair Male Young]`.\n\nI retrained the model **4** times for different **face parts**. Each face part has several classes connected to it (see `DataExploration` notebook):\n* **nose**: `[Big_Nose, Pointy_Nose]`\n* **mouth**: `[Mouth_Slightly_Open, Smiling]`\n* **eyes**: `[Arched_Eyebrows, Bushy_Eyebrows, Bags_Under_Eyes, Eyeglasses, Narrow_Eyes]`\n* **hair**: `[Black_Hair, Blond_Hair, Brown_Hair, Gray_Hair, Bald Bangs, Receding_Hairline, Straight_Hair, Wavy_Hair]`\n\nHere I show the examples only for **eyes** class. 
But all other classes works in the same way and prediction examples are shown in the repo and in other notebooks.", "_____no_output_____" ] ], [ [ "config = get_config(\"\"\"\n--model_save_dir ../models/celeba_128_eyes/\n--test_iters 200000\n--c_dim 5\n--selected_attrs Arched_Eyebrows Bushy_Eyebrows Bags_Under_Eyes Eyeglasses Narrow_Eyes\n\"\"\")", "_____no_output_____" ] ], [ [ "Load the model architecture with the provided config.", "_____no_output_____" ] ], [ [ "model = Solver(None, None, config)", "Generator(\n (main): Sequential(\n (0): Conv2d(8, 64, kernel_size=(7, 7), stride=(1, 1), padding=(3, 3), bias=False)\n (1): InstanceNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (2): ReLU(inplace=True)\n (3): Conv2d(64, 128, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1), bias=False)\n (4): InstanceNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (5): ReLU(inplace=True)\n (6): Conv2d(128, 256, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1), bias=False)\n (7): InstanceNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (8): ReLU(inplace=True)\n (9): ResidualBlock(\n (main): Sequential(\n (0): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (1): InstanceNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (2): ReLU(inplace=True)\n (3): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (4): InstanceNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n )\n )\n (10): ResidualBlock(\n (main): Sequential(\n (0): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (1): InstanceNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (2): ReLU(inplace=True)\n (3): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (4): InstanceNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n )\n )\n (11): ResidualBlock(\n (main): Sequential(\n (0): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (1): InstanceNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (2): ReLU(inplace=True)\n (3): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (4): InstanceNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n )\n )\n (12): ResidualBlock(\n (main): Sequential(\n (0): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (1): InstanceNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (2): ReLU(inplace=True)\n (3): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (4): InstanceNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n )\n )\n (13): ResidualBlock(\n (main): Sequential(\n (0): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (1): InstanceNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (2): ReLU(inplace=True)\n (3): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (4): InstanceNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n )\n )\n (14): ResidualBlock(\n (main): Sequential(\n (0): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (1): InstanceNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (2): 
ReLU(inplace=True)\n (3): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (4): InstanceNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n )\n )\n (15): ConvTranspose2d(256, 128, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1), bias=False)\n (16): InstanceNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (17): ReLU(inplace=True)\n (18): ConvTranspose2d(128, 64, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1), bias=False)\n (19): InstanceNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n (20): ReLU(inplace=True)\n (21): Conv2d(64, 3, kernel_size=(7, 7), stride=(1, 1), padding=(3, 3), bias=False)\n (22): Tanh()\n )\n)\nG\nThe number of parameters: 8430528\nDiscriminator(\n (main): Sequential(\n (0): Conv2d(3, 64, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n (1): LeakyReLU(negative_slope=0.01)\n (2): Conv2d(64, 128, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n (3): LeakyReLU(negative_slope=0.01)\n (4): Conv2d(128, 256, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n (5): LeakyReLU(negative_slope=0.01)\n (6): Conv2d(256, 512, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n (7): LeakyReLU(negative_slope=0.01)\n (8): Conv2d(512, 1024, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n (9): LeakyReLU(negative_slope=0.01)\n (10): Conv2d(1024, 2048, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))\n (11): LeakyReLU(negative_slope=0.01)\n )\n (conv1): Conv2d(2048, 1, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n (conv2): Conv2d(2048, 5, kernel_size=(2, 2), stride=(1, 1), bias=False)\n)\nD\nThe number of parameters: 44762048\n" ] ], [ [ "Restore model weights.", "_____no_output_____" ] ], [ [ "model.restore_model(model.test_iters)", "Loading the trained models from step 200000...\n" ] ], [ [ "# Prediction example", "_____no_output_____" ], [ "Let's read a test image.\n\nNote that the **face position and size** should be comparable to what the model has seen in the training data (CelebA). Here I do not use any face detector and crop the faces manually. But in production environment one needs to setup the face detector correspondingly.", "_____no_output_____" ] ], [ [ "image = Image.open(\"../data/test.jpg\")\nimage", "_____no_output_____" ] ], [ [ "The input to the network is **3x128x128 image in a range [-1; 1]** (note that the channels is the first dimension).\n\nThus one need to do preprocessing in advance.", "_____no_output_____" ] ], [ [ "transform = []\ntransform.append(T.Resize(128))\ntransform.append(T.CenterCrop(128))\ntransform.append(T.ToTensor())\ntransform.append(T.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)))\ntransform = T.Compose(transform)", "_____no_output_____" ] ], [ [ "Create a batch of 1 image", "_____no_output_____" ] ], [ [ "x_real = torch.stack([transform(image)])\nx_real.shape", "_____no_output_____" ] ], [ [ "## Attributes prediction", "_____no_output_____" ], [ "Let's first predict the attbibutes of the image. To do so I use the **Discriminator** part of the network. In StarGAN architecture it predicts not only the fake/real label but also the classes/attributes/styles of the image.\n\nHere I call this vector **eigen style vector**. Note that due to the possible co-existence of multiple labels and the corresponding training procedure (Sigmoid + BCELoss instead of Softmax + CrossEntropyLoss) I use sigmoid activation function here and treat predicted labels separately (instead of softmax and 1-of-all). 
", "_____no_output_____" ] ], [ [ "with torch.no_grad():\n eigen_style_vector = torch.sigmoid(model.D(x_real)[1])", "_____no_output_____" ] ], [ [ "Below is the probability of each label. The photo indeed depicts a person with big and little bit arched eyebrows.", "_____no_output_____" ] ], [ [ "for proba, tag in zip(eigen_style_vector.numpy()[0], model.selected_attrs):\n print(f\"{tag:20s}: {proba:.3f}\")", "Arched_Eyebrows : 0.334\nBushy_Eyebrows : 0.207\nBags_Under_Eyes : 0.054\nEyeglasses : 0.000\nNarrow_Eyes : 0.081\n" ] ], [ [ "Now let's look at how well the **Generator** model can recreate the face without altering it using the just computed eigen style vector.", "_____no_output_____" ] ], [ [ "with torch.no_grad():\n res_eigen = model.G(x_real, eigen_style_vector)\nres_eigen.shape", "_____no_output_____" ] ], [ [ "Plot the original face and the reconstructed one:", "_____no_output_____" ] ], [ [ "plt.figure(figsize=(9, 8))\n\nplt.subplot(121)\n_img = model.denorm(x_real).numpy()[0].transpose((1, 2, 0))\nplt.imshow(_img)\nplt.axis(\"off\")\nplt.title(\"Original\", fontsize=16)\n\nplt.subplot(122)\n_img = model.denorm(res_eigen).numpy()[0].transpose((1, 2, 0))\nplt.imshow(_img)\nplt.axis(\"off\")\nplt.title(\"Eigen style reconstruction\", fontsize=16);", "_____no_output_____" ] ], [ [ "Looks good enough.", "_____no_output_____" ], [ "## Face modification using new attributes", "_____no_output_____" ], [ "Now let's try to modify the face starting from the eigen style vector.\n\nLet's say, I want to **add eyeglasses**. To do so I am to set the corresponding style vector component to 1.", "_____no_output_____" ] ], [ [ "eigen_style_vector_modified_1 = eigen_style_vector.clone()\neigen_style_vector_modified_1[:, 3] = 1", "_____no_output_____" ] ], [ [ "Now the style vector looks the following:", "_____no_output_____" ] ], [ [ "for proba, tag in zip(eigen_style_vector_modified_1.numpy()[0], model.selected_attrs):\n print(f\"{tag:20s}: {proba:.3f}\")", "Arched_Eyebrows : 0.334\nBushy_Eyebrows : 0.207\nBags_Under_Eyes : 0.054\nEyeglasses : 1.000\nNarrow_Eyes : 0.081\n" ] ], [ [ "Let's try to generate face with this modified style vector:", "_____no_output_____" ] ], [ [ "with torch.no_grad():\n res_modified_1 = model.G(x_real, eigen_style_vector_modified_1)\nres_modified_1.shape", "_____no_output_____" ] ], [ [ "Plot the faces:", "_____no_output_____" ] ], [ [ "plt.figure(figsize=(13.5, 8))\n\nplt.subplot(131)\n_img = model.denorm(x_real).numpy()[0].transpose((1, 2, 0))\nplt.imshow(_img)\nplt.axis(\"off\")\nplt.title(\"Original\", fontsize=16)\n\nplt.subplot(132)\n_img = model.denorm(res_eigen).numpy()[0].transpose((1, 2, 0))\nplt.imshow(_img)\nplt.axis(\"off\")\nplt.title(\"Eigen style reconstruction\", fontsize=16);\n\nplt.subplot(133)\n_img = model.denorm(res_modified_1).numpy()[0].transpose((1, 2, 0))\nplt.imshow(_img)\nplt.axis(\"off\")\nplt.title(\"Eyeglasses\", fontsize=16);", "_____no_output_____" ] ], [ [ "Now let's try to **change two attributes simultaneously**:\n* Make the eyes narrow\n* Add archness to the eyebrows", "_____no_output_____" ] ], [ [ "eigen_style_vector_modified_2 = eigen_style_vector.clone()\neigen_style_vector_modified_2[:, 0] = 1\neigen_style_vector_modified_2[:, 4] = 1", "_____no_output_____" ] ], [ [ "Now the style vector looks the following:", "_____no_output_____" ] ], [ [ "for proba, tag in zip(eigen_style_vector_modified_2.numpy()[0], model.selected_attrs):\n print(f\"{tag:20s}: {proba:.3f}\")", "Arched_Eyebrows : 1.000\nBushy_Eyebrows : 
0.207\nBags_Under_Eyes : 0.054\nEyeglasses : 0.000\nNarrow_Eyes : 1.000\n" ] ], [ [ "Let's try to generate a face with this modified style vector:", "_____no_output_____" ] ], [ [ "with torch.no_grad():\n res_modified_2 = model.G(x_real, eigen_style_vector_modified_2)\nres_modified_2.shape", "_____no_output_____" ] ], [ [ "Plot the faces:", "_____no_output_____" ] ], [ [ "plt.figure(figsize=(18, 8))\n\nplt.subplot(141)\n_img = model.denorm(x_real).numpy()[0].transpose((1, 2, 0))\nplt.imshow(_img)\nplt.axis(\"off\")\nplt.title(\"Original\", fontsize=16)\n\nplt.subplot(142)\n_img = model.denorm(res_eigen).numpy()[0].transpose((1, 2, 0))\nplt.imshow(_img)\nplt.axis(\"off\")\nplt.title(\"Eigen style reconstruction\", fontsize=16);\n\nplt.subplot(143)\n_img = model.denorm(res_modified_1).numpy()[0].transpose((1, 2, 0))\nplt.imshow(_img)\nplt.axis(\"off\")\nplt.title(\"Eyeglasses\", fontsize=16);\n\nplt.subplot(144)\n_img = model.denorm(res_modified_2).numpy()[0].transpose((1, 2, 0))\nplt.imshow(_img)\nplt.axis(\"off\")\nplt.title(\"Arched eyebrows + Narrow\", fontsize=16);", "_____no_output_____" ] ], [ [ "Looks good!", "_____no_output_____" ] ] ]
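[ [ "Since the intro notes that the style vector controls the \"amount\" of each feature, a quick sketch of sweeping one component (Narrow_Eyes, index 4 in `selected_attrs`) to visualize the gradual change. Only the loop and titles are new; every call reuses objects defined above.", "_____no_output_____" ] ], [ [ "# Sweep the Narrow_Eyes component between 0 and 1\nresults = []\nalphas = [0.0, 0.25, 0.5, 0.75, 1.0]\nfor alpha in alphas:\n    v = eigen_style_vector.clone()\n    v[:, 4] = alpha\n    with torch.no_grad():\n        results.append(model.G(x_real, v))\n\nplt.figure(figsize=(20, 4))\nfor i, res in enumerate(results):\n    plt.subplot(1, len(results), i + 1)\n    plt.imshow(model.denorm(res).numpy()[0].transpose((1, 2, 0)))\n    plt.axis('off')\n    plt.title('Narrow_Eyes=' + str(alphas[i]), fontsize=14)", "_____no_output_____" ] ]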
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ] ]
d016b84db8816a081ef5d8dcb4ec661a79ef5bbb
6,442
ipynb
Jupyter Notebook
.ipynb_checkpoints/TestAprs-checkpoint.ipynb
ignaciop000/afsk
3a36d2d550eba56e2f13dabdf70952c61196ce5e
[ "BSD-2-Clause" ]
null
null
null
.ipynb_checkpoints/TestAprs-checkpoint.ipynb
ignaciop000/afsk
3a36d2d550eba56e2f13dabdf70952c61196ce5e
[ "BSD-2-Clause" ]
null
null
null
.ipynb_checkpoints/TestAprs-checkpoint.ipynb
ignaciop000/afsk
3a36d2d550eba56e2f13dabdf70952c61196ce5e
[ "BSD-2-Clause" ]
null
null
null
57.00885
1,697
0.673393
[ [ [ "import logging\nlogging.basicConfig(level=logging.DEBUG)\nlogger = logging.getLogger(__name__)\n\nfrom afsk.ax25 import UI\nfrom afsk.afsk import encode\nimport audiogen\nimport sys", "_____no_output_____" ], [ "packet = UI(\n\tdestination='APRS',\n\tsource='LU8AIE', \n\tinfo=':EMAIL :[email protected] Test email'\n)", "_____no_output_____" ], [ "logger.info(r\"Sending packet: '{0}'\".format(packet))\nlogger.debug(r\"Packet bits:{0!r}\".format(packet.unparse()))", "INFO:__main__:Sending packet: 'LU8AIE>APRS,WIDE1-1,WIDE2-1::EMAIL :[email protected] Test email'\nDEBUG:__main__:Packet bits:bitarray('011111100100000100000101001001010110010100000010000000100000011000011001010101010000111001000001010010010101000100000110011101010100100100010001010100010100011000000010010001100111010101001001000100010101000100100110000000101100011011000000000011110101110010100010101100101000001010010010001100100000010000000100000001000000010001011100001101101010111000011100100001101001011010100110000000101110011010110110100001101001011000110110011101001100011011110110101101100000010000101010101001101100111000101110000001001010011010110110100001101001011000110110001101101100101101111110')\n" ], [ "audio = encode(packet.unparse())", "_____no_output_____" ], [ "audiogen.sampler.write_wav(sys.stdout, audio)", "_____no_output_____" ] ] ]
[ "code" ]
[ [ "code", "code", "code", "code", "code" ] ]
d016b857a10fca2d1d40aefeb5ce99b1b04da05f
20,892
ipynb
Jupyter Notebook
43-workout-solution_decision_trees.ipynb
hanisaf/advanced-data-management-and-analytics
e7bffda5cad91374a14df1a65f95e6a25f72cc41
[ "MIT" ]
6
2020-04-13T19:22:18.000Z
2021-04-20T18:20:13.000Z
43-workout-solution_decision_trees.ipynb
hanisaf/advanced-data-management-and-analytics
e7bffda5cad91374a14df1a65f95e6a25f72cc41
[ "MIT" ]
null
null
null
43-workout-solution_decision_trees.ipynb
hanisaf/advanced-data-management-and-analytics
e7bffda5cad91374a14df1a65f95e6a25f72cc41
[ "MIT" ]
10
2020-05-12T01:02:32.000Z
2022-02-28T17:04:37.000Z
34.82
361
0.352288
[ [ [ "import pandas as pd\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.model_selection import train_test_split, GridSearchCV\nfrom sklearn.metrics import accuracy_score, confusion_matrix\nfrom sklearn.tree import export_text", "_____no_output_____" ] ], [ [ "This example uses the [Universal Bank](https://www.kaggle.com/sriharipramod/bank-loan-classification) data set and some example code of running classification trees from chapter 9 of [Data Mining for Business Analytics](https://www.dataminingbook.com/book/python-edition)\n\n> The data include customer demographic information (age, income, etc.), the customer's relationship with the bank (mortgage, securities account, etc.), and the customer response to the last personal loan campaign (Personal Loan). Among these 5000 customers, only 480 (= 9.6%) accepted the personal loan that was offered to them in the earlier campaign\n[Source](https://www.kaggle.com/itsmesunil/campaign-for-selling-personal-loans)\n\n1. Train a decision tree classifier, print the tree and evaluate its accuracy.\n2. Prune the tree by changing its hyper parameters, evaluate the accuracy of the new tree.\n3. Using [grid search](https://scikit-learn.org/stable/modules/grid_search.html), perform a systematic tuning of the decision tree hyper parameters.", "_____no_output_____" ] ], [ [ "data = pd.read_csv('data/UniversalBank.csv')\ndata.head()", "_____no_output_____" ] ], [ [ "Courtesy - Statistics.com \n\nData Description:\nID \t\t \t Customer ID\nAge \t\t Customer's age in completed years\nExperience \t #years of professional experience\nIncome \t\t Annual income of the customer ($000)\nZIPCode \t\t Home Address ZIP code.\nFamily \t\t Family size of the customer\nCCAvg \t\t Avg. spending on credit cards per month ($000)\nEducation \t \t Education Level. 1: Undergrad; 2: Graduate; 3: Advanced/Professional\nMortgage \t \t Value of house mortgage if any. 
($000)\nPersonal Loan \t Did this customer accept the personal loan offered in the last campaign?\nSecurities Account Does the customer have a securities account with the bank?\nCD Account \t Does the customer have a certificate of deposit (CD) account with the bank?\nOnline \t\t Does the customer use internet banking facilities?\nCreditCard \t Does the customer use a credit card issued by UniversalBank?", "_____no_output_____" ] ], [ [ "bank_df = data.drop(columns=['ID', 'ZIP Code'])\n\nX = bank_df.drop(columns=['Personal Loan'])\ny = bank_df['Personal Loan']\ntrain_X, valid_X, train_y, valid_y = train_test_split(X, y, test_size=0.4, random_state=1)\n\ndtree = DecisionTreeClassifier()\ndtree.fit(train_X, train_y)\n\nprint(export_text(dtree, feature_names=list(X.columns)))", "|--- Income <= 110.50\n| |--- CCAvg <= 2.95\n| | |--- Income <= 106.50\n| | | |--- class: 0\n| | |--- Income > 106.50\n| | | |--- Family <= 3.50\n| | | | |--- class: 0\n| | | |--- Family > 3.50\n| | | | |--- Age <= 38.00\n| | | | | |--- class: 0\n| | | | |--- Age > 38.00\n| | | | | |--- class: 1\n| |--- CCAvg > 2.95\n| | |--- CD Account <= 0.50\n| | | |--- Income <= 92.50\n| | | | |--- Age <= 26.50\n| | | | | |--- class: 1\n| | | | |--- Age > 26.50\n| | | | | |--- CCAvg <= 3.55\n| | | | | | |--- CCAvg <= 3.35\n| | | | | | | |--- CCAvg <= 3.05\n| | | | | | | | |--- class: 0\n| | | | | | | |--- CCAvg > 3.05\n| | | | | | | | |--- CCAvg <= 3.15\n| | | | | | | | | |--- Mortgage <= 89.00\n| | | | | | | | | | |--- class: 1\n| | | | | | | | | |--- Mortgage > 89.00\n| | | | | | | | | | |--- class: 0\n| | | | | | | | |--- CCAvg > 3.15\n| | | | | | | | | |--- class: 0\n| | | | | | |--- CCAvg > 3.35\n| | | | | | | |--- Family <= 3.00\n| | | | | | | | |--- class: 1\n| | | | | | | |--- Family > 3.00\n| | | | | | | | |--- class: 0\n| | | | | |--- CCAvg > 3.55\n| | | | | | |--- Income <= 81.50\n| | | | | | | |--- class: 0\n| | | | | | |--- Income > 81.50\n| | | | | | | |--- Income <= 83.50\n| | | | | | | | |--- Experience <= 20.00\n| | | | | | | | | |--- class: 0\n| | | | | | | | |--- Experience > 20.00\n| | | | | | | | | |--- Family <= 3.50\n| | | | | | | | | | |--- class: 1\n| | | | | | | | | |--- Family > 3.50\n| | | | | | | | | | |--- class: 0\n| | | | | | | |--- Income > 83.50\n| | | | | | | | |--- class: 0\n| | | |--- Income > 92.50\n| | | | |--- Education <= 1.50\n| | | | | |--- Experience <= 33.50\n| | | | | | |--- class: 0\n| | | | | |--- Experience > 33.50\n| | | | | | |--- Online <= 0.50\n| | | | | | | |--- class: 1\n| | | | | | |--- Online > 0.50\n| | | | | | | |--- class: 0\n| | | | |--- Education > 1.50\n| | | | | |--- CCAvg <= 4.30\n| | | | | | |--- Experience <= 37.50\n| | | | | | | |--- class: 1\n| | | | | | |--- Experience > 37.50\n| | | | | | | |--- Education <= 2.50\n| | | | | | | | |--- class: 0\n| | | | | | | |--- Education > 2.50\n| | | | | | | | |--- class: 1\n| | | | | |--- CCAvg > 4.30\n| | | | | | |--- Mortgage <= 149.00\n| | | | | | | |--- class: 1\n| | | | | | |--- Mortgage > 149.00\n| | | | | | | |--- class: 0\n| | |--- CD Account > 0.50\n| | | |--- Family <= 1.50\n| | | | |--- Mortgage <= 250.50\n| | | | | |--- class: 0\n| | | | |--- Mortgage > 250.50\n| | | | | |--- class: 1\n| | | |--- Family > 1.50\n| | | | |--- class: 1\n|--- Income > 110.50\n| |--- Education <= 1.50\n| | |--- Family <= 2.50\n| | | |--- class: 0\n| | |--- Family > 2.50\n| | | |--- Age <= 26.00\n| | | | |--- class: 0\n| | | |--- Age > 26.00\n| | | | |--- Income <= 113.50\n| | | | | |--- CD Account <= 0.50\n| | | | | | |--- class: 
0\n| | | | | |--- CD Account > 0.50\n| | | | | | |--- class: 1\n| | | | |--- Income > 113.50\n| | | | | |--- class: 1\n| |--- Education > 1.50\n| | |--- Income <= 116.50\n| | | |--- CCAvg <= 3.50\n| | | | |--- CCAvg <= 2.80\n| | | | | |--- Mortgage <= 231.00\n| | | | | | |--- CCAvg <= 1.55\n| | | | | | | |--- class: 0\n| | | | | | |--- CCAvg > 1.55\n| | | | | | | |--- CCAvg <= 1.75\n| | | | | | | | |--- Age <= 51.50\n| | | | | | | | | |--- class: 1\n| | | | | | | | |--- Age > 51.50\n| | | | | | | | | |--- class: 0\n| | | | | | | |--- CCAvg > 1.75\n| | | | | | | | |--- Family <= 1.50\n| | | | | | | | | |--- class: 1\n| | | | | | | | |--- Family > 1.50\n| | | | | | | | | |--- class: 0\n| | | | | |--- Mortgage > 231.00\n| | | | | | |--- Experience <= 11.50\n| | | | | | | |--- class: 0\n| | | | | | |--- Experience > 11.50\n| | | | | | | |--- class: 1\n| | | | |--- CCAvg > 2.80\n| | | | | |--- CCAvg <= 3.35\n| | | | | | |--- class: 1\n| | | | | |--- CCAvg > 3.35\n| | | | | | |--- class: 0\n| | | |--- CCAvg > 3.50\n| | | | |--- class: 1\n| | |--- Income > 116.50\n| | | |--- class: 1\n\n" ], [ "print(confusion_matrix(train_y, dtree.predict(train_X)))\nprint(confusion_matrix(valid_y, dtree.predict(valid_X)))\naccuracy_score(train_y, dtree.predict(train_X)), accuracy_score(valid_y, dtree.predict(valid_X))", "[[2713 0]\n [ 0 287]]\n[[1794 13]\n [ 25 168]]\n" ], [ "dtree = DecisionTreeClassifier(max_depth=30, min_samples_split=20, min_impurity_decrease=0.01)\ndtree.fit(train_X, train_y)\nprint(export_text(dtree, feature_names=list(X.columns)))", "|--- Income <= 110.50\n| |--- class: 0\n|--- Income > 110.50\n| |--- Education <= 1.50\n| | |--- Family <= 2.50\n| | | |--- class: 0\n| | |--- Family > 2.50\n| | | |--- class: 1\n| |--- Education > 1.50\n| | |--- Income <= 116.50\n| | | |--- class: 0\n| | |--- Income > 116.50\n| | | |--- class: 1\n\n" ], [ "print(confusion_matrix(train_y, dtree.predict(train_X)))\nprint(confusion_matrix(valid_y, dtree.predict(valid_X)))\naccuracy_score(train_y, dtree.predict(train_X)), accuracy_score(valid_y, dtree.predict(valid_X))", "[[2711 2]\n [ 51 236]]\n[[1804 3]\n [ 43 150]]\n" ], [ "# Start with an initial guess for parameters\nparam_grid = {\n 'max_depth': [10, 20, 30, 40], \n 'min_samples_split': [20, 40, 60, 80, 100], \n 'min_impurity_decrease': [0, 0.0005, 0.001, 0.005, 0.01], \n}\ngridSearch = GridSearchCV(DecisionTreeClassifier(), param_grid, cv=5, n_jobs=-1)\ngridSearch.fit(train_X, train_y)\nprint('Score: ', gridSearch.best_score_)\nprint('Parameters: ', gridSearch.best_params_)\n\n\n\ndtree = gridSearch.best_estimator_", "Score: 0.988\nParameters: {'max_depth': 10, 'min_impurity_decrease': 0.001, 'min_samples_split': 20}\n" ], [ "print(confusion_matrix(train_y, dtree.predict(train_X)))\nprint(confusion_matrix(valid_y, dtree.predict(valid_X)))\naccuracy_score(train_y, dtree.predict(train_X)), accuracy_score(valid_y, dtree.predict(valid_X))", "[[2703 10]\n [ 20 267]]\n[[1793 14]\n [ 21 172]]\n" ], [ "print(export_text(dtree, feature_names=list(X.columns)))", "|--- Income <= 110.50\n| |--- CCAvg <= 2.95\n| | |--- class: 0\n| |--- CCAvg > 2.95\n| | |--- CD Account <= 0.50\n| | | |--- Income <= 92.50\n| | | | |--- class: 0\n| | | |--- Income > 92.50\n| | | | |--- Education <= 1.50\n| | | | | |--- class: 0\n| | | | |--- Education > 1.50\n| | | | | |--- class: 1\n| | |--- CD Account > 0.50\n| | | |--- class: 1\n|--- Income > 110.50\n| |--- Education <= 1.50\n| | |--- Family <= 2.50\n| | | |--- class: 0\n| | |--- Family > 2.50\n| | | |--- class: 1\n| |--- Education 
> 1.50\n| | |--- Income <= 116.50\n| | | |--- CCAvg <= 3.50\n| | | | |--- class: 0\n| | | |--- CCAvg > 3.50\n| | | | |--- class: 1\n| | |--- Income > 116.50\n| | | |--- class: 1\n\n" ] ] ]
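[ [ "The comment 'Start with an initial guess for parameters' invites a second, finer pass around the best values found (max_depth=10, min_impurity_decrease=0.001, min_samples_split=20). A sketch with assumed (narrower) grid values:", "_____no_output_____" ] ], [ [ "# Hypothetical refinement pass around the first grid's best parameters\nparam_grid_fine = {\n    'max_depth': [5, 8, 10, 12, 15],\n    'min_samples_split': [10, 15, 20, 25, 30],\n    'min_impurity_decrease': [0.0005, 0.00075, 0.001, 0.0025, 0.005],\n}\ngridSearch = GridSearchCV(DecisionTreeClassifier(), param_grid_fine, cv=5, n_jobs=-1)\ngridSearch.fit(train_X, train_y)\nprint('Refined score: ', gridSearch.best_score_)\nprint('Refined parameters: ', gridSearch.best_params_)", "_____no_output_____" ] ]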
[ "code", "markdown", "code", "raw", "code" ]
[ [ "code" ], [ "markdown" ], [ "code" ], [ "raw" ], [ "code", "code", "code", "code", "code", "code", "code" ] ]
d016c0843c7d6f05666083f604c92a4eef04e014
92,879
ipynb
Jupyter Notebook
1_Data_Cleaning.ipynb
oaagboro/Healthcare_Insurance_Fraud
77244aba4c1cd6b81c31eeb5be824935323e1a92
[ "CC0-1.0" ]
null
null
null
1_Data_Cleaning.ipynb
oaagboro/Healthcare_Insurance_Fraud
77244aba4c1cd6b81c31eeb5be824935323e1a92
[ "CC0-1.0" ]
null
null
null
1_Data_Cleaning.ipynb
oaagboro/Healthcare_Insurance_Fraud
77244aba4c1cd6b81c31eeb5be824935323e1a92
[ "CC0-1.0" ]
null
null
null
37.511712
267
0.349175
[ [ [ "# Data Set-up and Cleaning", "_____no_output_____" ] ], [ [ "# Standard Library Imports\nimport pandas as pd\nimport numpy as np", "_____no_output_____" ] ], [ [ "For this section, I will be concatenating all the data sets into one large dataset.", "_____no_output_____" ], [ "### Load the datasets", "_____no_output_____" ] ], [ [ "inpatient = pd.read_csv('./data/Train_Inpatientdata-1542865627584.csv')", "_____no_output_____" ], [ "outpatient = pd.read_csv('./data/Train_Outpatientdata-1542865627584.csv')", "_____no_output_____" ], [ "beneficiary = pd.read_csv('./data/Train_Beneficiarydata-1542865627584.csv')", "_____no_output_____" ], [ "fraud = pd.read_csv('./data/Train-1542865627584.csv')", "_____no_output_____" ], [ "# Increase the max display options of the columns and rows\npd.set_option('display.max_columns', 100)", "_____no_output_____" ] ], [ [ "### Inspect the first 5 rows of the datasets", "_____no_output_____" ] ], [ [ "# Inspect the first 5 rows of the inpatient claims\ninpatient.head()", "_____no_output_____" ], [ "# Inspect the first 5 rows of the outpatient claims\noutpatient.head()", "_____no_output_____" ], [ "# Inspect the first 5 rows of the beneficiary dataset\nbeneficiary.head()", "_____no_output_____" ], [ "# Inspect the first 5 rows of the fraud column\nfraud.head()", "_____no_output_____" ] ], [ [ "### Check the number of rows and columns for each dataset", "_____no_output_____" ] ], [ [ "inpatient.shape", "_____no_output_____" ], [ "outpatient.shape", "_____no_output_____" ], [ "beneficiary.shape", "_____no_output_____" ], [ "fraud.shape", "_____no_output_____" ] ], [ [ "Some columns in the inpatient dataset are not in the outpatient dataset or in the fraud (target) dataset and vice versa. In order to make sense of the data I would have to merge them together.", "_____no_output_____" ], [ "### Combine the Inpatient, Outpatient, beneficiary and fraud datasets", "_____no_output_____" ] ], [ [ "# Map the inpatient and outpatient columns, 1 for outpatient, 0 for inpatient\ninpatient[\"IsOutpatient\"] = 0\noutpatient[\"IsOutpatient\"] = 1", "_____no_output_____" ], [ "# Merging the datasets together\npatient_df = pd.concat([inpatient, outpatient],axis = 0)\npatient_df = patient_df.merge(beneficiary, how = 'left', on = 'BeneID').merge(fraud, how = 'left', on = 'Provider')\nprint(\"The shape of the dataset after merging is:\", patient_df.shape)", "The shape of the dataset after merging is: (558211, 56)\n" ], [ "# Inspect the final dataset after merging\npatient_df.head()", "_____no_output_____" ] ], [ [ "After merging the dataset, we now have a dataframe with the fraud target column.", "_____no_output_____" ] ], [ [ "patient_df.describe()", "_____no_output_____" ], [ "patient_df.dtypes", "_____no_output_____" ], [ "# Convert columns with Date attributes to Datetime datatype : \"ClaimStartDt\", \"ClaimEndDt\", \"AdmissionDt\", \"DischargeDt\", \"DOB\", \"DOD\"\npatient_df[[\"ClaimStartDt\", \"ClaimEndDt\", \"AdmissionDt\", \"DischargeDt\", \"DOB\", \"DOD\"]] = patient_df[[\"ClaimStartDt\", \"ClaimEndDt\", \"AdmissionDt\", \"DischargeDt\", \"DOB\", \"DOD\"]].apply(pd.to_datetime, format = '%Y-%m-%d', errors = 'coerce')", "_____no_output_____" ], [ "# Convert the Claims Procedure Code columns to object just as the Claims diagnoses code\npatient_df.loc[:, patient_df.columns.str.contains('ClmProcedureCode')] = patient_df.loc[:, patient_df.columns.str.contains('ClmProcedureCode')].astype(object) ", "_____no_output_____" ], [ "# Convert Race, County and State to 
objects\npatient_df[['Race', 'State', 'County']] = patient_df[['Race', 'State', 'County']].astype(object)", "_____no_output_____" ], [ "# Investigate the RenalDiseaseIndicator\npatient_df['RenalDiseaseIndicator'].value_counts()", "_____no_output_____" ], [ "# Replace 'Y' with 1 in RenalDiseaseIndicator\npatient_df['RenalDiseaseIndicator'] = patient_df['RenalDiseaseIndicator'].replace({'Y': 1})", "_____no_output_____" ], [ "# Check to see if the replacement worked\npatient_df['RenalDiseaseIndicator'].value_counts()", "_____no_output_____" ] ], [ [ "### Change other binary variables to 0 and 1", "_____no_output_____" ] ], [ [ "# Recode the Gender column and every column containing 'ChronicCond' as binary 0/1 variables\nchronic = patient_df.columns[patient_df.columns.str.contains(\"ChronicCond\")].tolist()\npatient_df[chronic] = patient_df[chronic].apply(lambda x: np.where(x == 2,0,1))\npatient_df['Gender'] = patient_df['Gender'].apply(lambda x: np.where(x == 2,0,1))", "_____no_output_____" ], [ "# Check to see if it changed\npatient_df['Gender'].value_counts()", "_____no_output_____" ], [ "# Checking the change\npatient_df['ChronicCond_Alzheimer'].value_counts()", "_____no_output_____" ], [ "# Check the data types again\npatient_df.dtypes", "_____no_output_____" ], [ "# Save the data as 'patients'\npatient_df.to_csv('./data/patients.csv', index=False)", "_____no_output_____" ], [ "patient_df.to_pickle('./data/patients.pkl')", "_____no_output_____" ] ] ]
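A minimal, self-contained sketch of the 1/2-to-0/1 recoding pattern used for the gender and chronic-condition columns in this notebook; the toy values below are made up, not taken from the claims data:

```python
import numpy as np
import pandas as pd

# Toy frame mimicking the 1/2 coding of the chronic-condition columns
toy = pd.DataFrame({"Gender": [1, 2, 2, 1],
                    "ChronicCond_Alzheimer": [2, 2, 1, 2]})

# Same idea as the notebook's lambda: map 2 -> 0 and keep 1 -> 1
cols = ["Gender", "ChronicCond_Alzheimer"]
toy[cols] = toy[cols].apply(lambda x: np.where(x == 2, 0, 1))

print(toy)  # Gender -> [1, 0, 0, 1], ChronicCond_Alzheimer -> [0, 0, 1, 0]
```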
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown", "markdown" ], [ "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ] ]
d016d58293c8827d94f3166e0250425714ad9e10
9,809
ipynb
Jupyter Notebook
Chapter01/Exercise1.03/Exercise1.03.ipynb
fenago/Applied_Data_Analytics
8c2abc859ff03783aeed79b82e6be910ae423949
[ "MIT" ]
null
null
null
Chapter01/Exercise1.03/Exercise1.03.ipynb
fenago/Applied_Data_Analytics
8c2abc859ff03783aeed79b82e6be910ae423949
[ "MIT" ]
null
null
null
Chapter01/Exercise1.03/Exercise1.03.ipynb
fenago/Applied_Data_Analytics
8c2abc859ff03783aeed79b82e6be910ae423949
[ "MIT" ]
2
2021-09-17T16:32:59.000Z
2021-11-18T10:35:18.000Z
9,809
9,809
0.708431
[ [ [ "# Understanding the data\n\nIn this first part, we load the data and perform some initial exploration on it. The main goal of this step is to acquire some basic knowledge about the data, how the various features are distributed, if there are missing values in it and so on.", "_____no_output_____" ] ], [ [ "### imports\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport numpy as np\n%matplotlib inline\n\n# load hourly data\nhourly_data = pd.read_csv('../data/hour.csv')", "_____no_output_____" ] ], [ [ "Check data format, number of missing values in the data and general statistics:", "_____no_output_____" ] ], [ [ "# print some generic statistics about the data\nprint(f\"Shape of data: {hourly_data.shape}\")\nprint(f\"Number of missing values in the data: {hourly_data.isnull().sum().sum()}\")\n\n# get statistics on the numerical columns\nhourly_data.describe().T", "_____no_output_____" ], [ "# create a copy of the original data\npreprocessed_data = hourly_data.copy()\n\n# tranform seasons\nseasons_mapping = {1: 'winter', 2: 'spring', 3: 'summer', 4: 'fall'}\npreprocessed_data['season'] = preprocessed_data['season'].apply(lambda x: seasons_mapping[x])\n\n# transform yr\nyr_mapping = {0: 2011, 1: 2012}\npreprocessed_data['yr'] = preprocessed_data['yr'].apply(lambda x: yr_mapping[x])\n\n# transform weekday\nweekday_mapping = {0: 'Sunday', 1: 'Monday', 2: 'Tuesday', 3: 'Wednesday', 4: 'Thursday', 5: 'Friday', 6: 'Saturday'}\npreprocessed_data['weekday'] = preprocessed_data['weekday'].apply(lambda x: weekday_mapping[x])\n\n# transform weathersit\nweather_mapping = {1: 'clear', 2: 'cloudy', 3: 'light_rain_snow', 4: 'heavy_rain_snow'}\npreprocessed_data['weathersit'] = preprocessed_data['weathersit'].apply(lambda x: weather_mapping[x]) \n\n# transorm hum and windspeed\npreprocessed_data['hum'] = preprocessed_data['hum']*100\npreprocessed_data['windspeed'] = preprocessed_data['windspeed']*67\n\n# visualize preprocessed columns\ncols = ['season', 'yr', 'weekday', 'weathersit', 'hum', 'windspeed']\npreprocessed_data[cols].sample(10, random_state=123)", "_____no_output_____" ] ], [ [ "### Registered vs casual use analysis", "_____no_output_____" ] ], [ [ "# assert that total numer of rides is equal to the sum of registered and casual ones\nassert (preprocessed_data.casual + preprocessed_data.registered == preprocessed_data.cnt).all(), \\\n'Sum of casual and registered rides not equal to total number of rides'", "_____no_output_____" ], [ "# plot distributions of registered vs casual rides\nsns.distplot(preprocessed_data['registered'], label='registered')\nsns.distplot(preprocessed_data['casual'], label='casual')\nplt.legend()\nplt.xlabel('rides')\nplt.ylabel(\"frequency\")\nplt.title(\"Rides distributions\")\nplt.savefig('figs/rides_distributions.png', format='png')", "_____no_output_____" ], [ "# plot evolution of rides over time\nplot_data = preprocessed_data[['registered', 'casual', 'dteday']]\nax = plot_data.groupby('dteday').sum().plot(figsize=(10,6))\nax.set_xlabel(\"time\");\nax.set_ylabel(\"number of rides per day\");\n\nplt.savefig('figs/rides_daily.png', format='png')", "_____no_output_____" ], [ "# create new dataframe with necessary for plotting columns, and \n# obtain number of rides per day, by grouping over each day\nplot_data = preprocessed_data[['registered', 'casual', 'dteday']]\nplot_data = plot_data.groupby('dteday').sum()\n\n# define window for computing the rolling mean and standard deviation\nwindow = 7\nrolling_means = 
plot_data.rolling(window).mean()\nrolling_deviations = plot_data.rolling(window).std()\n\n# create a plot of the series, where we first plot the series of rolling means, \n# then we color the zone between the series of rolling means \n# +- 2 rolling standard deviations\nax = rolling_means.plot(figsize=(10,6))\nax.fill_between(rolling_means.index, \\\n rolling_means['registered'] + 2*rolling_deviations['registered'], \\\n rolling_means['registered'] - 2*rolling_deviations['registered'], \\\n alpha = 0.2)\nax.fill_between(rolling_means.index, \\\n rolling_means['casual'] + 2*rolling_deviations['casual'], \\\n rolling_means['casual'] - 2*rolling_deviations['casual'], \\\n alpha = 0.2)\nax.set_xlabel(\"time\");\nax.set_ylabel(\"number of rides per day\");\nplt.savefig('figs/rides_aggregated.png', format='png')", "_____no_output_____" ], [ "# select relevant columns\nplot_data = preprocessed_data[['hr', 'weekday', 'registered', 'casual']]\n\n# transform the data into a format, in number of entries are computed as count,\n# for each distinct hr, weekday and type (registered or casual)\nplot_data = plot_data.melt(id_vars=['hr', 'weekday'], var_name='type', value_name='count')\n\n# create FacetGrid object, in which a grid plot is produced. \n# As columns, we have the various days of the week,\n# as rows, the different types (registered and casual)\ngrid = sns.FacetGrid(plot_data, row='weekday', col='type', height=2.5,\\\n aspect=2.5, row_order=['Monday', 'Tuesday', \\\n 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday'])\n\n# populate the FacetGrid with the specific plots\ngrid.map(sns.barplot, 'hr', 'count', alpha=0.5)\ngrid.savefig('figs/weekday_hour_distributions.png', format='png')", "_____no_output_____" ], [ "# select subset of the data\nplot_data = preprocessed_data[['hr', 'season', 'registered', 'casual']]\n\n# unpivot data from wide to long format\nplot_data = plot_data.melt(id_vars=['hr', 'season'], var_name='type', \\\n value_name='count')\n\n# define FacetGrid\ngrid = sns.FacetGrid(plot_data, row='season', \\\n col='type', height=2.5, aspect=2.5, \\\n row_order=['winter', 'spring', 'summer', 'fall'])\n\n# apply plotting function to each element in the grid\ngrid.map(sns.barplot, 'hr', 'count', alpha=0.5)\n\n# save figure\ngrid.savefig('figs/exercise_1_02_a.png', format='png')", "_____no_output_____" ], [ "plot_data = preprocessed_data[['weekday', 'season', 'registered', 'casual']]\nplot_data = plot_data.melt(id_vars=['weekday', 'season'], var_name='type', value_name='count')\n\ngrid = sns.FacetGrid(plot_data, row='season', col='type', height=2.5, aspect=2.5, \n row_order=['winter', 'spring', 'summer', 'fall'])\ngrid.map(sns.barplot, 'weekday', 'count', alpha=0.5, \n order=['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday'])\n\n# save figure\ngrid.savefig('figs/exercise_1_02_b.png', format='png')", "_____no_output_____" ] ], [ [ "Exercise 1.03: Estimating average registered rides", "_____no_output_____" ] ], [ [ "# compute population mean of registered rides\npopulation_mean = preprocessed_data.registered.mean()\n\n# get sample of the data (summer 2011)\nsample = preprocessed_data[(preprocessed_data.season == \"summer\") &\\\n (preprocessed_data.yr == 2011)].registered\n\n# perform t-test and compute p-value\nfrom scipy.stats import ttest_1samp\ntest_result = ttest_1samp(sample, population_mean)\nprint(f\"Test statistic: {test_result[0]:.03f}, p-value: {test_result[1]:.03f}\")\n\n# get sample as 5% of the full data\nimport 
random\nrandom.seed(111)\nsample_unbiased = preprocessed_data.registered.sample(frac=0.05)\ntest_result_unbiased = ttest_1samp(sample_unbiased, population_mean)\nprint(f\"Unbiased test statistic: {test_result_unbiased[0]:.03f}, p-value: {test_result_unbiased[1]:.03f}\")", "_____no_output_____" ] ] ]
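As a sanity check on the one-sample t-test used in this exercise, the statistic can be reproduced by hand from its definition; the numbers below are synthetic, not the bike-sharing data:

```python
import numpy as np
from scipy.stats import ttest_1samp

rng = np.random.default_rng(0)
sample = rng.normal(loc=5.0, scale=2.0, size=100)  # synthetic sample
mu0 = 4.5  # hypothesized population mean

# t = (x_bar - mu0) / (s / sqrt(n)), with s the sample standard deviation (ddof=1)
t_manual = (sample.mean() - mu0) / (sample.std(ddof=1) / np.sqrt(len(sample)))

t_scipy, p_value = ttest_1samp(sample, mu0)
print(t_manual, t_scipy)  # the two statistics agree
```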
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ] ]
d016e30cc7fe464e2a1f67e6c936215c10454293
24,052
ipynb
Jupyter Notebook
notebook/finetune-to-livedoor-corpus.ipynb
minhpqn/bert-japanese
831eca98b2b51f8084cf00d7efe2b8e5176fe7fb
[ "Apache-2.0" ]
2
2019-03-21T16:22:38.000Z
2019-03-21T16:22:56.000Z
notebook/finetune-to-livedoor-corpus.ipynb
iki-taichi/bert-japanese
a4f170577a63bff8eb9899076dd587599f277150
[ "Apache-2.0" ]
null
null
null
notebook/finetune-to-livedoor-corpus.ipynb
iki-taichi/bert-japanese
a4f170577a63bff8eb9899076dd587599f277150
[ "Apache-2.0" ]
null
null
null
28.26322
157
0.510186
[ [ [ "# Finetuning of the pretrained Japanese BERT model\n\nFinetune the pretrained model to solve multi-class classification problems. \nThis notebook requires the following objects:\n- trained sentencepiece model (model and vocab files)\n- pretraiend Japanese BERT model\n\nDataset is livedoor ニュースコーパス in https://www.rondhuit.com/download.html. \nWe make test:dev:train = 2:2:6 datasets.", "_____no_output_____" ], [ "Results:\n\n- Full training data\n - BERT with SentencePiece\n ```\n precision recall f1-score support\n\n dokujo-tsushin 0.98 0.94 0.96 178\n it-life-hack 0.96 0.97 0.96 172\n kaden-channel 0.99 0.98 0.99 176\n livedoor-homme 0.98 0.88 0.93 95\n movie-enter 0.96 0.99 0.98 158\n peachy 0.94 0.98 0.96 174\n smax 0.98 0.99 0.99 167\n sports-watch 0.98 1.00 0.99 190\n topic-news 0.99 0.98 0.98 163\n\n micro avg 0.97 0.97 0.97 1473\n macro avg 0.97 0.97 0.97 1473\n weighted avg 0.97 0.97 0.97 1473\n ```\n - sklearn GradientBoostingClassifier with MeCab\n ```\n precision recall f1-score support\n\n dokujo-tsushin 0.89 0.86 0.88 178\n it-life-hack 0.91 0.90 0.91 172\n kaden-channel 0.90 0.94 0.92 176\n livedoor-homme 0.79 0.74 0.76 95\n movie-enter 0.93 0.96 0.95 158\n peachy 0.87 0.92 0.89 174\n smax 0.99 1.00 1.00 167\n sports-watch 0.93 0.98 0.96 190\n topic-news 0.96 0.86 0.91 163\n\n micro avg 0.92 0.92 0.92 1473\n macro avg 0.91 0.91 0.91 1473\n weighted avg 0.92 0.92 0.91 1473\n ```\n\n- Small training data (1/5 of full training data)\n - BERT with SentencePiece\n ```\n precision recall f1-score support\n\n dokujo-tsushin 0.97 0.87 0.92 178\n it-life-hack 0.86 0.86 0.86 172\n kaden-channel 0.95 0.94 0.95 176\n livedoor-homme 0.82 0.82 0.82 95\n movie-enter 0.97 0.99 0.98 158\n peachy 0.89 0.95 0.92 174\n smax 0.94 0.96 0.95 167\n sports-watch 0.97 0.97 0.97 190\n topic-news 0.94 0.94 0.94 163\n\n micro avg 0.93 0.93 0.93 1473\n macro avg 0.92 0.92 0.92 1473\n weighted avg 0.93 0.93 0.93 1473\n ```\n - sklearn GradientBoostingClassifier with MeCab\n ```\n precision recall f1-score support\n\n dokujo-tsushin 0.82 0.71 0.76 178\n it-life-hack 0.86 0.88 0.87 172\n kaden-channel 0.91 0.87 0.89 176\n livedoor-homme 0.67 0.63 0.65 95\n movie-enter 0.87 0.95 0.91 158\n peachy 0.70 0.78 0.73 174\n smax 1.00 1.00 1.00 167\n sports-watch 0.87 0.95 0.91 190\n topic-news 0.92 0.82 0.87 163\n\n micro avg 0.85 0.85 0.85 1473\n macro avg 0.85 0.84 0.84 1473\n weighted avg 0.86 0.85 0.85 1473\n ```", "_____no_output_____" ] ], [ [ "import configparser\nimport glob\nimport os\nimport pandas as pd\nimport subprocess\nimport sys\nimport tarfile \nfrom urllib.request import urlretrieve\n\nCURDIR = os.getcwd()\nCONFIGPATH = os.path.join(CURDIR, os.pardir, 'config.ini')\nconfig = configparser.ConfigParser()\nconfig.read(CONFIGPATH)", "_____no_output_____" ] ], [ [ "## Data preparing\n\nYou need execute the following cells just once.", "_____no_output_____" ] ], [ [ "FILEURL = config['FINETUNING-DATA']['FILEURL']\nFILEPATH = config['FINETUNING-DATA']['FILEPATH']\nEXTRACTDIR = config['FINETUNING-DATA']['TEXTDIR']", "_____no_output_____" ] ], [ [ "Download and unzip data.", "_____no_output_____" ] ], [ [ "%%time\n\nurlretrieve(FILEURL, FILEPATH)\n\nmode = \"r:gz\"\ntar = tarfile.open(FILEPATH, mode) \ntar.extractall(EXTRACTDIR) \ntar.close()", "_____no_output_____" ] ], [ [ "Data preprocessing.", "_____no_output_____" ] ], [ [ "def extract_txt(filename):\n with open(filename) as text_file:\n # 0: URL, 1: timestamp\n text = text_file.readlines()[2:]\n text = [sentence.strip() for sentence in text]\n 
text = list(filter(lambda line: line != '', text))\n    return ''.join(text)", "_____no_output_____" ], [ "categories = [ \n    name for name \n    in os.listdir( os.path.join(EXTRACTDIR, \"text\") ) \n    if os.path.isdir( os.path.join(EXTRACTDIR, \"text\", name) ) ]\n\ncategories = sorted(categories)", "_____no_output_____" ], [ "categories", "_____no_output_____" ], [ "table = str.maketrans({\n    '\\n': '',\n    '\\t': ' ',\n    '\\r': '',\n})", "_____no_output_____" ], [ "%%time\n\nall_text = []\nall_label = []\n\nfor cat in categories:\n    files = glob.glob(os.path.join(EXTRACTDIR, \"text\", cat, \"{}*.txt\".format(cat)))\n    files = sorted(files)\n    body = [ extract_txt(elem).translate(table) for elem in files ]\n    label = [cat] * len(body)\n    \n    all_text.extend(body)\n    all_label.extend(label)", "_____no_output_____" ], [ "df = pd.DataFrame({'text' : all_text, 'label' : all_label})", "_____no_output_____" ], [ "df.head()", "_____no_output_____" ], [ "df = df.sample(frac=1, random_state=23).reset_index(drop=True)", "_____no_output_____" ], [ "df.head()", "_____no_output_____" ] ], [ [ "Save data as tsv files. \ntest:dev:train = 2:2:6. To check the usability of finetuning, we also prepare sampled training data (1/5 of full training data).", "_____no_output_____" ] ], [ [ "df[:len(df) // 5].to_csv( os.path.join(EXTRACTDIR, \"test.tsv\"), sep='\\t', index=False)\ndf[len(df) // 5:len(df)*2 // 5].to_csv( os.path.join(EXTRACTDIR, \"dev.tsv\"), sep='\\t', index=False)\ndf[len(df)*2 // 5:].to_csv( os.path.join(EXTRACTDIR, \"train.tsv\"), sep='\\t', index=False)\n\n### 1/5 of full training data.\n# df[:len(df) // 5].to_csv( os.path.join(EXTRACTDIR, \"test.tsv\"), sep='\\t', index=False)\n# df[len(df) // 5:len(df)*2 // 5].to_csv( os.path.join(EXTRACTDIR, \"dev.tsv\"), sep='\\t', index=False)\n# df[len(df)*2 // 5:].sample(frac=0.2, random_state=23).to_csv( os.path.join(EXTRACTDIR, \"train.tsv\"), sep='\\t', index=False)", "_____no_output_____" ] ], [ [ "## Finetune pre-trained model\n\nIt will take many hours to execute the following cells in a CPU environment. \nYou can also use Colab to leverage the power of a TPU. You need to upload the created data onto your GCS bucket.\n\n[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1zZH2GWe0U-7GjJ2w2duodFfEUptvHjcx)", "_____no_output_____" ] ], [ [ "PRETRAINED_MODEL_PATH = '../model/model.ckpt-1400000'\nFINETUNE_OUTPUT_DIR = '../model/livedoor_output'", "_____no_output_____" ], [ "%%time\n# It will take many hours in a CPU environment.\n\n!python3 ../src/run_classifier.py \\\n  --task_name=livedoor \\\n  --do_train=true \\\n  --do_eval=true \\\n  --data_dir=../data/livedoor \\\n  --model_file=../model/wiki-ja.model \\\n  --vocab_file=../model/wiki-ja.vocab \\\n  --init_checkpoint={PRETRAINED_MODEL_PATH} \\\n  --max_seq_length=512 \\\n  --train_batch_size=4 \\\n  --learning_rate=2e-5 \\\n  --num_train_epochs=10 \\\n  --output_dir={FINETUNE_OUTPUT_DIR}", "_____no_output_____" ] ], [ [ "## Predict using the finetuned model\n\nLet's predict test data using the finetuned model. 
", "_____no_output_____" ] ], [ [ "import sys\nsys.path.append(\"../src\")\n\nimport tokenization_sentencepiece as tokenization\nfrom run_classifier import LivedoorProcessor\nfrom run_classifier import model_fn_builder\nfrom run_classifier import file_based_input_fn_builder\nfrom run_classifier import file_based_convert_examples_to_features\nfrom utils import str_to_value", "_____no_output_____" ], [ "sys.path.append(\"../bert\")\n\nimport modeling\nimport optimization\nimport tensorflow as tf", "_____no_output_____" ], [ "import configparser\nimport json\nimport glob\nimport os\nimport pandas as pd\nimport tempfile\n\nbert_config_file = tempfile.NamedTemporaryFile(mode='w+t', encoding='utf-8', suffix='.json')\nbert_config_file.write(json.dumps({k:str_to_value(v) for k,v in config['BERT-CONFIG'].items()}))\nbert_config_file.seek(0)\nbert_config = modeling.BertConfig.from_json_file(bert_config_file.name)", "_____no_output_____" ], [ "output_ckpts = glob.glob(\"{}/model.ckpt*data*\".format(FINETUNE_OUTPUT_DIR))\nlatest_ckpt = sorted(output_ckpts)[-1]\nFINETUNED_MODEL_PATH = latest_ckpt.split('.data-00000-of-00001')[0]", "_____no_output_____" ], [ "class FLAGS(object):\n '''Parameters.'''\n def __init__(self):\n self.model_file = \"../model/wiki-ja.model\"\n self.vocab_file = \"../model/wiki-ja.vocab\"\n self.do_lower_case = True\n self.use_tpu = False\n self.output_dir = \"/dummy\"\n self.data_dir = \"../data/livedoor\"\n self.max_seq_length = 512\n self.init_checkpoint = FINETUNED_MODEL_PATH\n self.predict_batch_size = 4\n \n # The following parameters are not used in predictions.\n # Just use to create RunConfig.\n self.master = None\n self.save_checkpoints_steps = 1\n self.iterations_per_loop = 1\n self.num_tpu_cores = 1\n self.learning_rate = 0\n self.num_warmup_steps = 0\n self.num_train_steps = 0\n self.train_batch_size = 0\n self.eval_batch_size = 0", "_____no_output_____" ], [ "FLAGS = FLAGS()", "_____no_output_____" ], [ "processor = LivedoorProcessor()\nlabel_list = processor.get_labels()", "_____no_output_____" ], [ "tokenizer = tokenization.FullTokenizer(\n model_file=FLAGS.model_file, vocab_file=FLAGS.vocab_file,\n do_lower_case=FLAGS.do_lower_case)\n\ntpu_cluster_resolver = None\n\nis_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2\n\nrun_config = tf.contrib.tpu.RunConfig(\n cluster=tpu_cluster_resolver,\n master=FLAGS.master,\n model_dir=FLAGS.output_dir,\n save_checkpoints_steps=FLAGS.save_checkpoints_steps,\n tpu_config=tf.contrib.tpu.TPUConfig(\n iterations_per_loop=FLAGS.iterations_per_loop,\n num_shards=FLAGS.num_tpu_cores,\n per_host_input_for_training=is_per_host))", "_____no_output_____" ], [ "model_fn = model_fn_builder(\n bert_config=bert_config,\n num_labels=len(label_list),\n init_checkpoint=FLAGS.init_checkpoint,\n learning_rate=FLAGS.learning_rate,\n num_train_steps=FLAGS.num_train_steps,\n num_warmup_steps=FLAGS.num_warmup_steps,\n use_tpu=FLAGS.use_tpu,\n use_one_hot_embeddings=FLAGS.use_tpu)\n\n\nestimator = tf.contrib.tpu.TPUEstimator(\n use_tpu=FLAGS.use_tpu,\n model_fn=model_fn,\n config=run_config,\n train_batch_size=FLAGS.train_batch_size,\n eval_batch_size=FLAGS.eval_batch_size,\n predict_batch_size=FLAGS.predict_batch_size)", "_____no_output_____" ], [ "predict_examples = processor.get_test_examples(FLAGS.data_dir)\npredict_file = tempfile.NamedTemporaryFile(mode='w+t', encoding='utf-8', suffix='.tf_record')\n\nfile_based_convert_examples_to_features(predict_examples, label_list,\n FLAGS.max_seq_length, tokenizer,\n 
predict_file.name)\n\npredict_drop_remainder = True if FLAGS.use_tpu else False\n\npredict_input_fn = file_based_input_fn_builder(\n input_file=predict_file.name,\n seq_length=FLAGS.max_seq_length,\n is_training=False,\n drop_remainder=predict_drop_remainder)", "_____no_output_____" ], [ "result = estimator.predict(input_fn=predict_input_fn)", "_____no_output_____" ], [ "%%time\n# It will take a few hours on CPU environment.\n\nresult = list(result)", "_____no_output_____" ], [ "result[:2]", "_____no_output_____" ] ], [ [ "Read test data set and add prediction results.", "_____no_output_____" ] ], [ [ "import pandas as pd", "_____no_output_____" ], [ "test_df = pd.read_csv(\"../data/livedoor/test.tsv\", sep='\\t')", "_____no_output_____" ], [ "test_df['predict'] = [ label_list[elem['probabilities'].argmax()] for elem in result ]", "_____no_output_____" ], [ "test_df.head()", "_____no_output_____" ], [ "sum( test_df['label'] == test_df['predict'] ) / len(test_df)", "_____no_output_____" ] ], [ [ "A littel more detailed check using `sklearn.metrics`.", "_____no_output_____" ] ], [ [ "!pip install scikit-learn", "_____no_output_____" ], [ "from sklearn.metrics import classification_report\nfrom sklearn.metrics import confusion_matrix", "_____no_output_____" ], [ "print(classification_report(test_df['label'], test_df['predict']))", "_____no_output_____" ], [ "print(confusion_matrix(test_df['label'], test_df['predict']))", "_____no_output_____" ] ], [ [ "### Simple baseline model.", "_____no_output_____" ] ], [ [ "import pandas as pd\nfrom sklearn.metrics import classification_report\nfrom sklearn.metrics import confusion_matrix", "_____no_output_____" ], [ "train_df = pd.read_csv(\"../data/livedoor/train.tsv\", sep='\\t')\ndev_df = pd.read_csv(\"../data/livedoor/dev.tsv\", sep='\\t')\ntest_df = pd.read_csv(\"../data/livedoor/test.tsv\", sep='\\t')", "_____no_output_____" ], [ "!apt-get install -q -y mecab libmecab-dev mecab-ipadic mecab-ipadic-utf8", "_____no_output_____" ], [ "!pip install mecab-python3==0.7", "_____no_output_____" ], [ "from sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.ensemble import GradientBoostingClassifier\nimport MeCab", "_____no_output_____" ], [ "m = MeCab.Tagger(\"-Owakati\")", "_____no_output_____" ], [ "train_dev_df = pd.concat([train_df, dev_df])", "_____no_output_____" ], [ "train_dev_xs = train_dev_df['text'].apply(lambda x: m.parse(x))\ntrain_dev_ys = train_dev_df['label']\n\ntest_xs = test_df['text'].apply(lambda x: m.parse(x))\ntest_ys = test_df['label']", "_____no_output_____" ], [ "vectorizer = TfidfVectorizer(max_features=750)\ntrain_dev_xs_ = vectorizer.fit_transform(train_dev_xs)\ntest_xs_ = vectorizer.transform(test_xs)", "_____no_output_____" ] ], [ [ "The following set up is not exactly identical to that of BERT because inside Classifier it uses `train_test_split` with shuffle. 
\nIn addition, the parameters are not well tuned; however, we think this is enough to check the power of BERT.", "_____no_output_____" ] ], [ [ "%%time\n\n# validation_fraction must lie in (0, 1), so pass the dev share relative to the training data\nmodel = GradientBoostingClassifier(n_estimators=200,\n                                   validation_fraction=len(dev_df)/len(train_df),\n                                   n_iter_no_change=5,\n                                   tol=0.01,\n                                   random_state=23)\n\n### 1/5 of full training data.\n# model = GradientBoostingClassifier(n_estimators=200,\n#                                    validation_fraction=len(dev_df)/len(train_df),\n#                                    n_iter_no_change=5,\n#                                    tol=0.01,\n#                                    random_state=23)\n\nmodel.fit(train_dev_xs_, train_dev_ys)", "_____no_output_____" ], [ "print(classification_report(test_ys, model.predict(test_xs_)))", "_____no_output_____" ], [ "print(confusion_matrix(test_ys, model.predict(test_xs_)))", "_____no_output_____" ] ] ]
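The label assignment used for the test set in this notebook is just an argmax over per-class probabilities; below is a tiny self-contained sketch of that mapping, where the probability vectors are toy values rather than real model output:

```python
import numpy as np

# Truncated toy label list; the real notebook uses all nine livedoor categories
label_list = ["dokujo-tsushin", "it-life-hack", "kaden-channel"]

# Each prediction carries a probability vector over the classes
result = [{"probabilities": np.array([0.1, 0.7, 0.2])},
          {"probabilities": np.array([0.8, 0.1, 0.1])}]

predicted = [label_list[elem["probabilities"].argmax()] for elem in result]
print(predicted)  # ['it-life-hack', 'dokujo-tsushin']
```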
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code" ] ]
d016e3434d72d1a8dc27cac89a3334126e826238
44,198
ipynb
Jupyter Notebook
traffic_signs_rec.ipynb
ngachago/traffic-signs
a06615b5ef6659c1f725b321f29eaab6b6576129
[ "MIT" ]
null
null
null
traffic_signs_rec.ipynb
ngachago/traffic-signs
a06615b5ef6659c1f725b321f29eaab6b6576129
[ "MIT" ]
null
null
null
traffic_signs_rec.ipynb
ngachago/traffic-signs
a06615b5ef6659c1f725b321f29eaab6b6576129
[ "MIT" ]
null
null
null
121.090411
18,164
0.858003
[ [ [ "import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\nfrom PIL import Image\nimport os\nfrom sklearn.model_selection import train_test_split\nfrom tensorflow.keras.utils import to_categorical\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Conv2D, MaxPool2D, Dense, Flatten, Dropout\nfrom sklearn.metrics import accuracy_score\nfrom tensorflow.keras import callbacks", "_____no_output_____" ], [ "data = []\nlabels = []\nclasses = 43\ncur_path = os.getcwd()\n\nfor i in range(classes):\n path = os.path.join(cur_path, 'data/Train', str(i))\n# print(path)\n images = os.listdir(path)\n \n for a in images:\n try:\n image = Image.open(path + '\\\\' + a)\n image = image.resize((30, 30))\n image = np.array(image)\n #sim = Image.fromarray(image)\n data.append(image)\n labels.append(i)\n except:\n print(\"Error loading image\")\n \ndata = np.array(data)\nlabels = np.array(labels)\n\n", "_____no_output_____" ], [ "print(data.shape)\nprint (labels.shape)", "(39209, 30, 30, 3)\n(39209,)\n" ], [ "X_train, X_test, y_train, y_test = train_test_split(data, labels, test_size=0.2, random_state=42)\nprint(X_train.shape, X_test.shape, y_train.shape, y_test.shape)", "(31367, 30, 30, 3) (7842, 30, 30, 3) (31367,) (7842,)\n" ], [ "y_train = to_categorical(y_train, 43)\ny_test = to_categorical(y_test, 43)", "_____no_output_____" ], [ "print(y_train[1])", "[0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.\n 0. 0. 0. 0. 1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]\n" ], [ "# Build a CNN model\n\nmodel = Sequential()\nmodel.add(Conv2D(filters=32, kernel_size=(5,5), activation='relu', input_shape=X_train.shape[1:]))\nmodel.add(Conv2D(filters=32, kernel_size=(5,5), activation='relu'))\nmodel.add(MaxPool2D(pool_size=(2,2)))\nmodel.add(Dropout(rate=0.25))\nmodel.add(Conv2D(filters=64, kernel_size=(3,3), activation='relu'))\nmodel.add(Conv2D(filters=64, kernel_size=(3,3), activation='relu'))\nmodel.add(MaxPool2D(pool_size=(2,2)))\nmodel.add(Dropout(rate=0.25))\nmodel.add(Flatten())\nmodel.add(Dense(256, activation='relu'))\nmodel.add(Dropout(rate=0.5))\nmodel.add(Dense(43, activation='softmax'))", "_____no_output_____" ], [ "# Compile the model\n\nmodel.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])", "_____no_output_____" ], [ "# Train and validate the model\n\nepochs = 15\nhistory = model.fit(X_train, y_train, batch_size=32, epochs=epochs, validation_split=0.2)", "Epoch 1/15\n785/785 [==============================] - 54s 67ms/step - loss: 2.1882 - accuracy: 0.4653 - val_loss: 0.5926 - val_accuracy: 0.8604\nEpoch 2/15\n785/785 [==============================] - 53s 67ms/step - loss: 0.7583 - accuracy: 0.7811 - val_loss: 0.2336 - val_accuracy: 0.9413\nEpoch 3/15\n785/785 [==============================] - 53s 67ms/step - loss: 0.4730 - accuracy: 0.8633 - val_loss: 0.1567 - val_accuracy: 0.9582\nEpoch 4/15\n785/785 [==============================] - 53s 67ms/step - loss: 0.3630 - accuracy: 0.8955 - val_loss: 0.1178 - val_accuracy: 0.9676\nEpoch 5/15\n785/785 [==============================] - 53s 67ms/step - loss: 0.2947 - accuracy: 0.9160 - val_loss: 0.0913 - val_accuracy: 0.9750\nEpoch 6/15\n785/785 [==============================] - 53s 67ms/step - loss: 0.2789 - accuracy: 0.9213 - val_loss: 0.1012 - val_accuracy: 0.9707\nEpoch 7/15\n785/785 [==============================] - 53s 68ms/step - loss: 0.2628 - accuracy: 0.9286 - val_loss: 0.0895 - val_accuracy: 0.9775\nEpoch 
8/15\n785/785 [==============================] - 53s 68ms/step - loss: 0.2483 - accuracy: 0.9332 - val_loss: 0.0664 - val_accuracy: 0.9791\nEpoch 9/15\n785/785 [==============================] - 54s 68ms/step - loss: 0.2340 - accuracy: 0.9369 - val_loss: 0.0847 - val_accuracy: 0.9780\nEpoch 10/15\n785/785 [==============================] - 53s 68ms/step - loss: 0.2557 - accuracy: 0.9331 - val_loss: 0.0606 - val_accuracy: 0.9828\nEpoch 11/15\n785/785 [==============================] - 53s 67ms/step - loss: 0.2434 - accuracy: 0.9349 - val_loss: 0.0778 - val_accuracy: 0.9802\nEpoch 12/15\n785/785 [==============================] - 53s 67ms/step - loss: 0.2183 - accuracy: 0.9457 - val_loss: 0.0963 - val_accuracy: 0.9748\nEpoch 13/15\n785/785 [==============================] - 52s 67ms/step - loss: 0.2161 - accuracy: 0.9421 - val_loss: 0.0567 - val_accuracy: 0.9853\nEpoch 14/15\n785/785 [==============================] - 52s 67ms/step - loss: 0.2036 - accuracy: 0.9488 - val_loss: 0.0440 - val_accuracy: 0.9901\nEpoch 15/15\n785/785 [==============================] - 52s 67ms/step - loss: 0.2032 - accuracy: 0.9475 - val_loss: 0.0664 - val_accuracy: 0.9815\n" ], [ "# Plot accuracy and loss\n\nplt.figure(0)\nplt.plot(history.history['accuracy'], label='training accuracy')\nplt.plot(history.history['val_accuracy'], label='val accuracy')\nplt.title('Accuracy')\nplt.xlabel('epochs')\nplt.ylabel('accuracy')\nplt.legend()\n\nplt.figure(1)\nplt.plot(history.history['loss'], label='training loss')\nplt.plot(history.history['val_loss'], label='val loss')\nplt.title('Loss')\nplt.xlabel('epochs')\nplt.ylabel('loss')\nplt.legend()", "_____no_output_____" ], [ "y_test = pd.read_csv('data/Test.csv')\n\nlabels = y_test['ClassId'].values\nimgs = y_test['Path'].values\ndata=[]\nfor img in imgs:\n path = 'data/'+img\n image = Image.open(path)\n image = image.resize((30, 30))\n data.append(np.array(image))\n\nX_test = np.array(data)\n# pred = model.predict_classes(X_test) - depricated\n\npred = np.argmax(model.predict(X_test), axis=-1)\naccuracy_score(labels, pred)", "_____no_output_____" ], [ "model.save('traffic_classifier.h5')", "_____no_output_____" ], [ " ", "_____no_output_____" ] ] ]
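A hedged sketch of how the saved classifier might be used for single-image inference; the image path is hypothetical, and the preprocessing mirrors the training pipeline (30x30 resize, no pixel normalization):

```python
import numpy as np
from PIL import Image
from tensorflow.keras.models import load_model

model = load_model('traffic_classifier.h5')

# Hypothetical test image; preprocess exactly like the training data
image = Image.open('data/Test/00001.png').resize((30, 30))
x = np.expand_dims(np.array(image), axis=0)  # shape (1, 30, 30, 3)

# predict() returns one softmax vector per image; argmax gives the class id
class_id = int(np.argmax(model.predict(x), axis=-1)[0])
print('Predicted class:', class_id)
```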
[ "code" ]
[ [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ] ]
d016e8dd5093cd233a5ed398b452eda0e5da45c4
8,789
ipynb
Jupyter Notebook
path_following_lateral_dynamics.ipynb
christianausb/vehicleControl
e53eef2e15da4b381344259eb9c482d711d16551
[ "MIT" ]
1
2022-01-10T08:16:51.000Z
2022-01-10T08:16:51.000Z
path_following_lateral_dynamics.ipynb
christianausb/vehicleControl
e53eef2e15da4b381344259eb9c482d711d16551
[ "MIT" ]
null
null
null
path_following_lateral_dynamics.ipynb
christianausb/vehicleControl
e53eef2e15da4b381344259eb9c482d711d16551
[ "MIT" ]
1
2021-07-16T02:34:33.000Z
2021-07-16T02:34:33.000Z
32.917603
201
0.575265
[ [ [ "import json\nimport math\nimport numpy as np\nimport openrtdynamics2.lang as dy\nimport openrtdynamics2.targets as tg\n\nfrom vehicle_lib.vehicle_lib import *", "_____no_output_____" ], [ "# load track data\nwith open(\"track_data/simple_track.json\", \"r\") as read_file:\n track_data = json.load(read_file)\n", "_____no_output_____" ], [ "\n#\n# Demo: a vehicle controlled to follow a given path\n#\n# Implemented using the code generator openrtdynamics 2 - https://pypi.org/project/openrtdynamics2/ .\n# This generates c++ code for Web Assembly to be run within the browser.\n#\n\nsystem = dy.enter_system()\n\nvelocity = dy.system_input( dy.DataTypeFloat64(1), name='velocity', default_value=6.0, value_range=[0, 25], title=\"vehicle velocity\")\nmax_lateral_velocity = dy.system_input( dy.DataTypeFloat64(1), name='max_lateral_velocity', default_value=1.0, value_range=[0, 4.0], title=\"maximal lateral velocity\")\nmax_lateral_accleration = dy.system_input( dy.DataTypeFloat64(1), name='max_lateral_accleration', default_value=2.0, value_range=[1.0, 4.0], title=\"maximal lateral acceleration\")\n\n# parameters\nwheelbase = 3.0\n\n# sampling time\nTs = 0.01\n\n# create storage for the reference path:\npath = import_path_data(track_data)\n\n# create placeholders for the plant output signals\nx = dy.signal()\ny = dy.signal()\npsi = dy.signal()\n\n# track the evolution of the closest point on the path to the vehicles position\nprojection = track_projection_on_path(path, x, y)\n\nd_star = projection['d_star'] # the distance parameter of the path describing the closest point to the vehicle\nx_r = projection['x_r'] # (x_r, y_r) the projected vehicle position on the path\ny_r = projection['y_r']\npsi_rr = projection['psi_r'] # the orientation angle (tangent of the path)\nK_r = projection['K_r'] # the curvature of the path\nDelta_l = projection['Delta_l'] # the lateral distance between vehicle and path \n\n#\n# project the vehicle velocity onto the path yielding v_star \n#\n# Used formula inside project_velocity_on_path:\n# v_star = d d_star / dt = v * cos( Delta_u ) / ( 1 - Delta_l * K(d_star) ) \n#\n\nDelta_u = dy.signal() # feedback from control\nv_star = project_velocity_on_path(velocity, Delta_u, Delta_l, K_r)\n\ndy.append_output(v_star, 'v_star')\n\n#\n# compute an enhanced (less noise) signal for the path orientation psi_r by integrating the \n# curvature profile and fusing the result with psi_rr to mitigate the integration drift.\n#\n\npsi_r, psi_r_dot = compute_path_orientation_from_curvature( Ts, v_star, psi_rr, K_r, L=1.0 )\n\ndy.append_output(psi_rr, 'psi_rr')\ndy.append_output(psi_r_dot, 'psi_r_dot')\n\n\n\n\n\n\n#\n# lateral open-loop control to realize an 'obstacle-avoiding maneuver'\n#\n# the dynamic model for the lateral distance Delta_l is \n#\n# d/dt Delta_l = u, \n#\n# meaning u is the lateral velocity to which is used to control the lateral\n# distance to the path.\n#\n\n# generate a velocity profile\nu_move_left = dy.signal_step( dy.int32(50) ) - dy.signal_step( dy.int32(200) )\nu_move_right = dy.signal_step( dy.int32(500) ) - dy.signal_step( dy.int32(350) )\n\n# apply a rate limiter to limit the acceleration\nu = dy.rate_limit( max_lateral_velocity * (u_move_left + u_move_right), Ts, dy.float64(-1) * max_lateral_accleration, max_lateral_accleration) \n\ndy.append_output(u, 'u')\n\n# internal lateral model (to verify the lateral dynamics of the simulated vehicle)\nDelta_l_mdl = dy.euler_integrator(u, Ts)\ndy.append_output(Delta_l_mdl, 'Delta_l_mdl')\n\n\n\n\n\n#\n# path tracking 
control\n#\n# Control of the lateral distance to the path can be performed via the augmented control\n# variable u. \n#\n# Herein, a linearization yielding the resulting lateral dynamics u --> Delta_l : 1/s is applied.\n#\n\nDelta_u << dy.asin( dy.saturate(u / velocity, -0.99, 0.99) )\ndelta_star = psi_r - psi\ndelta = delta_star + Delta_u\ndelta = dy.unwrap_angle(angle=delta, normalize_around_zero = True)\n\ndy.append_output(Delta_u, 'Delta_u')\ndy.append_output(delta_star, 'delta_star')\n\n\n#\n# The model of the vehicle including a disturbance\n#\n\n\n# steering angle limit\ndelta = dy.saturate(u=delta, lower_limit=-math.pi/2.0, upper_limit=math.pi/2.0)\n\n# the model of the vehicle\nx_, y_, psi_, x_dot, y_dot, psi_dot = discrete_time_bicycle_model(delta, velocity, Ts, wheelbase)\n\n# close the feedback loops\nx << x_\ny << y_\npsi << psi_\n\n\n\n#\n# outputs: these are available for visualization in the html set-up\n#\n\ndy.append_output(x, 'x')\ndy.append_output(y, 'y')\ndy.append_output(psi, 'psi')\n\ndy.append_output(delta, 'steering')\n\ndy.append_output(x_r, 'x_r')\ndy.append_output(y_r, 'y_r')\ndy.append_output(psi_r, 'psi_r')\n\ndy.append_output(Delta_l, 'Delta_l')\n\n\n\n\n# generate code for Web Assembly (wasm), requires emcc (emscripten) to build\ncode_gen_results = dy.generate_code(template=tg.TargetCppWASM(), folder=\"generated/path_following_lateral_dynamics\", build=True)\n\n#\ndy.clear()\n\n", "compiling system Sys1000_optim_loop (level 1)... \ncompiling system simulation (level 0)... \nGenerated code will be written to generated/path_following_lateral_dynamics .\nwriting file generated/path_following_lateral_dynamics/simulation_manifest.json\nwriting file generated/path_following_lateral_dynamics/main.cpp\nRunning compiler: emcc --bind -s MODULARIZE=1 -s EXPORT_NAME=\"ORTD_simulator\" generated/path_following_lateral_dynamics/main.cpp -O2 -s -o generated/path_following_lateral_dynamics/main.js\nCompilation result: 0\n" ], [ "import IPython\nIPython.display.IFrame(src='../vehicle_control_tutorial/path_following_lateral_dynamics.html', width='100%', height=1000)", "_____no_output_____" ] ] ]
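Outside the code-generation framework, the velocity-projection formula quoted in the comments above can be checked with plain floats; the numbers here are made up for illustration:

```python
import math

def project_velocity_on_path(v, delta_u, delta_l, kappa):
    # v_star = d d_star/dt = v * cos(Delta_u) / (1 - Delta_l * K(d_star))
    return v * math.cos(delta_u) / (1.0 - delta_l * kappa)

# 6 m/s, heading 0.1 rad off the path, 0.5 m lateral offset, curvature 0.05 1/m
print(project_velocity_on_path(6.0, 0.1, 0.5, 0.05))  # approx. 6.12 m/s
```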
[ "code" ]
[ [ "code", "code", "code", "code" ] ]
d016f1af997026bff50611dbb6946f98d681cd81
38,984
ipynb
Jupyter Notebook
site/en/guide/mixed_precision.ipynb
DorianKodelja/docs
186899c6252048b5a4f5cf89cc33e4dcc8426e5f
[ "Apache-2.0" ]
3
2020-09-23T14:09:41.000Z
2020-09-23T19:26:32.000Z
site/en/guide/mixed_precision.ipynb
DorianKodelja/docs
186899c6252048b5a4f5cf89cc33e4dcc8426e5f
[ "Apache-2.0" ]
1
2021-02-23T20:17:39.000Z
2021-02-23T20:17:39.000Z
site/en/guide/mixed_precision.ipynb
DorianKodelja/docs
186899c6252048b5a4f5cf89cc33e4dcc8426e5f
[ "Apache-2.0" ]
null
null
null
42.792536
636
0.607916
[ [ [ "##### Copyright 2019 The TensorFlow Authors.", "_____no_output_____" ] ], [ [ "#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.", "_____no_output_____" ] ], [ [ "# Mixed precision", "_____no_output_____" ], [ "<table class=\"tfo-notebook-buttons\" align=\"left\">\n <td>\n <a target=\"_blank\" href=\"https://www.tensorflow.org/guide/mixed_precision\"><img src=\"https://www.tensorflow.org/images/tf_logo_32px.png\" />View on TensorFlow.org</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/guide/mixed_precision.ipynb\"><img src=\"https://www.tensorflow.org/images/colab_logo_32px.png\" />Run in Google Colab</a>\n </td>\n <td>\n <a target=\"_blank\" href=\"https://github.com/tensorflow/docs/blob/master/site/en/guide/mixed_precision.ipynb\"><img src=\"https://www.tensorflow.org/images/GitHub-Mark-32px.png\" />View source on GitHub</a>\n </td>\n <td>\n <a href=\"https://storage.googleapis.com/tensorflow_docs/docs/site/en/guide/mixed_precision.ipynb\"><img src=\"https://www.tensorflow.org/images/download_logo_32px.png\" />Download notebook</a>\n </td>\n</table>", "_____no_output_____" ], [ "## Overview\n\nMixed precision is the use of both 16-bit and 32-bit floating-point types in a model during training to make it run faster and use less memory. By keeping certain parts of the model in the 32-bit types for numeric stability, the model will have a lower step time and train equally as well in terms of the evaluation metrics such as accuracy. This guide describes how to use the experimental Keras mixed precision API to speed up your models. Using this API can improve performance by more than 3 times on modern GPUs and 60% on TPUs.", "_____no_output_____" ], [ "Note: The Keras mixed precision API is currently experimental and may change.", "_____no_output_____" ], [ "Today, most models use the float32 dtype, which takes 32 bits of memory. However, there are two lower-precision dtypes, float16 and bfloat16, each which take 16 bits of memory instead. Modern accelerators can run operations faster in the 16-bit dtypes, as they have specialized hardware to run 16-bit computations and 16-bit dtypes can be read from memory faster.\n\nNVIDIA GPUs can run operations in float16 faster than in float32, and TPUs can run operations in bfloat16 faster than float32. Therefore, these lower-precision dtypes should be used whenever possible on those devices. However, variables and a few computations should still be in float32 for numeric reasons so that the model trains to the same quality. The Keras mixed precision API allows you to use a mix of either float16 or bfloat16 with float32, to get the performance benefits from float16/bfloat16 and the numeric stability benefits from float32.\n\nNote: In this guide, the term \"numeric stability\" refers to how a model's quality is affected by the use of a lower-precision dtype instead of a higher precision dtype. 
We say an operation is \"numerically unstable\" in float16 or bfloat16 if running it in one of those dtypes causes the model to have worse evaluation accuracy or other metrics compared to running the operation in float32.", "_____no_output_____" ], [ "## Setup", "_____no_output_____" ], [ "The Keras mixed precision API is available in TensorFlow 2.1.", "_____no_output_____" ] ], [ [ "import tensorflow as tf\n\nfrom tensorflow import keras\nfrom tensorflow.keras import layers\nfrom tensorflow.keras.mixed_precision import experimental as mixed_precision", "_____no_output_____" ] ], [ [ "## Supported hardware\n\nWhile mixed precision will run on most hardware, it will only speed up models on recent NVIDIA GPUs and Cloud TPUs. NVIDIA GPUs support using a mix of float16 and float32, while TPUs support a mix of bfloat16 and float32.\n\nAmong NVIDIA GPUs, those with compute capability 7.0 or higher will see the greatest performance benefit from mixed precision because they have special hardware units, called Tensor Cores, to accelerate float16 matrix multiplications and convolutions. Older GPUs offer no math performance benefit for using mixed precision, however memory and bandwidth savings can enable some speedups. You can look up the compute capability for your GPU at NVIDIA's [CUDA GPU web page](https://developer.nvidia.com/cuda-gpus). Examples of GPUs that will benefit most from mixed precision include RTX GPUs, the Titan V, and the V100.", "_____no_output_____" ], [ "Note: If running this guide in Google Colab, the GPU runtime typically has a P100 connected. The P100 has compute capability 6.0 and is not expected to show a significant speedup.\n\nYou can check your GPU type with the following. The command only exists if the\nNVIDIA drivers are installed, so the following will raise an error otherwise.", "_____no_output_____" ] ], [ [ "!nvidia-smi -L", "_____no_output_____" ] ], [ [ "All Cloud TPUs support bfloat16.\n\nEven on CPUs and older GPUs, where no speedup is expected, mixed precision APIs can still be used for unit testing, debugging, or just to try out the API.", "_____no_output_____" ], [ "## Setting the dtype policy", "_____no_output_____" ], [ "To use mixed precision in Keras, you need to create a `tf.keras.mixed_precision.experimental.Policy`, typically referred to as a *dtype policy*. Dtype policies specify the dtypes layers will run in. In this guide, you will construct a policy from the string `'mixed_float16'` and set it as the global policy. This will will cause subsequently created layers to use mixed precision with a mix of float16 and float32.", "_____no_output_____" ] ], [ [ "policy = mixed_precision.Policy('mixed_float16')\nmixed_precision.set_policy(policy)", "_____no_output_____" ] ], [ [ "The policy specifies two important aspects of a layer: the dtype the layer's computations are done in, and the dtype of a layer's variables. Above, you created a `mixed_float16` policy (i.e., a `mixed_precision.Policy` created by passing the string `'mixed_float16'` to its constructor). With this policy, layers use float16 computations and float32 variables. Computations are done in float16 for performance, but variables must be kept in float32 for numeric stability. 
You can directly query these properties of the policy.", "_____no_output_____" ] ], [ [ "print('Compute dtype: %s' % policy.compute_dtype)\nprint('Variable dtype: %s' % policy.variable_dtype)", "_____no_output_____" ] ], [ [ "As mentioned before, the `mixed_float16` policy will most significantly improve performance on NVIDIA GPUs with compute capability of at least 7.0. The policy will run on other GPUs and CPUs but may not improve performance. For TPUs, the `mixed_bfloat16` policy should be used instead.", "_____no_output_____" ], [ "## Building the model", "_____no_output_____" ], [ "Next, let's start building a simple model. Very small toy models typically do not benefit from mixed precision, because overhead from the TensorFlow runtime typically dominates the execution time, making any performance improvement on the GPU negligible. Therefore, let's build two large `Dense` layers with 4096 units each if a GPU is used.", "_____no_output_____" ] ], [ [ "inputs = keras.Input(shape=(784,), name='digits')\nif tf.config.list_physical_devices('GPU'):\n  print('The model will run with 4096 units on a GPU')\n  num_units = 4096\nelse:\n  # Use fewer units on CPUs so the model finishes in a reasonable amount of time\n  print('The model will run with 64 units on a CPU')\n  num_units = 64\ndense1 = layers.Dense(num_units, activation='relu', name='dense_1')\nx = dense1(inputs)\ndense2 = layers.Dense(num_units, activation='relu', name='dense_2')\nx = dense2(x)", "_____no_output_____" ] ], [ [ "Each layer has a policy and uses the global policy by default. Each of the `Dense` layers therefore has the `mixed_float16` policy because you set the global policy to `mixed_float16` previously. This will cause the dense layers to do float16 computations and have float32 variables. They cast their inputs to float16 in order to do float16 computations, which causes their outputs to be float16 as a result. Their variables are float32 and will be cast to float16 when the layers are called to avoid errors from dtype mismatches.", "_____no_output_____" ] ], [ [ "print('x.dtype: %s' % x.dtype.name)\n# 'kernel' is dense1's variable\nprint('dense1.kernel.dtype: %s' % dense1.kernel.dtype.name)", "_____no_output_____" ] ], [ [ "Next, create the output predictions. Normally, you can create the output predictions as follows, but this is not always numerically stable with float16.", "_____no_output_____" ] ], [ [ "# INCORRECT: softmax and model output will be float16, when it should be float32\noutputs = layers.Dense(10, activation='softmax', name='predictions')(x)\nprint('Outputs dtype: %s' % outputs.dtype.name)", "_____no_output_____" ] ], [ [ "A softmax activation at the end of the model should be float32. Because the dtype policy is `mixed_float16`, the softmax activation would normally have a float16 compute dtype and output float16 tensors.\n\nThis can be fixed by separating the Dense and softmax layers, and by passing `dtype='float32'` to the softmax layer.", "_____no_output_____" ] ], [ [ "# CORRECT: softmax and model output are float32\nx = layers.Dense(10, name='dense_logits')(x)\noutputs = layers.Activation('softmax', dtype='float32', name='predictions')(x)\nprint('Outputs dtype: %s' % outputs.dtype.name)", "_____no_output_____" ] ], [ [ "Passing `dtype='float32'` to the softmax layer constructor overrides the layer's dtype policy to be the `float32` policy, which does computations and keeps variables in float32. 
Equivalently, we could have instead passed `dtype=mixed_precision.Policy('float32')`; layers always convert the dtype argument to a policy. Because the `Activation` layer has no variables, the policy's variable dtype is ignored, but the policy's compute dtype of float32 causes softmax and the model output to be float32. \n\n\nAdding a float16 softmax in the middle of a model is fine, but a softmax at the end of the model should be in float32. The reason is that if the intermediate tensor flowing from the softmax to the loss is float16 or bfloat16, numeric issues may occur.\n\nYou can override the dtype of any layer to be float32 by passing `dtype='float32'` if you think it will not be numerically stable with float16 computations. But typically, this is only necessary on the last layer of the model, as most layers have sufficient precision with `mixed_float16` and `mixed_bfloat16`.\n\nEven if the model does not end in a softmax, the outputs should still be float32. While unnecessary for this specific model, the model outputs can be cast to float32 with the following:", "_____no_output_____" ] ], [ [ "# The linear activation is an identity function. So this simply casts 'outputs'\n# to float32. In this particular case, 'outputs' is already float32 so this is a\n# no-op.\noutputs = layers.Activation('linear', dtype='float32')(outputs)", "_____no_output_____" ] ], [ [ "Next, finish and compile the model, and generate input data.", "_____no_output_____" ] ], [ [ "model = keras.Model(inputs=inputs, outputs=outputs)\nmodel.compile(loss='sparse_categorical_crossentropy',\n              optimizer=keras.optimizers.RMSprop(),\n              metrics=['accuracy'])\n\n(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()\nx_train = x_train.reshape(60000, 784).astype('float32') / 255\nx_test = x_test.reshape(10000, 784).astype('float32') / 255", "_____no_output_____" ] ], [ [ "This example casts the input data from int8 to float32. We don't cast to float16 since the division by 255 is on the CPU, which runs float16 operations slower than float32 operations. In this case, the performance difference is negligible, but in general you should run input processing math in float32 if it runs on the CPU. The first layer of the model will cast the inputs to float16, as each layer casts floating-point inputs to its compute dtype.\n\nThe initial weights of the model are retrieved. This will allow training from scratch again by loading the weights.", "_____no_output_____" ] ], [ [ "initial_weights = model.get_weights()", "_____no_output_____" ] ], [ [ "## Training the model with Model.fit\n\nNext, train the model.", "_____no_output_____" ] ], [ [ "history = model.fit(x_train, y_train,\n                    batch_size=8192,\n                    epochs=5,\n                    validation_split=0.2)\ntest_scores = model.evaluate(x_test, y_test, verbose=2)\nprint('Test loss:', test_scores[0])\nprint('Test accuracy:', test_scores[1])\n", "_____no_output_____" ] ], [ [ "Notice the model prints the time per sample in the logs: for example, \"4us/sample\". The first epoch may be slower as TensorFlow spends some time optimizing the model, but afterwards the time per sample should stabilize. \n\nIf you are running this guide in Colab, you can compare the performance of mixed precision with float32. To do so, change the policy from `mixed_float16` to `float32` in the \"Setting the dtype policy\" section, then rerun all the cells up to this point. 
On GPUs with at least compute capability 7.0, you should see the time per sample significantly increase, indicating mixed precision sped up the model. For example, with a Titan V GPU, the per-sample time increases from 4us to 12us. Make sure to change the policy back to `mixed_float16` and rerun the cells before continuing with the guide.\n\nFor many real-world models, mixed precision also allows you to double the batch size without running out of memory, as float16 tensors take half the memory. This does not apply, however, to this toy model, as you can likely run the model in any dtype where each batch consists of the entire MNIST dataset of 60,000 images.\n\nIf running mixed precision on a TPU, you will not see as much of a performance gain compared to running mixed precision on GPUs. This is because TPUs already do certain ops in bfloat16 under the hood even with the default dtype policy of `float32`. TPU hardware does not support float32 for certain ops which are numerically stable in bfloat16, such as matmul. For such ops the TPU backend will silently use bfloat16 internally instead. As a consequence, passing `dtype='float32'` to layers which use such ops may have no numerical effect; however, it is unlikely running such layers with bfloat16 computations will be harmful.", "_____no_output_____" ], [ "## Loss scaling\n\nLoss scaling is a technique which `tf.keras.Model.fit` automatically performs with the `mixed_float16` policy to avoid numeric underflow. This section describes loss scaling and how to customize its behavior.", "_____no_output_____" ], [ "### Underflow and Overflow\n\nThe float16 data type has a narrow dynamic range compared to float32. This means values above $65504$ will overflow to infinity and values below $6.0 \\times 10^{-8}$ will underflow to zero. float32 and bfloat16 have a much higher dynamic range so that overflow and underflow are not a problem.\n\nFor example:", "_____no_output_____" ] ], [ [ "x = tf.constant(256, dtype='float16')\n(x ** 2).numpy()  # Overflow", "_____no_output_____" ], [ "x = tf.constant(1e-5, dtype='float16')\n(x ** 2).numpy()  # Underflow", "_____no_output_____" ] ], [ [ "In practice, overflow with float16 rarely occurs. Additionally, underflow also rarely occurs during the forward pass. However, during the backward pass, gradients can underflow to zero. Loss scaling is a technique to prevent this underflow.", "_____no_output_____" ], [ "### Loss scaling background\n\nThe basic concept of loss scaling is simple: simply multiply the loss by some large number, say $1024$. We call this number the *loss scale*. This will cause the gradients to scale by $1024$ as well, greatly reducing the chance of underflow. Once the final gradients are computed, divide them by $1024$ to bring them back to their correct values.\n\nThe pseudocode for this process is:\n\n```\nloss_scale = 1024\nloss = model(inputs)\nloss *= loss_scale\n# We assume `grads` are float32. We do not want to divide float16 gradients\ngrads = compute_gradient(loss, model.trainable_variables)\ngrads /= loss_scale\n```\n\nChoosing a loss scale can be tricky. If the loss scale is too low, gradients may still underflow to zero. If too high, the opposite problem occurs: the gradients may overflow to infinity.\n\nTo solve this, TensorFlow dynamically determines the loss scale so you do not have to choose one manually. If you use `tf.keras.Model.fit`, loss scaling is done for you so you do not have to do any extra work. 
This is explained further in the next section.\n", "_____no_output_____" ], [ "### Choosing the loss scale\n\nEach dtype policy optionally has an associated `tf.mixed_precision.experimental.LossScale` object, which represents a fixed or dynamic loss scale. By default, the loss scale for the `mixed_float16` policy is a `tf.mixed_precision.experimental.DynamicLossScale`, which dynamically determines the loss scale value. Other policies do not have a loss scale by default, as it is only necessary when float16 is used. You can query the loss scale of the policy:", "_____no_output_____" ] ], [ [ "loss_scale = policy.loss_scale\nprint('Loss scale: %s' % loss_scale)", "_____no_output_____" ] ], [ [ "The loss scale prints a lot of internal state, but you can ignore it. The most important part is the `current_loss_scale` part, which shows the loss scale's current value.", "_____no_output_____" ], [ "You can instead use a static loss scale by passing a number when constructing a dtype policy.", "_____no_output_____" ] ], [ [ "new_policy = mixed_precision.Policy('mixed_float16', loss_scale=1024)\nprint(new_policy.loss_scale)", "_____no_output_____" ] ], [ [ "The dtype policy constructor always converts the loss scale to a `LossScale` object. In this case, it's converted to a `tf.mixed_precision.experimental.FixedLossScale`, the only other `LossScale` subclass other than `DynamicLossScale`.", "_____no_output_____" ], [ "Note: *Using anything other than a dynamic loss scale is not recommended*. Choosing a fixed loss scale can be difficult, as making it too low will cause the model to not train as well, and making it too high will cause Infs or NaNs to appear in the gradients. A dynamic loss scale is typically near the optimal loss scale, so you do not have to do any work. Currently, dynamic loss scales are a bit slower than fixed loss scales, but the performance will be improved in the future.", "_____no_output_____" ], [ "Models, like layers, each have a dtype policy. If present, a model uses its policy's loss scale to apply loss scaling in the `tf.keras.Model.fit` method. This means if `Model.fit` is used, you do not have to worry about loss scaling at all: The `mixed_float16` policy will have a dynamic loss scale by default, and `Model.fit` will apply it.\n\nWith custom training loops, the model will ignore the policy's loss scale, and you will have to apply it manually. This is explained in the next section.", "_____no_output_____" ], [ "## Training the model with a custom training loop", "_____no_output_____" ], [ "So far, you trained a Keras model with mixed precision using `tf.keras.Model.fit`. Next, you will use mixed precision with a custom training loop. If you do not already know what a custom training loop is, please read [the Custom training guide](../tutorials/customization/custom_training_walkthrough.ipynb) first.", "_____no_output_____" ], [ "Running a custom training loop with mixed precision requires two changes over running it in float32:\n1. Build the model with mixed precision (you already did this)\n2. Explicitly use loss scaling if `mixed_float16` is used.\n", "_____no_output_____" ], [ "For step (2), you will use the `tf.keras.mixed_precision.experimental.LossScaleOptimizer` class, which wraps an optimizer and applies loss scaling. It takes two arguments: the optimizer and the loss scale. 
Construct one as follows to use a dynamic loss scale", "_____no_output_____" ] ], [ [ "optimizer = keras.optimizers.RMSprop()\noptimizer = mixed_precision.LossScaleOptimizer(optimizer, loss_scale='dynamic')", "_____no_output_____" ] ], [ [ "Passing `'dynamic'` is equivalent to passing `tf.mixed_precision.experimental.DynamicLossScale()`.", "_____no_output_____" ], [ "Next, define the loss object and the `tf.data.Dataset`s.", "_____no_output_____" ] ], [ [ "loss_object = tf.keras.losses.SparseCategoricalCrossentropy()\ntrain_dataset = (tf.data.Dataset.from_tensor_slices((x_train, y_train))\n .shuffle(10000).batch(8192))\ntest_dataset = tf.data.Dataset.from_tensor_slices((x_test, y_test)).batch(8192)", "_____no_output_____" ] ], [ [ "Next, define the training step function. Two new methods from the loss scale optimizer are used in order to scale the loss and unscale the gradients:\n* `get_scaled_loss(loss)`: Multiplies the loss by the loss scale\n* `get_unscaled_gradients(gradients)`: Takes in a list of scaled gradients as inputs, and divides each one by the loss scale to unscale them\n\nThese functions must be used in order to prevent underflow in the gradients. `LossScaleOptimizer.apply_gradients` will then apply gradients if none of them have Infs or NaNs. It will also update the loss scale, halving it if the gradients had Infs or NaNs and potentially increasing it otherwise.", "_____no_output_____" ] ], [ [ "@tf.function\ndef train_step(x, y):\n with tf.GradientTape() as tape:\n predictions = model(x)\n loss = loss_object(y, predictions)\n scaled_loss = optimizer.get_scaled_loss(loss)\n scaled_gradients = tape.gradient(scaled_loss, model.trainable_variables)\n gradients = optimizer.get_unscaled_gradients(scaled_gradients)\n optimizer.apply_gradients(zip(gradients, model.trainable_variables))\n return loss", "_____no_output_____" ] ], [ [ "The `LossScaleOptimizer` will likely skip the first few steps at the start of training. The loss scale starts out high so that the optimal loss scale can quickly be determined. After a few steps, the loss scale will stabilize and very few steps will be skipped. This process happens automatically and does not affect training quality.", "_____no_output_____" ], [ "Now define the test step.\n", "_____no_output_____" ] ], [ [ "@tf.function\ndef test_step(x):\n return model(x, training=False)", "_____no_output_____" ] ], [ [ "Load the initial weights of the model, so you can retrain from scratch.", "_____no_output_____" ] ], [ [ "model.set_weights(initial_weights)", "_____no_output_____" ] ], [ [ "Finally, run the custom training loop.", "_____no_output_____" ] ], [ [ "for epoch in range(5):\n epoch_loss_avg = tf.keras.metrics.Mean()\n test_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(\n name='test_accuracy')\n for x, y in train_dataset:\n loss = train_step(x, y)\n epoch_loss_avg(loss)\n for x, y in test_dataset:\n predictions = test_step(x)\n test_accuracy.update_state(y, predictions)\n print('Epoch {}: loss={}, test accuracy={}'.format(epoch, epoch_loss_avg.result(), test_accuracy.result()))", "_____no_output_____" ] ], [ [ "## GPU performance tips\n\nHere are some performance tips when using mixed precision on GPUs.\n\n### Increasing your batch size\nIf it doesn't affect model quality, try running with double the batch size when using mixed precision. As float16 tensors use half the memory, this often allows you to double your batch size without running out of memory. Increasing batch size typically increases training throughput, i.e. 
the number of training elements per second your model can process.\n\n### Ensuring GPU Tensor Cores are used\n\nAs mentioned previously, modern NVIDIA GPUs use a special hardware unit called Tensor Cores that can multiply float16 matrices very quickly. However, Tensor Cores require certain dimensions of tensors to be a multiple of 8. In the examples below, an argument is bold if and only if it needs to be a multiple of 8 for Tensor Cores to be used.\n\n* tf.keras.layers.Dense(**units=64**)\n* tf.keras.layers.Conv2d(**filters=48**, kernel_size=7, stride=3)\n * And similarly for other convolutional layers, such as tf.keras.layers.Conv3d\n* tf.keras.layers.LSTM(**units=64**)\n * And similarly for other RNNs, such as tf.keras.layers.GRU\n* tf.keras.Model.fit(epochs=2, **batch_size=128**)\n\nYou should try to use Tensor Cores when possible. If you want to learn more, the [NVIDIA deep learning performance guide](https://docs.nvidia.com/deeplearning/sdk/dl-performance-guide/index.html) describes the exact requirements for using Tensor Cores as well as other Tensor Core-related performance information.\n\n### XLA\n\nXLA is a compiler that can further increase mixed precision performance, as well as float32 performance to a lesser extent. See the [XLA guide](https://www.tensorflow.org/xla) for details.", "_____no_output_____" ], [ "## Cloud TPU performance tips\nAs on GPUs, you should try doubling your batch size, as bfloat16 tensors use half the memory. Doubling batch size may increase training throughput.\n\nTPUs do not require any other mixed precision-specific tuning to get optimal performance. TPUs already require the use of XLA. They benefit from having certain dimensions being multiples of $128$, but this applies equally to float32 as it does for mixed precision. See the [Cloud TPU Performance Guide](https://cloud.google.com/tpu/docs/performance-guide) for general TPU performance tips, which apply to mixed precision as well as float32.", "_____no_output_____" ], [ "## Summary\n* You should use mixed precision if you use TPUs or NVIDIA GPUs with at least compute capability 7.0, as it will improve performance by up to 3x.\n* You can use mixed precision with the following lines:\n ```\n # On TPUs, use 'mixed_bfloat16' instead\n policy = tf.keras.mixed_precision.experimental.Policy('mixed_float16')\n mixed_precision.set_policy(policy)\n ```\n* If your model ends in softmax, make sure it is float32. And regardless of what your model ends in, make sure the output is float32.\n* If you use a custom training loop with `mixed_float16`, in addition to the above lines, you need to wrap your optimizer with a `tf.keras.mixed_precision.experimental.LossScaleOptimizer`. Then call `optimizer.get_scaled_loss` to scale the loss, and `optimizer.get_unscaled_gradients` to unscale the gradients.\n* Double the training batch size if it does not reduce evaluation accuracy.\n* On GPUs, ensure most tensor dimensions are a multiple of $8$ to maximize performance.\n\nFor more examples of mixed precision using the `tf.keras.mixed_precision` API, see the [official models repository](https://github.com/tensorflow/models/tree/master/official). Most official models, such as [ResNet](https://github.com/tensorflow/models/tree/master/official/vision/image_classification) and [Transformer](https://github.com/tensorflow/models/blob/master/official/nlp/transformer), will run using mixed precision by passing `--dtype=fp16`. \n", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ] ]
d017006a65a28ce43dd0c42335a48abb818c4d62
12,140
ipynb
Jupyter Notebook
examples/getting_started/3-Tabular_Datasets.ipynb
adsbxchange/holoviews
7c06dbd63945fd66e63b17060956634be3ba17fe
[ "BSD-3-Clause" ]
2
2020-08-13T00:11:46.000Z
2021-01-31T22:13:21.000Z
examples/getting_started/3-Tabular_Datasets.ipynb
adsbxchange/holoviews
7c06dbd63945fd66e63b17060956634be3ba17fe
[ "BSD-3-Clause" ]
null
null
null
examples/getting_started/3-Tabular_Datasets.ipynb
adsbxchange/holoviews
7c06dbd63945fd66e63b17060956634be3ba17fe
[ "BSD-3-Clause" ]
null
null
null
53.245614
659
0.679489
[ [ [ "# Tabular Datasets", "_____no_output_____" ], [ "As we have already discovered, Elements are simple wrappers around your data that provide a semantically meaningful representation. HoloViews can work with a wide variety of data types, but many of them can be categorized as either:\n\n * **Tabular:** Tables of flat columns, or\n * **Gridded:** Array-like data on 2-dimensional or N-dimensional grids\n \nThese two general data types are explained in detail in the [Tabular Data](../user_guide/07-Tabular_Datasets.ipynb) and [Gridded Data](../user_guide/08-Gridded_Datasets.ipynb) user guides, including all the many supported formats (including Python dictionaries of NumPy arrays, pandas ``DataFrames``, dask ``DataFrames``, and xarray ``DataArrays`` and ``Datasets``). \n\nIn this Getting-Started guide we provide a quick overview and introduction to two of the most flexible and powerful formats: columnar **pandas** DataFrames (in this section), and gridded **xarray** Datasets (in the next section).", "_____no_output_____" ], [ "## Tabular\n\nTabular data (also called columnar data) is one of the most common, general, and versatile data formats, corresponding to how data is laid out in a spreadsheet. There are many different ways to put data into a tabular format, but for interactive analysis having [**tidy data**](http://www.jeannicholashould.com/tidy-data-in-python.html) provides flexibility and simplicity. For tidy data, the **columns** of the table represent **variables** or **dimensions** and the **rows** represent **observations**. The best way to understand this format is to look at such a dataset:", "_____no_output_____" ] ], [ [ "import numpy as np\nimport pandas as pd\nimport holoviews as hv\nhv.extension('bokeh', 'matplotlib')", "_____no_output_____" ], [ "diseases = pd.read_csv('../assets/diseases.csv.gz')\ndiseases.head()", "_____no_output_____" ] ], [ [ "This particular dataset was the subject of an excellent piece of visual journalism in the [Wall Street Journal](http://graphics.wsj.com/infectious-diseases-and-vaccines/#b02g20t20w15). The WSJ data details the incidence of various diseases over time, and was downloaded from the [University of Pittsburgh's Project Tycho](http://www.tycho.pitt.edu/). We can see we have 5 data columns, which each correspond either to independent variables that specify a particular measurement ('Year', 'Week', 'State'), or observed/dependent variables reporting what was then actually measured (the 'measles' or 'pertussis' incidence). \n\nKnowing the distinction between those two types of variables is crucial for doing visualizations, but unfortunately the tabular format does not declare this information. Plotting 'Week' against 'State' would not be meaningful, whereas 'measles' for each 'State' (averaging or summing across the other dimensions) would be fine, and there's no way to deduce those constraints from the tabular format. 
Accordingly, we will first make a HoloViews object called a ``Dataset`` that declares the independent variables (called key dimensions or **kdims** in HoloViews) and dependent variables (called value dimensions or **vdims**) that you want to work with:", "_____no_output_____" ] ], [ [ "vdims = [('measles', 'Measles Incidence'), ('pertussis', 'Pertussis Incidence')]\nds = hv.Dataset(diseases, ['Year', 'State'], vdims)", "_____no_output_____" ] ], [ [ "Here we've used an optional tuple-based syntax **``(name,label)``** to specify a more meaningful description for the ``vdims``, while using the original short descriptions for the ``kdims``. We haven't yet specified what to do with the ``Week`` dimension, but we are only interested in yearly averages, so let's just tell HoloViews to average over all remaining dimensions:", "_____no_output_____" ] ], [ [ "ds = ds.aggregate(function=np.mean)\nds", "_____no_output_____" ] ], [ [ "(We'll cover aggregations like ``np.mean`` in detail later, but here the important bit is simply that the ``Week`` dimension can now be ignored.)\n\nThe ``repr`` shows us both the ``kdims`` (in square brackets) and the ``vdims`` (in parentheses) of the ``Dataset``. Because it can hold arbitrary combinations of dimensions, a ``Dataset`` is *not* immediately visualizable. There's no single clear mapping from these four dimensions onto a two-dimensional page, hence the textual representation shown above.\n\nTo make this data visualizable, we'll need to provide a bit more metadata, by selecting one of the large library of Elements that can help answer the questions we want to ask about the data. Perhaps the most obvious representation of this dataset is as a ``Curve`` displaying the incidence for each year, for each state. We could pull out individual columns one by one from the original dataset, but now that we have declared information about the dimensions, the cleanest approach is to map the dimensions of our ``Dataset`` onto the dimensions of an Element using ``.to``:", "_____no_output_____" ] ], [ [ "%%opts Curve [width=600 height=250] {+framewise}\n(ds.to(hv.Curve, 'Year', 'measles') + ds.to(hv.Curve, 'Year', 'pertussis')).cols(1)", "_____no_output_____" ] ], [ [ "Here we specified two ``Curve`` elements showing measles and pertussis incidence respectively (the vdims), per year (the kdim), and laid them out in a vertical column. You'll notice that even though we specified only the short name for the value dimensions, the plot shows the longer names (\"Measles Incidence\", \"Pertussis Incidence\") that we declared on the ``Dataset``.\n\nYou'll also notice that we automatically received a dropdown menu to select which ``State`` to view. Each ``Curve`` ignores unused value dimensions, because additional measurements don't affect each other, but HoloViews has to do *something* with every key dimension for every such plot. If the ``State`` (or any other key dimension) isn't somehow plotted or aggregated over, then HoloViews has to leave choosing a value for it to the user, hence the selection widget. Other options for what to do with extra dimensions or just extra data ranges are illustrated below.", "_____no_output_____" ], [ "### Selecting\n\nOne of the most common things we might want to do is to select only a subset of the data. The ``select`` method makes this extremely easy, letting you select a single value, a list of values, or a range of values supplied as a tuple. 
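As a quick illustration (a hypothetical sketch reusing the ``Year`` and ``State`` dimensions declared above), the three call styles look like:\n\n```python\nds.select(State='Texas')                 # single value\nds.select(State=['Texas', 'New York'])   # list of values\nds.select(Year=(1980, 1990))             # range supplied as a tuple\n```\n\n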
Here we will use ``select`` to display the measles incidence in four states over one decade. After applying the selection, we use the ``.to`` method as shown earlier, now displaying the data as ``Bars`` indexed by 'Year' and 'State' key dimensions and displaying the 'Measles Incidence' value dimension:", "_____no_output_____" ] ], [ [ "%%opts Bars [width=800 height=400 tools=['hover'] group_index=1 legend_position='top_left']\nstates = ['New York', 'New Jersey', 'California', 'Texas']\nds.select(State=states, Year=(1980, 1990)).to(hv.Bars, ['Year', 'State'], 'measles').sort()", "_____no_output_____" ] ], [ [ "### Faceting\n\nAbove we already saw what happens to key dimensions that we didn't explicitly assign to the Element using the ``.to`` method: they are grouped over, popping up a set of widgets so the user can select the values to show at any one time. However, using widgets is not always the most effective way to view the data, and a ``Dataset`` lets you specify other alternatives using the ``.overlay``, ``.grid`` and ``.layout`` methods. For instance, we can lay out each state separately using ``.grid``:", "_____no_output_____" ] ], [ [ "%%opts Curve [width=200] (color='indianred')\ngrouped = ds.select(State=states, Year=(1930, 2005)).to(hv.Curve, 'Year', 'measles')\ngrouped.grid('State')", "_____no_output_____" ] ], [ [ "Or we can take the same grouped object and ``.overlay`` the individual curves instead of laying them out in a grid:", "_____no_output_____" ] ], [ [ "%%opts Curve [width=600] (color=Cycle(values=['indianred', 'slateblue', 'lightseagreen', 'coral']))\ngrouped.overlay('State')", "_____no_output_____" ] ], [ [ "These faceting methods even compose together, meaning that if we had more key dimensions we could ``.overlay`` one dimension, ``.grid`` another and have a widget for any other remaining key dimensions.", "_____no_output_____" ], [ "### Aggregating\n\nInstead of selecting a subset of the data, another common operation supported by HoloViews is computing aggregates. When we first loaded this dataset, we aggregated over the 'Week' column to compute the mean incidence for every year, thereby reducing our data significantly. The ``aggregate`` method is therefore very useful to compute statistics from our data.\n\nA simple example using our dataset is to compute the mean and standard deviation of the Measles Incidence by ``'Year'``. We can express this simply by passing the key dimensions to aggregate over (in this case just the 'Year') along with a function and optional ``spreadfn`` to compute the statistics we want. The ``spreadfn`` will append the name of the function to the dimension name so we can reference the computed value separately. Once we have computed the aggregate, we can simply cast it to a ``Curve`` and ``ErrorBars``:", "_____no_output_____" ] ], [ [ "%%opts Curve [width=600]\nagg = ds.aggregate('Year', function=np.mean, spreadfn=np.std)\n(hv.Curve(agg) * hv.ErrorBars(agg,vdims=['measles', 'measles_std'])).redim.range(measles=(0, None))", "_____no_output_____" ] ], [ [ "In this way we can summarize a multi-dimensional dataset as something that can be visualized directly, while allowing us to compute arbitrary statistics along a dimension.\n\n## Other data\n\nIf you want to know more about working with tabular data, particularly when using datatypes other than pandas, have a look at the [user guide](../user_guide/07-Tabular_Datasets.ipynb). 
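As a quick taste of a non-pandas interface, the same ``Dataset`` constructor also accepts a plain dictionary of NumPy arrays (a minimal, hypothetical sketch; the column names are illustrative):\n\n```python\nimport numpy as np\nimport holoviews as hv\n\ntable = {'Year': np.arange(1990, 2000), 'cases': np.random.rand(10)}\nprint(hv.Dataset(table, kdims=['Year'], vdims=['cases']))\n```\n\n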
The different interfaces allow you to work with everything from simple NumPy arrays to out-of-core dataframes using dask. Dask dataframes scale to visualizations of billions of rows, when using [datashader](https://anaconda.org/jbednar/holoviews_datashader/notebook) with HoloViews to aggregate the data as needed.", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown", "markdown", "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown" ] ]
d0170262d2a88a3668c14060bd1c247a8a122820
367,449
ipynb
Jupyter Notebook
docs/examples/quickstart.ipynb
zmoon/xmovie
023abcb3c14c7c21c90d665c41892f271dc8b4cd
[ "MIT" ]
null
null
null
docs/examples/quickstart.ipynb
zmoon/xmovie
023abcb3c14c7c21c90d665c41892f271dc8b4cd
[ "MIT" ]
null
null
null
docs/examples/quickstart.ipynb
zmoon/xmovie
023abcb3c14c7c21c90d665c41892f271dc8b4cd
[ "MIT" ]
null
null
null
507.526243
199,204
0.945837
[ [ [ "# First steps with xmovie", "_____no_output_____" ] ], [ [ "import warnings\n\nimport matplotlib.pyplot as plt\nimport xarray as xr\nfrom shapely.errors import ShapelyDeprecationWarning\nfrom xmovie import Movie\n\nwarnings.filterwarnings(\n action='ignore',\n category=ShapelyDeprecationWarning, # in cartopy\n)\nwarnings.filterwarnings(\n action=\"ignore\",\n category=UserWarning,\n message=r\"No `(vmin|vmax)` provided. Data limits are calculated from input. Depending on the input this can take long. Pass `\\1` to avoid this step\"\n)\n\n%matplotlib inline", "_____no_output_____" ] ], [ [ "## Basics", "_____no_output_____" ] ], [ [ "# Load test dataset\nds = xr.tutorial.open_dataset('air_temperature').isel(time=slice(0, 150))\n\n# Create movie object\nmov = Movie(ds.air)", "_____no_output_____" ] ], [ [ "### Preview movie frames", "_____no_output_____" ] ], [ [ "# Preview 10th frame\nmov.preview(10)\nplt.savefig(\"movie_preview.png\")", "_____no_output_____" ], [ "! rm -f frame*.png *.mp4 *.gif", "rm: cannot remove 'frame*.png': No such file or directory\nrm: cannot remove '*.mp4': No such file or directory\nrm: cannot remove '*.gif': No such file or directory\n" ] ], [ [ "### Create movie files", "_____no_output_____" ] ], [ [ "mov.save('movie.mp4') # Use to save a high quality mp4 movie\nmov.save('movie_gif.gif') # Use to save a gif", "Movie created at movie.mp4\nMovie created at movie_mp4.mp4\nGIF created at movie_gif.gif\n" ] ], [ [ "In many cases it is useful to have both a high quality movie and a lower resolution gif of the same animation. If that is desired, just deactivate the `remove_movie` option and give a filename with `.gif`. xmovie will first render a high quality movie and then convert it to a gif, without removing the movie afterwards.", "_____no_output_____" ], [ "### Optional frame-generation progress bars", "_____no_output_____" ], [ "Display a progressbar with `progress=True`, (requires tqdm). 
This can be helpful for long running animations.", "_____no_output_____" ] ], [ [ "mov.save('movie_combo.gif', remove_movie=False, progress=True)", "_____no_output_____" ] ], [ [ "Modify the framerate of the output with the keyword arguments `framerate` (for movies) and `gif_framerate` (for gifs).", "_____no_output_____" ] ], [ [ "mov.save('movie_fast.gif', remove_movie=False, progress=True, framerate=20, gif_framerate=20)\nmov.save('movie_slow.gif', remove_movie=False, progress=True, framerate=5, gif_framerate=5)", "_____no_output_____" ] ], [ [ "![movie_fast.gif](movie_fast.gif)\n![movie_slow.gif](movie_slow.gif)", "_____no_output_____" ], [ "![](movie_combo.gif)", "_____no_output_____" ], [ "### Frame dimension selection", "_____no_output_____" ], [ "By default, the movie passes through the `'time'` dimension of the DataArray, but this can be easily changed with the `framedim` argument:", "_____no_output_____" ] ], [ [ "mov = Movie(ds.air, framedim='lon')\nmov.save('lon_movie.gif')", "Movie created at lon_movie.mp4\nGIF created at lon_movie.gif\n" ] ], [ [ "![](lon_movie.gif)", "_____no_output_____" ], [ "## Modifying plots", "_____no_output_____" ], [ "### Rotating globe (preset)", "_____no_output_____" ] ], [ [ "from xmovie.presets import rotating_globe\n\nmov = Movie(ds.air, plotfunc=rotating_globe)\nmov.save('movie_rotating.gif', progress=True)", "_____no_output_____" ] ], [ [ "![movie_rotating.gif](movie_rotating.gif)", "_____no_output_____" ] ], [ [ "mov = Movie(ds.air, plotfunc=rotating_globe, style='dark')\nmov.save('movie_rotating_dark.gif', progress=True)", "_____no_output_____" ] ], [ [ "![](movie_rotating_dark.gif)", "_____no_output_____" ], [ "### Specifying xarray plot method to be used", "_____no_output_____" ], [ "Change the plotting function with the parameter `plotmethod`.", "_____no_output_____" ] ], [ [ "mov = Movie(ds.air, rotating_globe, plotmethod='contour')\nmov.save('movie_cont.gif')\n\nmov = Movie(ds.air, rotating_globe, plotmethod='contourf')\nmov.save('movie_contf.gif')", "Movie created at movie_cont.mp4\nGIF created at movie_cont.gif\nMovie created at movie_contf.mp4\nGIF created at movie_contf.gif\n" ] ], [ [ "![](movie_cont.gif)\n![](movie_contf.gif)", "_____no_output_____" ], [ "### Changing preset settings", "_____no_output_____" ] ], [ [ "import numpy as np\nds = xr.tutorial.open_dataset('rasm', decode_times=False).Tair # 36 times in total\n\n# Interpolate time for smoother animation\nds['time'].values[:] = np.arange(len(ds['time']))\nds = ds.interp(time=np.linspace(0, 10, 60))\n\n# `Movie` accepts keywords for the xarray plotting interface and provides a set of 'own' keywords like \n# `coast`, `land` and `style` to facilitate the styling of plots\nmov = Movie(ds, rotating_globe,\n # Keyword arguments to the xarray plotting interface\n cmap='RdYlBu_r',\n x='xc',\n y='yc',\n shading='auto',\n # Custom keyword arguments to `rotating_globe\n lat_start=45,\n lat_rotations=0.05,\n lon_rotations=0.2,\n land=False,\n coastline=True,\n style='dark')\nmov.save('movie_rasm.gif', progress=True)", "_____no_output_____" ] ], [ [ "![](movie_rasm.gif)", "_____no_output_____" ], [ "### User-provided", "_____no_output_____" ], [ "Besides the presets, xmovie is designed to animate any custom plot which can be wrapped in a function acting on a matplotlib figure. This can contain xarray plotting commands, 'pure' matplotlib or a combination of both. 
This can come in handy when you want to animate a complex static plot.", "_____no_output_____" ] ], [ [ "ds = xr.tutorial.open_dataset('rasm', decode_times=False).Tair\nfig = plt.figure(figsize=[10,5])\ntt = 30\n\nstation = dict(x=100, y=150)\nds_station = ds.sel(**station)\n\n(ax1, ax2) = fig.subplots(ncols=2)\nds.isel(time=tt).plot(ax=ax1)\nax1.plot(station['x'], station['y'], marker='*', color='k', markersize=15)\nax1.text(station['x']+4, station['y']+4, 'Station', color='k')\nax1.set_aspect(1)\nax1.set_facecolor('0.5')\nax1.set_title('');\n\n# Time series\nds_station.isel(time=slice(0,tt+1)).plot.line(ax=ax2, x='time')\nax2.set_xlim(ds.time.min().data, ds.time.max().data)\nax2.set_ylim(ds_station.min(), ds_station.max())\nax2.set_title('Data at station');\n\nfig.subplots_adjust(wspace=0.6)\n\nfig.savefig(\"static.png\")", "_____no_output_____" ] ], [ [ "All you need to do is wrap your plotting calls into a function `func(ds, fig, tt)`, where `ds` is the xarray dataset you pass to `Movie`, `fig` is a matplotlib figure handle, and `tt` is the index of the movie frame.", "_____no_output_____" ] ], [ [ "def custom_plotfunc(ds, fig, tt, *args, **kwargs):\n # Define station location for timeseries\n station = dict(x=100, y=150)\n ds_station = ds.sel(**station)\n\n (ax1, ax2) = fig.subplots(ncols=2)\n \n # Map axis\n # Color limits need to be fixed or your video is going to cause seizures.\n # This is the only modification from the code above!\n ds.isel(time=tt).plot(ax=ax1, vmin=ds.min(), vmax=ds.max(), cmap='RdBu_r')\n ax1.plot(station['x'], station['y'], marker='*', color='k', markersize=15)\n ax1.text(station['x']+4, station['y']+4, 'Station', color='k')\n ax1.set_aspect(1)\n ax1.set_facecolor('0.5')\n ax1.set_title('');\n\n # Time series\n ds_station.isel(time=slice(0,tt+1)).plot.line(ax=ax2, x='time')\n ax2.set_xlim(ds.time.min().data, ds.time.max().data)\n ax2.set_ylim(ds_station.min(), ds_station.max())\n ax2.set_title('Data at station');\n\n fig.subplots_adjust(wspace=0.6)\n \n return None, None \n # ^ This is not strictly necessary, but otherwise a warning will be raised. \n\n\nmov_custom = Movie(ds, custom_plotfunc)\nmov_custom.preview(30)", "_____no_output_____" ], [ "mov_custom.save('movie_custom.gif', progress=True) ", "_____no_output_____" ] ], [ [ "![](movie_custom.gif)", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown" ]
[ [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown" ], [ "code" ], [ "markdown", "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ] ]
d017057f1c7344b7aa5c167cda368c9c72f6f1bf
1,989
ipynb
Jupyter Notebook
math_symbol_prac.ipynb
Sumi-Lee/testrepository
2e1c01996471b7badf979b739631464ae3dc6f69
[ "MIT" ]
null
null
null
math_symbol_prac.ipynb
Sumi-Lee/testrepository
2e1c01996471b7badf979b739631464ae3dc6f69
[ "MIT" ]
null
null
null
math_symbol_prac.ipynb
Sumi-Lee/testrepository
2e1c01996471b7badf979b739631464ae3dc6f69
[ "MIT" ]
null
null
null
21.857143
236
0.425842
[ [ [ "<a href=\"https://colab.research.google.com/github/Sumi-Lee/testrepository/blob/main/math_symbol_prac.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>", "_____no_output_____" ], [ "## 수학 기호 연습\n\n수식 기호들을 집어 넣는 연습을 해봅시다. \n$\\theta = 1$ $1 \\le 5 $ \n$\\sum_{i=1}^{n} i^2 $ ", "_____no_output_____" ], [ "$$\n\\sum_{i=1}^{n} \\frac{1}{i} $$", "_____no_output_____" ] ], [ [ "1+1", "_____no_output_____" ], [ "", "_____no_output_____" ] ] ]
[ "markdown", "code" ]
[ [ "markdown", "markdown", "markdown" ], [ "code", "code" ] ]
d01707e33fc3880a2d1c22cdbe7bc11565c6a3bb
204,994
ipynb
Jupyter Notebook
analyses/2018-11-07-summarize-titers-and-sequences-by-date.ipynb
blab/flu-forecasting
723c515ba2e8813f081ae48b23d63871e9e3db4e
[ "MIT" ]
2
2020-08-19T04:09:28.000Z
2021-07-05T02:32:04.000Z
analyses/2018-11-07-summarize-titers-and-sequences-by-date.ipynb
elifesciences-publications/flu-forecasting
1fee7ab1f755ad8ae5be28542045b5b609e4774b
[ "MIT" ]
null
null
null
analyses/2018-11-07-summarize-titers-and-sequences-by-date.ipynb
elifesciences-publications/flu-forecasting
1fee7ab1f755ad8ae5be28542045b5b609e4774b
[ "MIT" ]
1
2020-09-01T11:45:41.000Z
2020-09-01T11:45:41.000Z
264.850129
109,904
0.913124
[ [ [ "# Summarize titers and sequences by date\n\nCreate a single histogram on the same scale for number of titer measurements and number of genomic sequences per year to show the relative contribution of each data source.", "_____no_output_____" ] ], [ [ "import Bio\nimport Bio.SeqIO\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\n%matplotlib inline", "_____no_output_____" ], [ "# Configure matplotlib theme.\nfontsize = 14\nmatplotlib_params = {\n 'axes.labelsize': fontsize,\n 'font.size': fontsize,\n 'legend.fontsize': 12,\n 'xtick.labelsize': fontsize,\n 'ytick.labelsize': fontsize,\n 'text.usetex': False,\n 'figure.figsize': [6, 4],\n 'savefig.dpi': 300,\n 'figure.dpi': 300,\n 'text.usetex': False\n}\n\nplt.rcParams.update(matplotlib_params)\n\n# Turn off spines for all plots.\nplt.rc(\"axes.spines\", top=False, right=False)", "_____no_output_____" ], [ "matplotlib.get_configdir()", "_____no_output_____" ], [ "plt.style.use(\"huddlej\")", "_____no_output_____" ], [ "plt.style.available", "_____no_output_____" ] ], [ [ "## Load sequences", "_____no_output_____" ] ], [ [ "ls ../../seasonal-flu/data/*.fasta", "../../seasonal-flu/data/h1n1pdm_ha.fasta\n../../seasonal-flu/data/h1n1pdm_na.fasta\n../../seasonal-flu/data/h3n2_ha.fasta\n../../seasonal-flu/data/h3n2_na.fasta\n" ], [ "# Open FASTA of HA sequences for H3N2.\nsequences = Bio.SeqIO.parse(\"../../seasonal-flu/data/h3n2_ha.fasta\", \"fasta\")", "_____no_output_____" ], [ "# Get strain names from sequences.\ndistinct_strains_with_sequences = pd.Series([sequence.name.split(\"|\")[0].replace(\"-egg\", \"\")\n for sequence in sequences]).drop_duplicates()", "_____no_output_____" ], [ "distinct_strains_with_sequences.shape", "_____no_output_____" ], [ "# Parse years from distinct strains with titers.\nsequence_years = distinct_strains_with_sequences.apply(lambda strain: int(strain.split(\"/\")[-1])).values", "_____no_output_____" ], [ "# Omit invalid sequence years.\nsequence_years = sequence_years[sequence_years < 2019]", "_____no_output_____" ], [ "sequence_years.shape", "_____no_output_____" ] ], [ [ "## Load titers", "_____no_output_____" ] ], [ [ "# Read titers into a data frame.\ntiters = pd.read_table(\n \"../../seasonal-flu/data/cdc_h3n2_egg_hi_titers.tsv\",\n header=None,\n index_col=False,\n names=[\"test\", \"reference\", \"serum\", \"source\", \"titer\", \"assay\"]\n)", "_____no_output_____" ], [ "titers.head()", "_____no_output_____" ], [ "titers[\"test_year\"] = titers[\"test\"].apply(lambda strain: int(strain.replace(\"-egg\", \"\").split(\"/\")[-1]))", "_____no_output_____" ], [ "(titers[\"test_year\"] < 2007).sum()", "_____no_output_____" ], [ "titers[\"test_year\"].value_counts()", "_____no_output_____" ], [ "titers.shape", "_____no_output_____" ], [ "titers[titers[\"test_year\"] < 2007][\"test\"].unique().shape", "_____no_output_____" ], [ "titers[titers[\"test_year\"] < 2007][\"test\"].unique()", "_____no_output_____" ], [ "# Identify distinct viruses represented as test strains in titers.\ndistinct_strains_with_titers = titers[\"test\"].str.replace(\"-egg\", \"\").drop_duplicates()", "_____no_output_____" ], [ "# Parse years from distinct strains with titers.\ntiter_years = distinct_strains_with_titers.apply(lambda strain: int(strain.split(\"/\")[-1])).values", "_____no_output_____" ], [ "# Omit invalid titer years.\ntiter_years = titer_years[titer_years < 2019]", "_____no_output_____" ], [ "titer_years.shape", "_____no_output_____" ] ], [ [ "## Plot sequence and titer 
strains by year", "_____no_output_____" ] ], [ [ "sequence_years.min()", "_____no_output_____" ], [ "sequence_years.max()", "_____no_output_____" ], [ "[sequence_years, titer_years]", "_____no_output_____" ], [ "sequence", "_____no_output_____" ], [ "fig, ax = plt.subplots(1, 1)\nbins = np.arange(1968, 2019)\nax.hist([sequence_years, titer_years], bins, histtype=\"bar\", label=[\"HA sequence\", \"HI titer\"])\n\nlegend = ax.legend(\n loc=\"upper left\",\n ncol=1,\n frameon=False,\n handlelength=1,\n fancybox=False,\n handleheight=1\n)\nlegend.set_title(\"Virus measurement\", prop={\"size\": 12})\nlegend._legend_box.align = \"left\"\n\nax.set_xlim(1990)\nax.set_xlabel(\"Year\")\nax.set_ylabel(\"Number of viruses measured\")", "_____no_output_____" ], [ "fig, ax = plt.subplots(1, 1)\nbins = np.arange(1968, 2019)\nax.hist([titer_years], bins, histtype=\"bar\", label=[\"HI titer\"])\n\nax.set_xlim(1990)\nax.set_xlabel(\"Year\")\nax.set_ylabel(\"Viruses measured by HI\")", "_____no_output_____" ], [ "len(titer_years)", "_____no_output_____" ], [ "(titer_years < 2010).sum()", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown" ], [ "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code", "code", "code" ] ]
d017142e18b6cedffe9f6267e3b537ae62745f63
4,202
ipynb
Jupyter Notebook
examples/HSMfile_examples.ipynb
hadfieldnz/notebooks
1f840794d5f19ef469fe6a30d82d98955b47039a
[ "MIT" ]
null
null
null
examples/HSMfile_examples.ipynb
hadfieldnz/notebooks
1f840794d5f19ef469fe6a30d82d98955b47039a
[ "MIT" ]
null
null
null
examples/HSMfile_examples.ipynb
hadfieldnz/notebooks
1f840794d5f19ef469fe6a30d82d98955b47039a
[ "MIT" ]
null
null
null
22.713514
217
0.562113
[ [ [ "# HSMfile examples", "_____no_output_____" ], [ "The [hsmfile module](https://github.com/hadfieldnz/hsmfile) is modelled on my IDL mgh_san routines and provides user-customisable access to remote (slow-access) and local (fast-access) files.\n\nThis notebook exercises various aspects of the hsmfile module.\n\nChange history:\n\nMGH 2019-08-15\n - afile is now called hsmfile.\n\nMGH 2019-08-07\n - Modified for afile.\n\nMGH 2019-05-06\n - Written for afile's predecessor, mgh_san.", "_____no_output_____" ] ], [ [ "import os\nimport hsmfile", "_____no_output_____" ] ], [ [ "The following cell should be executed whenever the hsmfile module code has been changed.", "_____no_output_____" ] ], [ [ "from importlib import reload\nreload(hsmfile);", "_____no_output_____" ] ], [ [ "Print the volumes supported by the hsmfile module on this platform", "_____no_output_____" ] ], [ [ "print(hsmfile.volume.keys())", "_____no_output_____" ] ], [ [ "Specify the files for which we will search (Cook Strait Narrows 1 km run). Normally", "_____no_output_____" ] ], [ [ "vol = '/nesi/nobackup/niwa00020/hadfield'\n\nsub = 'work/cook/roms/sim34/run'\n\npattern = 'bran-2009-2012-nzlam-1.20-detide/roms_avg_????.nc'", "_____no_output_____" ] ], [ [ "The hsmfile.path function returns a pathlib Path object. Here we construct the path names for the base directory on the remote, or master, volume (mirror = False) and the local, or mirror, volume (mirror = True)", "_____no_output_____" ] ], [ [ "hsmfile.path(sub=sub,vol=vol,mirror=False)", "_____no_output_____" ], [ "if 'mirror' in hsmfile.volume[vol]:\n print(repr(hsmfile.path(sub=sub,vol=vol,mirror=True)))\nelse:\n print('Volume has no mirror')", "_____no_output_____" ] ], [ [ "The hsmfile.search function uses the Path's glob function to create a generator object and from that generates and returns a sorted list of Path objects relative to the base. ", "_____no_output_____" ] ], [ [ "match = hsmfile.search(pattern,sub=sub,vol=vol); match", "_____no_output_____" ] ], [ [ "The hsmfile.file function constructs and returns a list of path objects representing actual files. It checks for existence and copies from master to mirror as necessary.", "_____no_output_____" ] ], [ [ "file = [hsmfile.file(m,sub=sub,vol=vol) for m in match]; file", "_____no_output_____" ] ] ]
[ "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code", "markdown", "code" ]
[ [ "markdown", "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code", "code" ], [ "markdown" ], [ "code" ], [ "markdown" ], [ "code" ] ]
d01715f5b1aa97191e22a4a61fa92ff76bc8e77a
11,701
ipynb
Jupyter Notebook
_note_/内置包/re_正则处理.ipynb
By2048/_python_
be57738093676a1273e6f69232723669e408986e
[ "MIT" ]
null
null
null
_note_/内置包/re_正则处理.ipynb
By2048/_python_
be57738093676a1273e6f69232723669e408986e
[ "MIT" ]
null
null
null
_note_/内置包/re_正则处理.ipynb
By2048/_python_
be57738093676a1273e6f69232723669e408986e
[ "MIT" ]
null
null
null
31.037135
143
0.414153
[ [ [ "import re\nimport pprint\nimport json\nimport logging", "_____no_output_____" ], [ "# re.match(pattern, string, flags=0)\n\nprint(re.match('www', 'www.qwer.com').span()) # 在起始位置匹配\nprint(re.match('com', 'www.qwer.com')) # 不在起始位置匹配\n", "_____no_output_____" ], [ "\nline = \"Cats are smarter than dogs\"\nmatchObj = re.match(r'(.*) are (.*?) (.*)', line, re.M | re.I)\nif matchObj:\n print(\"matchObj.group() : \", matchObj.group())\n print(\"matchObj.group(1) : \", matchObj.group(1))\n print(\"matchObj.group(2) : \", matchObj.group(2))\nelse:\n print(\"No match!!\")\n", "_____no_output_____" ], [ "# re.search(pattern, string, flags=0)\n\nprint(re.search('www', 'www.1234.com').span()) # 在起始位置匹配\nprint(re.search('com', 'www.1234.com').span()) # 不在起始位置匹配\n", "_____no_output_____" ], [ "line = \"Cats are smarter than dogs\"\nsearchObj = re.search(r'(.*) are (.*?) (.*)', line, re.M | re.I)\nif searchObj:\n print(\"searchObj.group() : \", searchObj.group())\n print(\"searchObj.group(1) : \", searchObj.group(1))\n print(\"searchObj.group(2) : \", searchObj.group(2))\nelse:\n print(\"Nothing found!!\")\n", "_____no_output_____" ], [ "# re.match 只匹配字符串的开始,如果字符串开始不符合正则表达式,则匹配失败,函数返回 None\n# re.search 匹配整个字符串,直到找到一个匹配\n\nline = \"Cats are smarter than dogs\"\n\nmatchObj = re.match(r'dogs', line, re.M | re.I)\nif matchObj:\n print(\"match --> matchObj.group() : \", matchObj.group())\nelse:\n print(\"No match!!\")\n\nmatchObj = re.search(r'dogs', line, re.M | re.I)\nif matchObj:\n print(\"search --> matchObj.group() : \", matchObj.group())\nelse:\n print(\"No match!!\")\n", "_____no_output_____" ] ], [ [ "## 参数\n\n| | |\n| --------- | ---------------------------------------------------------------------- |\n| 参数 | 描述 |\n| `pattern` | 匹配的正则表达式 |\n| `string` | 要匹配的字符串。 |\n| `flags` | 标志位,用于控制正则表达式的匹配方式,如:是否区分大小写,多行匹配等等 |\n\n\n| | |\n| ------ | -------------------------------------------------------------- |\n| 修饰符 | 描述 |\n| `re.I` | 使匹配对大小写不敏感 |\n| `re.L` | 做本地化识别(locale-aware)匹配 |\n| `re.M` | 多行匹配,影响 ^ 和 $ |\n| `re.S` | 使 . 
匹配包括换行在内的所有字符 |\n| `re.U` | 根据Unicode字符集解析字符。这个标志影响 `\\w`,` \\W`, `\\b`, `\\B` |\n| `re.X` | 该标志通过给予你更灵活的格式以便你将正则表达式写得更易于理解。 |\n\n\n可以使用`group(num)` 或 `groups()` 匹配对象函数来获取匹配表达式。\n\n| | |\n| -------------- | --------------------------------------------------------- |\n| 匹配对象方法 | 描述 |\n| `group(num=0)` | 匹配的整个表达式的字符串,`group()` |\n| | 可以一次输入多个组号,在这种情况下它 将返回一个包含那些组所对应值的元组。 |\n| `groups()` | 返回一个包含所有小组字符串的元组,从 1 到 所含的小组号。 |\n\n| | |\n| --------------------- | -------------------------------------------------------------------------------------------------- |\n| `group([group1, …]`) | 获得一个或多个分组匹配的字符串,当要获得整个匹配的子串时,可直接使用 `group()` 或 `group(0)` |\n| `start([group])` | 获取分组匹配的子串在整个字符串中的起始位置(子串第一个字符的索引),参数默认值为 0; |\n| `end([group])` | 获取分组匹配的子串在整个字符串中的结束位置(子串最后一个字符的索引+1),参数默认值为 0; |\n| `span([group])` | 方法返回 `(start(group), end(group))` |\n", "_____no_output_____" ] ], [ [ "# re.sub(pattern, repl, string, count=0, flags=0)\n# pattern 正则中的模式字符串。 \n# repl 替换的字符串,也可为一个函数。 \n# string 要被查找替换的原始字符串。 \n# count 模式匹配后替换的最大次数,默认 0 表示替换所有的匹配。\n\nphone = \"123-456-789 # 这是一个电话号码\"\n\nprint(re.sub(r'#.*$', \"\", phone))\nprint(re.sub(r'\\D', \"\", phone))\n", "_____no_output_____" ], [ "def double(matched):\n \"\"\"将匹配的数字*2\n :param matched: 传入的匹配的参数 value\n :return: str 类型的 value*2\n \"\"\"\n value = int(matched.group('value'))\n return str(value * 2)\n\n\ns = 'A1111G4HFD2222'\nprint(re.sub('(?P<value>\\d+)', double, s))\n", "_____no_output_____" ], [ "# 编译表达式 re.compile(pattern[, flags])\n\n# pattern 一个字符串形式的正则表达式\n# flags 可选,表示匹配模式,比如忽略大小写,多行模式等,具体参数为\n# re.I 忽略大小写 \n# re.L 表示特殊字符集 `\\w`, `\\W`, `\\b`, `\\B`, `\\s`,`\\S` 依赖于当前环境 \n# re.M 多行模式 \n# re.S 即为 . 并且包括换行符在内的任意字符(. 不包括换行符)\n# re.U 表示特殊字符集 `\\w`, `\\W`, `\\b`, `\\B`, `\\d`, `\\D`, `\\s`, `\\S` 依赖于 Unicode 字符属性据库 \n# re.X 为了增加可读性,忽略空格和 # 后面的注释 \n\npattern = re.compile(r'\\d+')\n\nmath_item = pattern.match('one12twothree34four')\nprint(1, math_item)\n\nmath_item = pattern.match('one12twothree34four', 2, 10)\nprint(2, math_item)\n\nmath_item = pattern.match('one12twothree34four', 3, 10)\nprint(3, math_item) # 返回一个 Match 对象\n\n# 可省略 0\nprint(1, math_item.group(0))\nprint(2, math_item.start(0))\nprint(3, math_item.end(0))\nprint(4, math_item.span(0))\n", "_____no_output_____" ], [ "pattern = re.compile(r'([a-z]+) ([a-z]+)', re.I)\nmath_item = pattern.match('Hello World Wide Web')\n\nprint(1, math_item) # 匹配成功,返回一个 Match 对象\nprint(1, math_item.group(0)) # 返回匹配成功的整个子串\nprint(1, math_item.span(0)) # 返回匹配成功的整个子串的索引\n\nprint(2, math_item.group(1)) # 返回第一个分组匹配成功的子串\nprint(2, math_item.span(1)) # 返回第一个分组匹配成功的子串的索引\n\nprint(3, math_item.group(2)) # 返回第二个分组匹配成功的子串\nprint(3, math_item.span(2)) # 返回第二个分组匹配成功的子串\n\nprint(4, math_item.groups()) # 等价于 (m.group(1), m.group(2), ...)\n", "_____no_output_____" ], [ "\ntry:\n item = math_item.group(3) # 不存在第三个分组\nexcept IndexError as e:\n print(e)\n\n# 查找所有 re.findall(string[, pos[, endpos]])\n# string 待匹配的字符串。 \n# pos 可选参数,指定字符串的起始位置,默认为 0。 \n# endpos 可选参数,指定字符串的结束位置,默认为字符串的长度\n\npattern = re.compile(r'\\d+')\nprint(1, pattern.findall('qwer 123 google 456'))\nprint(1, pattern.findall('qwe88rty123456google456', 0, 10))\n\n# 查找所有 `re.finditer` 和 `re.findall` 类似,在字符串中找到正则表达式所匹配的所有子串,并把它们作为一个迭代器返回。\n\nmatchs = re.finditer(r\"\\d+\", \"12a32bc43jf3\")\nprint(2, matchs)\nfor item in matchs:\n print(3, item.group())\n", "_____no_output_____" ], [ "\n# 分割 re.split(pattern, string[, maxsplit=0, flags=0])\n# maxsplit 分隔次数,maxsplit = 1 分隔一次,默认为0,不限制次数\n\nprint(1, re.split('\\W+', 'runoob, runoob, runoob.'))\nprint(2, re.split('(\\W+)', 
' runoob, runoob, runoob.'))\n\nprint(3, re.split('\\W+', ' runoob, runoob, runoob.', 1))\n\nprint(4, re.split('a*', 'hello world')) # 对于一个找不到匹配的字符串而言,split 不会对其作出分割\n", "_____no_output_____" ] ], [ [ "## 其他\n```\nre.RegexObject\nre.compile()\n返回\nRegexObject\n对象。\n\nre.MatchObject\ngroup()\n返回被\nRE\n匹配的字符串。\n```\n", "_____no_output_____" ] ], [ [ "dytt_title = \".*\\[(.*)\\].*\"\n\nname_0 = r\"罗拉快跑BD国德双语中字[电影天堂www.dy2018.com].mkv\"\nname_1 = r\"[电影天堂www.dy2018.com]罗拉快跑BD国德双语中字.mkv\"\n\nprint(1, re.findall(dytt_title, name_0))\nprint(1, re.findall(dytt_title, name_1))\n\ndata = \"xxxxxxxxxxxentry某某内容for-----------\"\nresult = re.findall(\".*entry(.*)for.*\", data)\nprint(3, result)\n", "_____no_output_____" ] ] ]
[ "code", "markdown", "code", "markdown", "code" ]
[ [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code", "code", "code", "code", "code", "code" ], [ "markdown" ], [ "code" ] ]