tomekkorbak/naughty_davinci (Updated • 10)

| org_text (string, lengths 761–968k) | texts (sequence) | scores (sequence) | num_lines (int64, 1–25.7k) | avg_score (float64, 0–0.31) |
---|---|---|---|---|
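Each row below pairs a source file (org_text) with the same text split into lines (texts), a per-line score (scores), the number of lines (num_lines), and an average per-line score (avg_score), which for the rows shown matches the mean of the scores list. If this preview corresponds to the Hugging Face dataset of the same name, it could presumably be loaded with the standard datasets API; a minimal sketch, assuming a 'train' split exists:

from datasets import load_dataset

# Hypothetical loading sketch for the preview shown below; the split name is an assumption.
ds = load_dataset("tomekkorbak/naughty_davinci", split="train")
row = ds[0]
print(row["num_lines"], row["avg_score"])
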
#!/usr/bin/env python
""" patrol_smach.py - Version 1.0 2013-04-12
Control a robot to patrol a square area using SMACH
Created for the Pi Robot Project: http://www.pirobot.org
Copyright (c) 2013 Patrick Goebel. All rights reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details at:
http://www.gnu.org/licenses/gpl.html
"""
import rospy
from smach import State, StateMachine
from smach_ros import SimpleActionState, IntrospectionServer
from geometry_msgs.msg import Twist
from rbx2_tasks.task_setup import *
class Patrol():
def __init__(self):
rospy.init_node('patrol_smach', anonymous=False)
# Set the shutdown function (stop the robot)
rospy.on_shutdown(self.shutdown)
# Initialize a number of parameters and variables
setup_task_environment(self)
# Track success rate of getting to the goal locations
self.n_succeeded = 0
self.n_aborted = 0
self.n_preempted = 0
# A list to hold the navigation waypoints
nav_states = list()
# Turn the waypoints into SMACH states
for waypoint in self.waypoints:
nav_goal = MoveBaseGoal()
nav_goal.target_pose.header.frame_id = 'base_footprint'
nav_goal.target_pose.pose = waypoint
move_base_state = SimpleActionState('move_base', MoveBaseAction, goal=nav_goal, result_cb=self.move_base_result_cb,
exec_timeout=rospy.Duration(10.0),
server_wait_timeout=rospy.Duration(10.0))
nav_states.append(move_base_state)
# Initialize the patrol state machine
self.sm_patrol = StateMachine(outcomes=['succeeded','aborted','preempted'])
# Add the states to the state machine with the appropriate transitions
with self.sm_patrol:
StateMachine.add('NAV_STATE_0', nav_states[0], transitions={'succeeded':'NAV_STATE_1','aborted':'NAV_STATE_1','preempted':'NAV_STATE_1'})
StateMachine.add('NAV_STATE_1', nav_states[1], transitions={'succeeded':'NAV_STATE_2','aborted':'NAV_STATE_2','preempted':'NAV_STATE_2'})
StateMachine.add('NAV_STATE_2', nav_states[2], transitions={'succeeded':'NAV_STATE_3','aborted':'NAV_STATE_3','preempted':'NAV_STATE_3'})
StateMachine.add('NAV_STATE_3', nav_states[3], transitions={'succeeded':'NAV_STATE_4','aborted':'NAV_STATE_4','preempted':'NAV_STATE_4'})
StateMachine.add('NAV_STATE_4', nav_states[0], transitions={'succeeded':'','aborted':'','preempted':''})
# Create and start the SMACH introspection server
intro_server = IntrospectionServer('patrol', self.sm_patrol, '/SM_ROOT')
intro_server.start()
# Execute the state machine for the specified number of patrols
while (self.n_patrols == -1 or self.patrol_count < self.n_patrols) and not rospy.is_shutdown():
sm_outcome = self.sm_patrol.execute()
self.patrol_count += 1
rospy.loginfo("FINISHED PATROL LOOP: " + str(self.patrol_count))
rospy.loginfo('State Machine Outcome: ' + str(sm_outcome))
intro_server.stop()
def move_base_result_cb(self, userdata, status, result):
if status == actionlib.GoalStatus.SUCCEEDED:
self.n_succeeded += 1
elif status == actionlib.GoalStatus.ABORTED:
self.n_aborted += 1
elif status == actionlib.GoalStatus.PREEMPTED:
self.n_preempted += 1
try:
rospy.loginfo("Success rate: " + str(100.0 * self.n_succeeded / (self.n_succeeded + self.n_aborted + self.n_preempted)))
except:
pass
def shutdown(self):
rospy.loginfo("Stopping the robot...")
self.sm_patrol.request_preempt()
self.cmd_vel_pub.publish(Twist())
rospy.sleep(1)
if __name__ == '__main__':
try:
Patrol()
except rospy.ROSInterruptException:
rospy.loginfo("SMACH test finished.")
| [
"#!/usr/bin/env python\n",
"\n",
"\"\"\" patrol_smach.py - Version 1.0 2013-04-12\n",
"\n",
" Control a robot to patrol a square area using SMACH\n",
"\n",
" Created for the Pi Robot Project: http://www.pirobot.org\n",
" Copyright (c) 2013 Patrick Goebel. All rights reserved.\n",
"\n",
" This program is free software; you can redistribute it and/or modify\n",
" it under the terms of the GNU General Public License as published by\n",
" the Free Software Foundation; either version 2 of the License, or\n",
" (at your option) any later version.5\n",
" \n",
" This program is distributed in the hope that it will be useful,\n",
" but WITHOUT ANY WARRANTY; without even the implied warranty of\n",
" MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n",
" GNU General Public License for more details at:\n",
" \n",
" http://www.gnu.org/licenses/gpl.htmlPoint\n",
" \n",
"\"\"\"\n",
"\n",
"import rospy\n",
"from smach import State, StateMachine\n",
"from smach_ros import SimpleActionState, IntrospectionServer\n",
"from geometry_msgs.msg import Twist\n",
"from rbx2_tasks.task_setup import *\n",
"\n",
"class Patrol():\n",
" def __init__(self):\n",
" rospy.init_node('patrol_smach', anonymous=False)\n",
" \n",
" # Set the shutdown function (stop the robot)\n",
" rospy.on_shutdown(self.shutdown)\n",
" \n",
" # Initialize a number of parameters and variables\n",
" setup_task_environment(self)\n",
" \n",
" # Track success rate of getting to the goal locations\n",
" self.n_succeeded = 0\n",
" self.n_aborted = 0\n",
" self.n_preempted = 0\n",
"\n",
" # A list to hold then navigation waypoints\n",
" nav_states = list()\n",
" \n",
" # Turn the waypoints into SMACH states\n",
" for waypoint in self.waypoints: \n",
" nav_goal = MoveBaseGoal()\n",
" nav_goal.target_pose.header.frame_id = 'base_footprint'\n",
" nav_goal.target_pose.pose = waypoint\n",
" move_base_state = SimpleActionState('move_base', MoveBaseAction, goal=nav_goal, result_cb=self.move_base_result_cb,\n",
" exec_timeout=rospy.Duration(10.0),\n",
" server_wait_timeout=rospy.Duration(10.0))\n",
" nav_states.append(move_base_state)\n",
" \n",
" # Initialize the patrol state machine\n",
" self.sm_patrol = StateMachine(outcomes=['succeeded','aborted','preempted'])\n",
"\n",
" # Add the states to the state machine with the appropriate transitions\n",
" with self.sm_patrol: \n",
" StateMachine.add('NAV_STATE_0', nav_states[0], transitions={'succeeded':'NAV_STATE_1','aborted':'NAV_STATE_1','preempted':'NAV_STATE_1'})\n",
" StateMachine.add('NAV_STATE_1', nav_states[1], transitions={'succeeded':'NAV_STATE_2','aborted':'NAV_STATE_2','preempted':'NAV_STATE_2'})\n",
" StateMachine.add('NAV_STATE_2', nav_states[2], transitions={'succeeded':'NAV_STATE_3','aborted':'NAV_STATE_3','preempted':'NAV_STATE_3'})\n",
" StateMachine.add('NAV_STATE_3', nav_states[3], transitions={'succeeded':'NAV_STATE_4','aborted':'NAV_STATE_4','preempted':'NAV_STATE_4'})\n",
" StateMachine.add('NAV_STATE_4', nav_states[0], transitions={'succeeded':'','aborted':'','preempted':''})\n",
" \n",
" # Create and start the SMACH introspection server\n",
" intro_server = IntrospectionServer('patrol', self.sm_patrol, '/SM_ROOT')\n",
" intro_server.start()\n",
" \n",
" # Execute the state machine for the specified number of patrols\n",
" while (self.n_patrols == -1 or self.patrol_count < self.n_patrols) and not rospy.is_shutdown():\n",
" sm_outcome = self.sm_patrol.execute()\n",
" self.patrol_count += 1\n",
" rospy.loginfo(\"FINISHED PATROL LOOP: \" + str(self.patrol_count))\n",
" \n",
" rospy.loginfo('State Machine Outcome: ' + str(sm_outcome))\n",
" \n",
" intro_server.stop()\n",
" \n",
" def move_base_result_cb(self, userdata, status, result):\n",
" if status == actionlib.GoalStatus.SUCCEEDED:\n",
" self.n_succeeded += 1\n",
" elif status == actionlib.GoalStatus.ABORTED:\n",
" self.n_aborted += 1\n",
" elif status == actionlib.GoalStatus.PREEMPTED:\n",
" self.n_preempted += 1\n",
"\n",
" try:\n",
" rospy.loginfo(\"Success rate: \" + str(100.0 * self.n_succeeded / (self.n_succeeded + self.n_aborted + self.n_preempted)))\n",
" except:\n",
" pass\n",
"\n",
" def shutdown(self):\n",
" rospy.loginfo(\"Stopping the robot...\")\n",
" \n",
" self.sm_patrol.request_preempt()\n",
" \n",
" self.cmd_vel_pub.publish(Twist())\n",
" \n",
" rospy.sleep(1)\n",
"\n",
"if __name__ == '__main__':\n",
" try:\n",
" Patrol()\n",
" except rospy.ROSInterruptException:\n",
" rospy.loginfo(\"SMACH test finished.\")\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.2,
0,
0,
0,
0,
0.2,
0,
0.14285714285714285,
0,
0,
0,
0,
0,
0,
0,
0,
0.0625,
0,
0,
0.1111111111111111,
0,
0,
0.1111111111111111,
0,
0,
0.1111111111111111,
0,
0,
0,
0,
0,
0,
0,
0.1111111111111111,
0,
0.0196078431372549,
0,
0,
0,
0.0078125,
0.023809523809523808,
0.02197802197802198,
0,
0.1111111111111111,
0,
0.03571428571428571,
0,
0,
0.024390243902439025,
0.04,
0.04,
0.04,
0.04,
0.05128205128205128,
0.07692307692307693,
0,
0.012345679012345678,
0,
0.1111111111111111,
0,
0.009615384615384616,
0,
0,
0,
0.1111111111111111,
0,
0.058823529411764705,
0,
0.1111111111111111,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.014925373134328358,
0.0625,
0,
0,
0,
0,
0.1111111111111111,
0,
0.1111111111111111,
0,
0.1111111111111111,
0,
0,
0.037037037037037035,
0,
0,
0,
0
] | 109 | 0.022425 |
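For the first row, num_lines is 109 and avg_score is 0.022425, i.e. the length of the texts list and the mean of the scores list. A minimal sketch of how those two derived fields can be recomputed from a row (the function name is illustrative, not part of the dataset):

def row_summary(texts, scores):
    # Length of the line list and mean per-line score, as shown in the table row above.
    return len(texts), sum(scores) / len(scores)

# For the row above this is expected to return (109, ~0.022425).
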
from __future__ import division, print_function, absolute_import
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import tensorflow as tf
import tflearn
from tflearn.data_utils import to_categorical
import tflearn.data_utils as du
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.conv import conv_2d, max_pool_2d
from tflearn.layers.normalization import local_response_normalization
from tflearn.layers.estimator import regression
import matplotlib.pyplot as plt
import matplotlib as matplot
import seaborn as sns
import random
trainx = pd.read_csv("/home/cheesecake/GAT/gat/scraping/ArabicTextExtractor/Arabic Handwritten Characters Dataset CSV/csvTrainImages 13440x1024.csv",header=None)
trainy = pd.read_csv("/home/cheesecake/GAT/gat/scraping/ArabicTextExtractor/Arabic Handwritten Characters Dataset CSV/csvTrainLabel 13440x1.csv",header=None)
testx = pd.read_csv("/home/cheesecake/GAT/gat/scraping/ArabicTextExtractor/Arabic Handwritten Characters Dataset CSV/csvTestImages 3360x1024.csv",header=None)
testy = pd.read_csv("/home/cheesecake/GAT/gat/scraping/ArabicTextExtractor/Arabic Handwritten Characters Dataset CSV/csvTestLabel 3360x1.csv",header=None)
# Split data into training set and validation set
#training images
trainx = trainx.values.astype('float32')
#training labels
trainy = trainy.values.astype('int32')-1
#testing images
testx = testx.values.astype('float32')
#testing labels
testy = testy.values.astype('int32')-1
original_trainy = trainy
#One Hot encoding of train labels.
trainy = to_categorical(trainy[:,0],28)
original_testy = testy
#One Hot encoding of test labels.
testy = to_categorical(testy[:,0],28)
# reshape input images to 32x32x1
trainx = trainx.reshape([-1, 32, 32, 1])
testx = testx.reshape([-1, 32, 32, 1])
arabic_labels = ['alef', 'beh', 'teh', 'theh', 'jeem', 'hah', 'khah', 'dal', 'thal',
'reh', 'zain', 'seen', 'sheen', 'sad', 'dad', 'tah', 'zah', 'ain',
'ghain', 'feh', 'qaf', 'kaf', 'lam', 'meem', 'noon', 'heh', 'waw', 'yeh']
#size of images should be 1200 by 2300
#@return - trainy_new: List of tuples that represent the corners of the 32x32 character box in clockwise order starting from top left
def get_image_batch(trainx, num_images):
pad_left = random.randint(5,2265)
pad_right = 2300-32-pad_left
pad_top = random.randint(5,1165)
pad_bottom = 1200-32-pad_top
trainx_new = np.empty((num_images, pad_left + pad_right + 32, pad_top + pad_bottom + 32, 1))
for i in range(num_images):
index = random.randint(0 ,len(trainx)-1)
trainx_new[i] = np.pad(trainx[index], ((pad_left, pad_right), (pad_top, pad_bottom), (0, 0)), 'constant')
trainy_new = [(pad_left,pad_top),(2300-pad_right,pad_top),(2300-pad_right,1200-pad_bottom),(pad_left,1200-pad_bottom)]
return trainx_new, trainy_new
for i in range(10):
images, labels = get_image_batch(trainx,1)
plt.imshow(images[0].squeeze().T)
print(labels)
plt.show()
#x = random.randint(0, 13440)
#plt.imshow(trainx_new[x].squeeze().T)
#plt.title(arabic_labels[original_trainy[x][0]])
#plt.show()
#Zero center every sample with specified mean. If not specified, the mean is evaluated over all samples.
trainx, mean1 = du.featurewise_zero_center(trainx)
testx, mean2 = du.featurewise_zero_center(testx)
print(trainx.shape, trainy.shape, testx.shape, testy.shape)
# Building convolutional network
network = input_data(shape=[None, 32, 32, 1], name='input')
network = conv_2d(network, 80, 3, activation='relu', regularizer="L2")
network = max_pool_2d(network, 2)
network = local_response_normalization(network)
network = conv_2d(network, 64, 3, activation='relu', regularizer="L2")
network = max_pool_2d(network, 2)
network = local_response_normalization(network)
network = fully_connected(network, 1024, activation='relu')
network = dropout(network, 0.8)
network = fully_connected(network, 512, activation='relu')
network = dropout(network, 0.8)
network = fully_connected(network, 28, activation='softmax')
network = regression(network, optimizer='sgd', learning_rate=0.01,
loss='categorical_crossentropy', name='target')
# model compile
model = tflearn.DNN(network, tensorboard_verbose=0)
#model fitting
scores = []
for i in range(100):
model.fit({'input': trainx}, {'target': trainy}, n_epoch=1,
validation_set=({'input': testx}, {'target': testy}),
snapshot_step=100, show_metric=True, run_id='convnet_arabic_digits')
score = model.evaluate(testx, testy)
print('Test accuracy: %0.2f%%' % (score[0] * 100))
scores.append(score[0]*100)
print(scores)
x = list(range(len(scores)))
y = []
for el in x:
y.append(el + 1)
plt.plot(y, scores, 'k-')
plt.title("Accuracy vs Epochs Trained")
plt.xlabel("Num Epochs")
plt.ylabel("Accuracy on Testing Data")
plt.grid()
plt.show(block=False)
plt.pause(.1)
plt.savefig('AccuracyGraph.pdf')
print(scores)
| [
"from __future__ import division, print_function, absolute_import\n",
"\n",
"import numpy as np # linear algebra\n",
"import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)\n",
"import tensorflow as tf\n",
"import tflearn\n",
"from tflearn.data_utils import to_categorical\n",
"import tflearn.data_utils as du\n",
"from tflearn.layers.core import input_data, dropout, fully_connected\n",
"from tflearn.layers.conv import conv_2d, max_pool_2d\n",
"from tflearn.layers.normalization import local_response_normalization\n",
"from tflearn.layers.estimator import regression\n",
"import matplotlib.pyplot as plt\n",
"import matplotlib as matplot\n",
"import seaborn as sns\n",
"import random\n",
"\n",
"\n",
"trainx = pd.read_csv(\"/home/cheesecake/GAT/gat/scraping/ArabicTextExtractor/Arabic Handwritten Characters Dataset CSV/csvTrainImages 13440x1024.csv\",header=None)\n",
"trainy = pd.read_csv(\"/home/cheesecake/GAT/gat/scraping/ArabicTextExtractor/Arabic Handwritten Characters Dataset CSV/csvTrainLabel 13440x1.csv\",header=None)\n",
"\n",
"testx = pd.read_csv(\"/home/cheesecake/GAT/gat/scraping/ArabicTextExtractor/Arabic Handwritten Characters Dataset CSV/csvTestImages 3360x1024.csv\",header=None)\n",
"testy = pd.read_csv(\"/home/cheesecake/GAT/gat/scraping/ArabicTextExtractor/Arabic Handwritten Characters Dataset CSV/csvTestLabel 3360x1.csv\",header=None)\n",
"# Split data into training set and validation set\n",
"#training images\n",
"trainx = trainx.values.astype('float32')\n",
"#training labels\n",
"trainy = trainy.values.astype('int32')-1\n",
"#testing images\n",
"testx = testx.values.astype('float32')\n",
"#testing labels\n",
"testy = testy.values.astype('int32')-1\n",
"original_trainy = trainy\n",
"#One Hot encoding of train labels.\n",
"trainy = to_categorical(trainy[:,0],28)\n",
"original_testy = testy\n",
"#One Hot encoding of test labels.\n",
"testy = to_categorical(testy[:,0],28)\n",
"# reshape input images to 28x28x1\n",
"trainx = trainx.reshape([-1, 32, 32, 1])\n",
"testx = testx.reshape([-1, 32, 32, 1])\n",
"\n",
"arabic_labels = ['alef', 'beh', 'teh', 'theh', 'jeem', 'hah', 'khah', 'dal', 'thal',\n",
" 'reh', 'zain', 'seen', 'sheen', 'sad', 'dad', 'tah', 'zah', 'ain',\n",
" 'ghain', 'feh', 'qaf', 'kaf', 'lam', 'meem', 'noon', 'heh', 'waw', 'yeh']\n",
"\n",
"#size of images should be 1200 by 2300\n",
"\n",
"\n",
"#@return - trainy_new: List of tuples that represent the corners of the 32x32 character box in clockwise order starting from top left\n",
"def get_image_batch(trainx, num_images):\n",
" pad_left = random.randint(5,2265)\n",
" pad_right = 2300-32-pad_left\n",
" pad_top = random.randint(5,1165)\n",
" pad_bottom = 1200-32-pad_top\n",
" trainx_new = np.empty((num_images, pad_left + pad_right + 32, pad_top + pad_bottom + 32, 1))\n",
" for i in range(num_images):\n",
" index = random.randint(0 ,len(trainx)-1)\n",
" trainx_new[i] = np.pad(trainx[index], ((pad_left, pad_right), (pad_top, pad_bottom), (0, 0)), 'constant')\n",
" trainy_new = [(pad_left,pad_top),(2300-pad_right,pad_top),(2300-pad_right,1200-pad_bottom),(pad_left,1200-pad_bottom)]\n",
" return trainx_new, trainy_new\n",
"\n",
"\n",
"\n",
"for i in range(10):\n",
" images, labels = get_image_batch(trainx,1)\n",
" plt.imshow(images[0].squeeze().T)\n",
" print(labels)\n",
" plt.show()\n",
"\n",
"#x = random.randint(0, 13440)\n",
"#plt.imshow(trainx_new[x].squeeze().T)\n",
"#plt.title(arabic_labels[original_trainy[x][0]])\n",
"#plt.show()\n",
"\n",
"#Zero center every sample with specified mean. If not specified, the mean is evaluated over all samples.\n",
"trainx, mean1 = du.featurewise_zero_center(trainx)\n",
"testx, mean2 = du.featurewise_zero_center(testx)\n",
"\n",
"print(trainx.shape, trainy.shape, testx.shape, testy.shape)\n",
"\n",
"# Building convolutional network\n",
"network = input_data(shape=[None, 32, 32, 1], name='input')\n",
"network = conv_2d(network, 80, 3, activation='relu', regularizer=\"L2\")\n",
"network = max_pool_2d(network, 2)\n",
"network = local_response_normalization(network)\n",
"network = conv_2d(network, 64, 3, activation='relu', regularizer=\"L2\")\n",
"network = max_pool_2d(network, 2)\n",
"network = local_response_normalization(network)\n",
"network = fully_connected(network, 1024, activation='relu')\n",
"network = dropout(network, 0.8)\n",
"network = fully_connected(network, 512, activation='relu')\n",
"network = dropout(network, 0.8)\n",
"network = fully_connected(network, 28, activation='softmax')\n",
"network = regression(network, optimizer='sgd', learning_rate=0.01,\n",
" loss='categorical_crossentropy', name='target')\n",
"\n",
"#model complile\n",
"model = tflearn.DNN(network, tensorboard_verbose=0)\n",
"#model fitting\n",
"scores = []\n",
"\n",
"for i in range(100):\n",
" model.fit({'input': trainx}, {'target': trainy}, n_epoch=1,\n",
" validation_set=({'input': testx}, {'target': testy}),\n",
" snapshot_step=100, show_metric=True, run_id='convnet_arabic_digits')\n",
" score = model.evaluate(testx, testy)\n",
" print('Test accuracy: %0.2f%%' % (score[0] * 100))\n",
" scores.append(score[0]*100)\n",
" print(scores)\n",
" x = list(range(len(scores)))\n",
" y = []\n",
" for el in x:\n",
" y.append(el + 1)\n",
" plt.plot(y, scores, 'k-')\n",
" plt.title(\"Accuracy vs Epochs Trained\")\n",
" plt.xlabel(\"Num Epochs\")\n",
" plt.ylabel(\"Accuracy on Testing Data\")\n",
" plt.grid()\n",
" plt.show(block=False)\n",
" plt.pause(.1)\n",
"\n",
"plt.savefig('AccuracyGraph.pdf')\n",
"\n",
"print(scores)\n"
] | [
0,
0,
0.027777777777777776,
0.014084507042253521,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012345679012345678,
0.012658227848101266,
0,
0.012578616352201259,
0.012903225806451613,
0,
0.058823529411764705,
0,
0.058823529411764705,
0,
0.0625,
0,
0.0625,
0,
0,
0.02857142857142857,
0.05,
0,
0.029411764705882353,
0.05263157894736842,
0,
0,
0,
0,
0.011764705882352941,
0.024096385542168676,
0.022222222222222223,
0,
0.02564102564102564,
0,
0,
0.014925373134328358,
0,
0.02631578947368421,
0,
0.02702702702702703,
0,
0.010309278350515464,
0,
0.04081632653061224,
0.008771929824561403,
0.06504065040650407,
0,
0,
0,
0,
0.05,
0.02127659574468085,
0,
0,
0,
0,
0.03333333333333333,
0.02564102564102564,
0.02040816326530612,
0.08333333333333333,
0,
0.01904761904761905,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.0625,
0,
0.06666666666666667,
0,
0,
0,
0,
0.015384615384615385,
0.0125,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | 125 | 0.009461 |
#!/usr/bin/env python3
# THIS FILE IS PART OF THE CYLC SUITE ENGINE.
# Copyright (C) NIWA & British Crown (Met Office) & Contributors.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""cylc [info] show [OPTIONS] ARGS
Query a running workflow for:
cylc show REG - workflow metadata
cylc show REG TASK_NAME - task metadata
cylc show REG TASK_GLOB - prerequisites and outputs of task instances
Prerequisite and output status is indicated for current active tasks.
"""
import json
import sys
from ansimarkup import ansiprint
from cylc.flow import ID_DELIM
from cylc.flow.option_parsers import CylcOptionParser as COP
from cylc.flow.network.client import SuiteRuntimeClient
from cylc.flow.task_id import TaskID
from cylc.flow.terminal import cli_function
WORKFLOW_META_QUERY = '''
query ($wFlows: [ID]!) {
workflows (ids: $wFlows, stripNull: false) {
meta {
title
description
URL
userDefined
}
}
}
'''
TASK_META_QUERY = '''
query ($wFlows: [ID]!, $taskIds: [ID]) {
tasks (workflows: $wFlows, ids: $taskIds, stripNull: false) {
name
meta {
title
description
URL
userDefined
}
}
}
'''
TASK_PREREQS_QUERY = '''
query ($wFlows: [ID]!, $taskIds: [ID]) {
taskProxies (workflows: $wFlows, ids: $taskIds, stripNull: false) {
name
cyclePoint
task {
meta {
title
description
URL
userDefined
}
}
prerequisites {
expression
conditions {
exprAlias
taskId
reqState
message
satisfied
}
satisfied
}
outputs
extras
}
}
'''
def print_msg_state(msg, state):
if state:
ansiprint(f'<green> + {msg}</green>')
else:
ansiprint(f'<red> - {msg}</red>')
def flatten_data(data, flat_data=None):
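"""Recursively flatten nested dicts (and lists of dicts) into a single flat dict; later keys overwrite earlier ones."""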
if flat_data is None:
flat_data = {}
for key, value in data.items():
if isinstance(value, dict):
flatten_data(value, flat_data)
elif isinstance(value, list):
for member in value:
flatten_data(member, flat_data)
else:
flat_data[key] = value
return flat_data
def get_option_parser():
parser = COP(
__doc__, comms=True, multitask=True,
argdoc=[
('REG', 'Suite name'),
('[TASK_NAME or TASK_GLOB ...]', 'Task names or match patterns')])
parser.add_option('--list-prereqs', action="store_true", default=False,
help="Print a task's pre-requisites as a list.")
parser.add_option('--json', action="store_true", default=False,
help="Print output in JSON format.")
return parser
@cli_function(get_option_parser)
def main(_, options, suite, *task_args):
"""Implement "cylc show" CLI."""
pclient = SuiteRuntimeClient(suite, timeout=options.comms_timeout)
json_filter = {}
if not task_args:
query = WORKFLOW_META_QUERY
query_kwargs = {
'request_string': query,
'variables': {'wFlows': [suite]}
}
# Print suite info.
results = pclient('graphql', query_kwargs)
for workflow in results['workflows']:
flat_data = flatten_data(workflow)
if options.json:
json_filter.update(flat_data)
else:
for key, value in sorted(flat_data.items(), reverse=True):
ansiprint(
f'<bold>{key}:</bold> {value or "<m>(not given)</m>"}')
task_names = [arg for arg in task_args if TaskID.is_valid_name(arg)]
task_ids = [arg for arg in task_args if TaskID.is_valid_id_2(arg)]
if task_names:
tasks_query = TASK_META_QUERY
tasks_kwargs = {
'request_string': tasks_query,
'variables': {'wFlows': [suite], 'taskIds': task_names}
}
# Print suite info.
results = pclient('graphql', tasks_kwargs)
multi = len(results['tasks']) > 1
for task in results['tasks']:
flat_data = flatten_data(task['meta'])
if options.json:
json_filter.update({task['name']: flat_data})
else:
if multi:
print(f'----\nTASK NAME: {task["name"]}')
for key, value in sorted(flat_data.items(), reverse=True):
ansiprint(
f'<bold>{key}:</bold> {value or "<m>(not given)</m>"}')
if task_ids:
tp_query = TASK_PREREQS_QUERY
tp_kwargs = {
'request_string': tp_query,
'variables': {
'wFlows': [suite],
'taskIds': [
f'{c}{ID_DELIM}{n}'
for n, c in [
TaskID.split(t_id)
for t_id in task_ids
if TaskID.is_valid_id(t_id)
]
] + [
f'{c}{ID_DELIM}{n}'
for c, n in [
t_id.rsplit(TaskID.DELIM2, 1)
for t_id in task_ids
if not TaskID.is_valid_id(t_id)
]
]
}
}
results = pclient('graphql', tp_kwargs)
multi = len(results['taskProxies']) > 1
for t_proxy in results['taskProxies']:
task_id = TaskID.get(t_proxy['name'], t_proxy['cyclePoint'])
if options.json:
json_filter.update({task_id: t_proxy})
else:
if multi:
print(f'----\nTASK ID: {task_id}')
prereqs = []
for item in t_proxy['prerequisites']:
prefix = ''
multi_cond = len(item['conditions']) > 1
if multi_cond:
prereqs.append([
True,
'',
item['expression'].replace('c', ''),
item['satisfied']
])
for cond in item['conditions']:
if multi_cond and not options.list_prereqs:
prefix = f'\t{cond["exprAlias"].strip("c")} = '
_, _, point, name = cond['taskId'].split(ID_DELIM)
cond_id = TaskID.get(name, point)
prereqs.append([
False,
prefix,
f'{cond_id} {cond["reqState"]}',
cond['satisfied']
])
if options.list_prereqs:
for composite, _, msg, _ in prereqs:
if not composite:
print(msg)
else:
flat_meta = flatten_data(t_proxy['task']['meta'])
for key, value in sorted(flat_meta.items(), reverse=True):
ansiprint(
f'<bold>{key}:</bold>'
f' {value or "<m>(not given)</m>"}')
ansiprint(
'\n<bold>prerequisites</bold>'
' (<red>- => not satisfied</red>):')
if not prereqs:
print(' (None)')
for _, prefix, msg, state in prereqs:
print_msg_state(f'{prefix}{msg}', state)
ansiprint(
'\n<bold>outputs</bold>'
' (<red>- => not completed</red>):')
if not t_proxy['outputs']:
print(' (None)')
for key, val in t_proxy['outputs'].items():
print_msg_state(f'{task_id} {key}', val)
if t_proxy['extras']:
print('\nother:')
for key, value in t_proxy['extras'].items():
print(' o %s ... %s' % (key, value))
if not results['taskProxies']:
ansiprint(
f"<red>No matching tasks found: {task_ids}",
file=sys.stderr)
sys.exit(1)
if options.json:
print(json.dumps(json_filter, indent=4))
if __name__ == "__main__":
main()
| [
"#!/usr/bin/env python3\n",
"\n",
"# THIS FILE IS PART OF THE CYLC SUITE ENGINE.\n",
"# Copyright (C) NIWA & British Crown (Met Office) & Contributors.\n",
"#\n",
"# This program is free software: you can redistribute it and/or modify\n",
"# it under the terms of the GNU General Public License as published by\n",
"# the Free Software Foundation, either version 3 of the License, or\n",
"# (at your option) any later version.\n",
"#\n",
"# This program is distributed in the hope that it will be useful,\n",
"# but WITHOUT ANY WARRANTY; without even the implied warranty of\n",
"# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n",
"# GNU General Public License for more details.\n",
"#\n",
"# You should have received a copy of the GNU General Public License\n",
"# along with this program. If not, see <http://www.gnu.org/licenses/>.\n",
"\n",
"\"\"\"cylc [info] show [OPTIONS] ARGS\n",
"\n",
"Query a running workflow for:\n",
" cylc show REG - workflow metadata\n",
" cylc show REG TASK_NAME - task metadata\n",
" cylc show REG TASK_GLOB - prerequisites and outputs of task instances\n",
"\n",
"Prerequisite and output status is indicated for current active tasks.\n",
"\"\"\"\n",
"\n",
"import json\n",
"import sys\n",
"\n",
"from ansimarkup import ansiprint\n",
"\n",
"from cylc.flow import ID_DELIM\n",
"from cylc.flow.option_parsers import CylcOptionParser as COP\n",
"from cylc.flow.network.client import SuiteRuntimeClient\n",
"from cylc.flow.task_id import TaskID\n",
"from cylc.flow.terminal import cli_function\n",
"\n",
"\n",
"WORKFLOW_META_QUERY = '''\n",
"query ($wFlows: [ID]!) {\n",
" workflows (ids: $wFlows, stripNull: false) {\n",
" meta {\n",
" title\n",
" description\n",
" URL\n",
" userDefined\n",
" }\n",
" }\n",
"}\n",
"'''\n",
"\n",
"TASK_META_QUERY = '''\n",
"query ($wFlows: [ID]!, $taskIds: [ID]) {\n",
" tasks (workflows: $wFlows, ids: $taskIds, stripNull: false) {\n",
" name\n",
" meta {\n",
" title\n",
" description\n",
" URL\n",
" userDefined\n",
" }\n",
" }\n",
"}\n",
"'''\n",
"\n",
"TASK_PREREQS_QUERY = '''\n",
"query ($wFlows: [ID]!, $taskIds: [ID]) {\n",
" taskProxies (workflows: $wFlows, ids: $taskIds, stripNull: false) {\n",
" name\n",
" cyclePoint\n",
" task {\n",
" meta {\n",
" title\n",
" description\n",
" URL\n",
" userDefined\n",
" }\n",
" }\n",
" prerequisites {\n",
" expression\n",
" conditions {\n",
" exprAlias\n",
" taskId\n",
" reqState\n",
" message\n",
" satisfied\n",
" }\n",
" satisfied\n",
" }\n",
" outputs\n",
" extras\n",
" }\n",
"}\n",
"'''\n",
"\n",
"\n",
"def print_msg_state(msg, state):\n",
" if state:\n",
" ansiprint(f'<green> + {msg}</green>')\n",
" else:\n",
" ansiprint(f'<red> - {msg}</red>')\n",
"\n",
"\n",
"def flatten_data(data, flat_data=None):\n",
" if flat_data is None:\n",
" flat_data = {}\n",
" for key, value in data.items():\n",
" if isinstance(value, dict):\n",
" flatten_data(value, flat_data)\n",
" elif isinstance(value, list):\n",
" for member in value:\n",
" flatten_data(member, flat_data)\n",
" else:\n",
" flat_data[key] = value\n",
" return flat_data\n",
"\n",
"\n",
"def get_option_parser():\n",
" parser = COP(\n",
" __doc__, comms=True, multitask=True,\n",
" argdoc=[\n",
" ('REG', 'Suite name'),\n",
" ('[TASK_NAME or TASK_GLOB ...]', 'Task names or match patterns')])\n",
"\n",
" parser.add_option('--list-prereqs', action=\"store_true\", default=False,\n",
" help=\"Print a task's pre-requisites as a list.\")\n",
"\n",
" parser.add_option('--json', action=\"store_true\", default=False,\n",
" help=\"Print output in JSON format.\")\n",
"\n",
" return parser\n",
"\n",
"\n",
"@cli_function(get_option_parser)\n",
"def main(_, options, suite, *task_args):\n",
" \"\"\"Implement \"cylc show\" CLI.\"\"\"\n",
" pclient = SuiteRuntimeClient(suite, timeout=options.comms_timeout)\n",
" json_filter = {}\n",
"\n",
" if not task_args:\n",
" query = WORKFLOW_META_QUERY\n",
" query_kwargs = {\n",
" 'request_string': query,\n",
" 'variables': {'wFlows': [suite]}\n",
" }\n",
" # Print suite info.\n",
" results = pclient('graphql', query_kwargs)\n",
" for workflow in results['workflows']:\n",
" flat_data = flatten_data(workflow)\n",
" if options.json:\n",
" json_filter.update(flat_data)\n",
" else:\n",
" for key, value in sorted(flat_data.items(), reverse=True):\n",
" ansiprint(\n",
" f'<bold>{key}:</bold> {value or \"<m>(not given)</m>\"}')\n",
"\n",
" task_names = [arg for arg in task_args if TaskID.is_valid_name(arg)]\n",
" task_ids = [arg for arg in task_args if TaskID.is_valid_id_2(arg)]\n",
"\n",
" if task_names:\n",
" tasks_query = TASK_META_QUERY\n",
" tasks_kwargs = {\n",
" 'request_string': tasks_query,\n",
" 'variables': {'wFlows': [suite], 'taskIds': task_names}\n",
" }\n",
" # Print suite info.\n",
" results = pclient('graphql', tasks_kwargs)\n",
" multi = len(results['tasks']) > 1\n",
" for task in results['tasks']:\n",
" flat_data = flatten_data(task['meta'])\n",
" if options.json:\n",
" json_filter.update({task['name']: flat_data})\n",
" else:\n",
" if multi:\n",
" print(f'----\\nTASK NAME: {task[\"name\"]}')\n",
" for key, value in sorted(flat_data.items(), reverse=True):\n",
" ansiprint(\n",
" f'<bold>{key}:</bold> {value or \"<m>(not given)</m>\"}')\n",
"\n",
" if task_ids:\n",
" tp_query = TASK_PREREQS_QUERY\n",
" tp_kwargs = {\n",
" 'request_string': tp_query,\n",
" 'variables': {\n",
" 'wFlows': [suite],\n",
" 'taskIds': [\n",
" f'{c}{ID_DELIM}{n}'\n",
" for n, c in [\n",
" TaskID.split(t_id)\n",
" for t_id in task_ids\n",
" if TaskID.is_valid_id(t_id)\n",
" ]\n",
" ] + [\n",
" f'{c}{ID_DELIM}{n}'\n",
" for c, n in [\n",
" t_id.rsplit(TaskID.DELIM2, 1)\n",
" for t_id in task_ids\n",
" if not TaskID.is_valid_id(t_id)\n",
" ]\n",
" ]\n",
" }\n",
" }\n",
" results = pclient('graphql', tp_kwargs)\n",
" multi = len(results['taskProxies']) > 1\n",
" for t_proxy in results['taskProxies']:\n",
" task_id = TaskID.get(t_proxy['name'], t_proxy['cyclePoint'])\n",
" if options.json:\n",
" json_filter.update({task_id: t_proxy})\n",
" else:\n",
" if multi:\n",
" print(f'----\\nTASK ID: {task_id}')\n",
" prereqs = []\n",
" for item in t_proxy['prerequisites']:\n",
" prefix = ''\n",
" multi_cond = len(item['conditions']) > 1\n",
" if multi_cond:\n",
" prereqs.append([\n",
" True,\n",
" '',\n",
" item['expression'].replace('c', ''),\n",
" item['satisfied']\n",
" ])\n",
" for cond in item['conditions']:\n",
" if multi_cond and not options.list_prereqs:\n",
" prefix = f'\\t{cond[\"exprAlias\"].strip(\"c\")} = '\n",
" _, _, point, name = cond['taskId'].split(ID_DELIM)\n",
" cond_id = TaskID.get(name, point)\n",
" prereqs.append([\n",
" False,\n",
" prefix,\n",
" f'{cond_id} {cond[\"reqState\"]}',\n",
" cond['satisfied']\n",
" ])\n",
" if options.list_prereqs:\n",
" for composite, _, msg, _ in prereqs:\n",
" if not composite:\n",
" print(msg)\n",
" else:\n",
" flat_meta = flatten_data(t_proxy['task']['meta'])\n",
" for key, value in sorted(flat_meta.items(), reverse=True):\n",
" ansiprint(\n",
" f'<bold>{key}:</bold>'\n",
" f' {value or \"<m>(not given)</m>\"}')\n",
" ansiprint(\n",
" '\\n<bold>prerequisites</bold>'\n",
" ' (<red>- => not satisfied</red>):')\n",
" if not prereqs:\n",
" print(' (None)')\n",
" for _, prefix, msg, state in prereqs:\n",
" print_msg_state(f'{prefix}{msg}', state)\n",
"\n",
" ansiprint(\n",
" '\\n<bold>outputs</bold>'\n",
" ' (<red>- => not completed</red>):')\n",
" if not t_proxy['outputs']:\n",
" print(' (None)')\n",
" for key, val in t_proxy['outputs'].items():\n",
" print_msg_state(f'{task_id} {key}', val)\n",
" if t_proxy['extras']:\n",
" print('\\nother:')\n",
" for key, value in t_proxy['extras'].items():\n",
" print(' o %s ... %s' % (key, value))\n",
" if not results['taskProxies']:\n",
" ansiprint(\n",
" f\"<red>No matching tasks found: {task_ids}\",\n",
" file=sys.stderr)\n",
" sys.exit(1)\n",
"\n",
" if options.json:\n",
" print(json.dumps(json_filter, indent=4))\n",
"\n",
"\n",
"if __name__ == \"__main__\":\n",
" main()\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | 276 | 0 |
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
import os
import re
import subprocess
# Matches the output lines from the 'ldd' tool. For example:
# libcrypto.so.10 => /path/to/usr/lib64/libcrypto.so.10 (0x00007fb0cb0a5000)
#
# Note: The following pattern will not match the following two types of
# dependencies and so they will not be included in the output from this module:
#
# 1. The dynamic linker:
# /lib64/ld-linux-x86-64.so.2 (0x00007f6f7ab79000)
# 2. Linux virtual dynamic shared objects:
# linux-vdso.so.1 (0x00007ffc06cfb000)
#
LDD_RE = re.compile(r'^\s+.+? => (\S+) \(0x.+\)')
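# Illustrative check of LDD_RE against the sample lines quoted above (added here for
# clarity; the paths are the examples from the comment, not output from a real ldd run):
#   >>> LDD_RE.match('\tlibcrypto.so.10 => /path/to/usr/lib64/libcrypto.so.10 (0x00007fb0cb0a5000)').group(1)
#   '/path/to/usr/lib64/libcrypto.so.10'
#   >>> LDD_RE.match('\t/lib64/ld-linux-x86-64.so.2 (0x00007f6f7ab79000)') is None
#   True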
class DependencyExtractor(object):
"""
This class extracts native library dependencies from the given executable.
"""
def __init__(self):
self.deps_cache = {}
self.lib_allowed_filter = lambda path: True
self.enable_expand_symlinks = False
def set_library_filter(self, lib_allowed_filter):
"""
Specify a filter predicate that should return True iff the specified
library path should be included in the result from extract_deps().
By default, all libraries are included in the result.
"""
self.lib_allowed_filter = lib_allowed_filter
def set_expand_symlinks(self, expand):
"""
Specify whether symlinks should be expanded in the output from
extract_deps(). By default, symlinks are not expanded. See
expand_symlinks().
"""
self.enable_expand_symlinks = expand
def expand_symlinks(self, deps):
"""
ldd will often point to symlinks. Return a list including any symlink in
the specified dependency list as well as whatever it's pointing to,
recursively.
"""
expanded = []
for path in deps:
expanded.append(path)
while os.path.islink(path):
# TODO(mpercy): os.readlink() can return an absolute path. Should we more carefully handle
# the path concatenation here?
path = os.path.join(os.path.dirname(path), os.readlink(path))
expanded.append(path)
return expanded
def extract_deps(self, exe):
"""
Runs 'ldd' on the provided 'exe' path, returning a list of
any libraries it depends on. Blacklisted libraries are
removed from this list.
If the provided 'exe' is not a binary executable, returns
an empty list.
"""
if (exe.endswith(".jar") or
exe.endswith(".pl") or
exe.endswith(".py") or
exe.endswith(".sh") or
exe.endswith(".txt") or
os.path.isdir(exe)):
return []
if exe not in self.deps_cache:
p = subprocess.Popen(["ldd", exe], stdout=subprocess.PIPE)
out, err = p.communicate()
self.deps_cache[exe] = (out, err, p.returncode)
out, err, rc = self.deps_cache[exe]
if rc != 0:
logging.warning("failed to run ldd on %s", exe)
return []
deps = []
for line in out.splitlines():
match = LDD_RE.match(line)
if not match:
continue
dep = match.group(1)
# Apply the provided predicate.
if not self.lib_allowed_filter(dep):
continue
deps.append(dep)
if self.enable_expand_symlinks:
deps = self.expand_symlinks(deps)
return deps
| [
"#!/usr/bin/env python\n",
"#\n",
"# Licensed to the Apache Software Foundation (ASF) under one\n",
"# or more contributor license agreements. See the NOTICE file\n",
"# distributed with this work for additional information\n",
"# regarding copyright ownership. The ASF licenses this file\n",
"# to you under the Apache License, Version 2.0 (the\n",
"# \"License\"); you may not use this file except in compliance\n",
"# with the License. You may obtain a copy of the License at\n",
"#\n",
"# http://www.apache.org/licenses/LICENSE-2.0\n",
"#\n",
"# Unless required by applicable law or agreed to in writing,\n",
"# software distributed under the License is distributed on an\n",
"# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n",
"# KIND, either express or implied. See the License for the\n",
"# specific language governing permissions and limitations\n",
"# under the License.\n",
"\n",
"import logging\n",
"import os\n",
"import re\n",
"import subprocess\n",
"\n",
"# Matches the output lines from the 'ldd' tool. For example:\n",
"# libcrypto.so.10 => /path/to/usr/lib64/libcrypto.so.10 (0x00007fb0cb0a5000)\n",
"#\n",
"# Note: The following pattern will not match the following two types of\n",
"# dependencies and so they will not be included in the output from this module:\n",
"#\n",
"# 1. The dynamic linker:\n",
"# /lib64/ld-linux-x86-64.so.2 (0x00007f6f7ab79000)\n",
"# 2. Linux virtual dynamic shared objects:\n",
"# linux-vdso.so.1 (0x00007ffc06cfb000)\n",
"#\n",
"LDD_RE = re.compile(r'^\\s+.+? => (\\S+) \\(0x.+\\)')\n",
"\n",
"class DependencyExtractor(object):\n",
" \"\"\"\n",
" This class extracts native library dependencies from the given executable.\n",
" \"\"\"\n",
" def __init__(self):\n",
" self.deps_cache = {}\n",
" self.lib_allowed_filter = lambda path: True\n",
" self.enable_expand_symlinks = False\n",
"\n",
" def set_library_filter(self, lib_allowed_filter):\n",
" \"\"\"\n",
" Specify a filter predicate that should return True iff the specified\n",
" library path should be included in the result from extract_deps().\n",
" By default, all libraries are included in the result.\n",
" \"\"\"\n",
" self.lib_allowed_filter = lib_allowed_filter\n",
"\n",
" def set_expand_symlinks(self, expand):\n",
" \"\"\"\n",
" Specify whether symlinks should be expanded in the output from\n",
" extract_deps(). By default, symlinks are not expanded. See\n",
" expand_symlinks().\n",
" \"\"\"\n",
" self.enable_expand_symlinks = expand\n",
"\n",
" def expand_symlinks(self, deps):\n",
" \"\"\"\n",
" ldd will often point to symlinks. Return a list including any symlink in\n",
" the specified dependency list as well as whatever it's pointing to,\n",
" recursively.\n",
" \"\"\"\n",
" expanded = []\n",
" for path in deps:\n",
" expanded.append(path)\n",
" while os.path.islink(path):\n",
" # TODO(mpercy): os.readlink() can return an absolute path. Should we more carefully handle\n",
" # the path concatenation here?\n",
" path = os.path.join(os.path.dirname(path), os.readlink(path))\n",
" expanded.append(path)\n",
" return expanded\n",
"\n",
" def extract_deps(self, exe):\n",
" \"\"\"\n",
" Runs 'ldd' on the provided 'exe' path, returning a list of\n",
" any libraries it depends on. Blacklisted libraries are\n",
" removed from this list.\n",
"\n",
" If the provided 'exe' is not a binary executable, returns\n",
" an empty list.\n",
" \"\"\"\n",
" if (exe.endswith(\".jar\") or\n",
" exe.endswith(\".pl\") or\n",
" exe.endswith(\".py\") or\n",
" exe.endswith(\".sh\") or\n",
" exe.endswith(\".txt\") or\n",
" os.path.isdir(exe)):\n",
" return []\n",
"\n",
" if exe not in self.deps_cache:\n",
" p = subprocess.Popen([\"ldd\", exe], stdout=subprocess.PIPE)\n",
" out, err = p.communicate()\n",
" self.deps_cache[exe] = (out, err, p.returncode)\n",
"\n",
" out, err, rc = self.deps_cache[exe]\n",
" if rc != 0:\n",
" logging.warning(\"failed to run ldd on %s\", exe)\n",
" return []\n",
"\n",
" deps = []\n",
" for line in out.splitlines():\n",
" match = LDD_RE.match(line)\n",
" if not match:\n",
" continue\n",
" dep = match.group(1)\n",
" # Apply the provided predicate.\n",
" if not self.lib_allowed_filter(dep):\n",
" continue\n",
" deps.append(dep)\n",
"\n",
" if self.enable_expand_symlinks:\n",
" deps = self.expand_symlinks(deps)\n",
" return deps\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.02857142857142857,
0.16666666666666666,
0,
0,
0.045454545454545456,
0,
0,
0,
0,
0.019230769230769232,
0,
0,
0,
0,
0,
0,
0,
0.024390243902439025,
0,
0,
0,
0,
0,
0,
0,
0.02857142857142857,
0,
0,
0,
0,
0,
0,
0,
0.03571428571428571,
0.029411764705882353,
0.010101010101010102,
0,
0,
0,
0,
0,
0.03225806451612903,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.034482758620689655,
0.0625,
0,
0,
0.015384615384615385,
0.030303030303030304,
0.018518518518518517,
0,
0,
0,
0.018518518518518517,
0.0625,
0,
0,
0,
0.030303030303030304,
0.05,
0,
0.037037037037037035,
0.02631578947368421,
0.023255813953488372,
0,
0.043478260869565216,
0,
0,
0.025,
0
] | 119 | 0.007546 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Copyright 2019 Palo Alto Networks, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: panos_log_forwarding_profile
short_description: Manage log forwarding profiles.
description:
- Manages log forwarding profiles.
author: "Garfield Lee Freeman (@shinmog)"
version_added: "2.8"
requirements:
- pan-python
- pandevice >= 0.11.1
notes:
- Panorama is supported.
- Check mode is supported.
extends_documentation_fragment:
- panos.transitional_provider
- panos.vsys_shared
- panos.device_group
options:
name:
description:
- Name of the profile.
required: true
description:
description:
- Profile description
enhanced_logging:
description:
- Valid for PAN-OS 8.1+
- Enabling enhanced application logging.
type: 'bool'
'''
EXAMPLES = '''
# Create a profile
- name: Create log forwarding profile
panos_log_forwarding_profile:
provider: '{{ provider }}'
name: 'my-profile'
enhanced_logging: true
'''
RETURN = '''
# Default return values
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.panos.panos import get_connection
try:
from pandevice.objects import LogForwardingProfile
from pandevice.errors import PanDeviceError
except ImportError:
pass
def main():
helper = get_connection(
vsys_shared=True,
device_group=True,
with_state=True,
with_classic_provider_spec=True,
min_pandevice_version=(0, 11, 1),
min_panos_version=(8, 0, 0),
argument_spec=dict(
name=dict(required=True),
description=dict(),
enhanced_logging=dict(type='bool'),
),
)
module = AnsibleModule(
argument_spec=helper.argument_spec,
supports_check_mode=True,
required_one_of=helper.required_one_of,
)
# Verify imports, build pandevice object tree.
parent = helper.get_pandevice_parent(module)
try:
listing = LogForwardingProfile.refreshall(parent)
except PanDeviceError as e:
module.fail_json(msg='Failed refresh: {0}'.format(e))
spec = {
'name': module.params['name'],
'description': module.params['description'],
'enhanced_logging': module.params['enhanced_logging'],
}
obj = LogForwardingProfile(**spec)
parent.add(obj)
changed = helper.apply_state(obj, listing, module)
module.exit_json(changed=changed, msg='Done')
if __name__ == '__main__':
main()
| [
"#!/usr/bin/env python\n",
"# -*- coding: utf-8 -*-\n",
"\n",
"from __future__ import absolute_import, division, print_function\n",
"__metaclass__ = type\n",
"\n",
"# Copyright 2019 Palo Alto Networks, Inc\n",
"#\n",
"# Licensed under the Apache License, Version 2.0 (the \"License\");\n",
"# you may not use this file except in compliance with the License.\n",
"# You may obtain a copy of the License at\n",
"#\n",
"# http://www.apache.org/licenses/LICENSE-2.0\n",
"#\n",
"# Unless required by applicable law or agreed to in writing, software\n",
"# distributed under the License is distributed on an \"AS IS\" BASIS,\n",
"# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n",
"# See the License for the specific language governing permissions and\n",
"# limitations under the License.\n",
"\n",
"ANSIBLE_METADATA = {'metadata_version': '1.1',\n",
" 'status': ['preview'],\n",
" 'supported_by': 'community'}\n",
"\n",
"DOCUMENTATION = '''\n",
"---\n",
"module: panos_log_forwarding_profile\n",
"short_description: Manage log forwarding profiles.\n",
"description:\n",
" - Manages log forwarding profiles.\n",
"author: \"Garfield Lee Freeman (@shinmog)\"\n",
"version_added: \"2.8\"\n",
"requirements:\n",
" - pan-python\n",
" - pandevice >= 0.11.1\n",
"notes:\n",
" - Panorama is supported.\n",
" - Check mode is supported.\n",
"extends_documentation_fragment:\n",
" - panos.transitional_provider\n",
" - panos.vsys_shared\n",
" - panos.device_group\n",
"options:\n",
" name:\n",
" description:\n",
" - Name of the profile.\n",
" required: true\n",
" description:\n",
" description:\n",
" - Profile description\n",
" enhanced_logging:\n",
" description:\n",
" - Valid for PAN-OS 8.1+\n",
" - Enabling enhanced application logging.\n",
" type: 'bool'\n",
"'''\n",
"\n",
"EXAMPLES = '''\n",
"# Create a profile\n",
"- name: Create log forwarding profile\n",
" panos_log_forwarding_profile:\n",
" provider: '{{ provider }}'\n",
" name: 'my-profile'\n",
" enhanced_logging: true\n",
"'''\n",
"\n",
"RETURN = '''\n",
"# Default return values\n",
"'''\n",
"\n",
"from ansible.module_utils.basic import AnsibleModule\n",
"from ansible.module_utils.network.panos.panos import get_connection\n",
"\n",
"\n",
"try:\n",
" from pandevice.objects import LogForwardingProfile\n",
" from pandevice.errors import PanDeviceError\n",
"except ImportError:\n",
" pass\n",
"\n",
"\n",
"def main():\n",
" helper = get_connection(\n",
" vsys_shared=True,\n",
" device_group=True,\n",
" with_state=True,\n",
" with_classic_provider_spec=True,\n",
" min_pandevice_version=(0, 11, 1),\n",
" min_panos_version=(8, 0, 0),\n",
" argument_spec=dict(\n",
" name=dict(required=True),\n",
" description=dict(),\n",
" enhanced_logging=dict(type='bool'),\n",
" ),\n",
" )\n",
" module = AnsibleModule(\n",
" argument_spec=helper.argument_spec,\n",
" supports_check_mode=True,\n",
" required_one_of=helper.required_one_of,\n",
" )\n",
"\n",
" # Verify imports, build pandevice object tree.\n",
" parent = helper.get_pandevice_parent(module)\n",
"\n",
" try:\n",
" listing = LogForwardingProfile.refreshall(parent)\n",
" except PanDeviceError as e:\n",
" module.fail_json(msg='Failed refresh: {0}'.format(e))\n",
"\n",
" spec = {\n",
" 'name': module.params['name'],\n",
" 'description': module.params['description'],\n",
" 'enhanced_logging': module.params['enhanced_logging'],\n",
" }\n",
" obj = LogForwardingProfile(**spec)\n",
" parent.add(obj)\n",
"\n",
" changed = helper.apply_state(obj, listing, module)\n",
" module.exit_json(changed=changed, msg='Done')\n",
"\n",
"\n",
"if __name__ == '__main__':\n",
" main()\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.018867924528301886,
0.014705882352941176,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | 123 | 0.000273 |
# Copyright 2015, Yahoo Inc.
# Licensed under the terms of the Apache License, Version 2.0. See the LICENSE file associated with the project for terms.
from pyspark.context import SparkContext
import numpy as np
import base64
import cPickle as pkl
from tempfile import NamedTemporaryFile
import os
import subprocess
from operator import add
def default_data_loading(sc, data_path, sampling_ratio, seed):
"""
This function loads training data from a text file, sampling it by the provided
ratio and random seed, and interprets each line as a tab-separated (id, data) pair
where 'data' is assumed to be a base64-encoded pickled numpy array. The ids are discarded.
The data is returned as an RDD of numpy arrays.
"""
# Compute the number of cores in our cluster - used below to heuristically set the number of partitions
# TypeError: int() argument must be a string or a number, not 'NoneType' ?
print sc._conf.toDebugString()
#print sc._conf.getAll()
try:
nb_instances = int(sc._conf.get('spark.executor.instances'))
except Exception as inst:
print "[default_data_loading: error] {}. Setting nb_instances to 2.".format(inst)
nb_instances = 2
try:
nb_executor_cores = int(sc._conf.get('spark.executor.cores'))
except Exception as inst:
print "[default_data_loading: error] {}. Setting nb_executor_cores to 2.".format(inst)
nb_executor_cores = 2
total_cores = nb_instances * nb_executor_cores
# Load and sample down the dataset
d = sc.textFile(data_path, total_cores * 3).sample(False, sampling_ratio, seed)
# The data is (id, vector) tab-delimited pairs where each vector is
# a base64-encoded pickled numpy array
deserialize_vec = lambda s: pkl.loads(base64.decodestring(s.split('\t')[1]))
vecs = d.map(deserialize_vec)
return vecs
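# Illustration (not part of the original job): one input line in the format described in
# the docstring above could be produced as follows; the helper name is hypothetical.
def encode_example_vec(rec_id, vec):
    # Tab-separated (id, base64-encoded pickled numpy array), the inverse of deserialize_vec.
    return '%s\t%s' % (rec_id, base64.b64encode(pkl.dumps(vec, -1)))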
def main(sc, args, data_load_fn=default_data_loading):
def seqOp(a, b):
a += np.outer(b, b)
return a
def combOp(a, b):
a += b
return a
# Load data
d = data_load_fn(sc, args.data, args.sampling_ratio, args.seed)
d.cache()
# Determine the data dimension
D = len(d.first())
print "d.first: {}, D: {}".format(d.first(),D)
# Count data points
count = d.count()
mu = d.aggregate(np.zeros(D), add, add)
mu = mu / float(count)
# Compute covariance estimator
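# i.e. A ≈ (1/(count-1)) * sum_i x_i x_i^T - mu mu^T, with the outer-product sum
# accumulated via treeAggregate so partial results are combined on executors rather
# than all at once on the driver.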
summed_covar = d.treeAggregate(np.zeros((D, D)), seqOp, combOp, depth=args.agg_depth)
A = summed_covar / (count - 1) - np.outer(mu, mu)
E, P = np.linalg.eigh(A)
params = {
'mu': mu, # mean
'P': P, # PCA matrix
'E': E, # eigenvalues
'A': A, # covariance matrix
'c': count # sample size
}
save_hdfs_pickle(params, args.output)
def save_hdfs_pickle(m, pkl_path):
"""
Given a python object and a path on hdfs, save the object as a pickle file locally and copy the file
to the hdfs path.
"""
print 'Saving pickle to temp file...'
f = NamedTemporaryFile(delete=False)
pkl.dump(m, f, -1)
f.close()
print 'Copying pickle file to hdfs...'
copy_to_hdfs(f, pkl_path)
os.remove(f.name)
def copy_to_hdfs(f, hdfs_path):
subprocess.call(['hadoop', 'fs', '-copyFromLocal', f.name, hdfs_path])
if __name__ == '__main__':
from argparse import ArgumentParser
parser = ArgumentParser()
# Data handling parameters
parser.add_argument('--data', dest='data', type=str, required=True, help='hdfs path to input data')
parser.add_argument('--data_udf', dest='data_udf', type=str, default=None, help='module name from which to load a data loading UDF')
parser.add_argument('--seed', dest='seed', type=int, default=None, help='optional random seed')
parser.add_argument('--sampling_ratio', dest='sampling_ratio', type=float, default=1.0, help='proportion of data to sample for training')
parser.add_argument('--agg_depth', dest='agg_depth', type=int, default=4, help='depth of tree aggregation to compute covariance estimator')
parser.add_argument('--output', dest='output', type=str, default=None, help='hdfs path to output pickle file of parameters')
args = parser.parse_args()
sc = SparkContext(appName='PCA')
# Load UDF module if provided
if args.data_udf:
sc.addPyFile('hdfs://memex/user/skaraman/build-lopq-index/lopq/spark/memex_udf.py')
sc.addPyFile('hdfs://memex/user/skaraman/build-lopq-index/lopq/spark/deepsentibanktf_udf.py')
udf_module = __import__(args.data_udf, fromlist=['udf'])
load_udf = udf_module.udf
main(sc, args, data_load_fn=load_udf)
else:
main(sc, args)
sc.stop()
| [
"# Copyright 2015, Yahoo Inc.\n",
"# Licensed under the terms of the Apache License, Version 2.0. See the LICENSE file associated with the project for terms.\n",
"from pyspark.context import SparkContext\n",
"\n",
"import numpy as np\n",
"import base64\n",
"import cPickle as pkl\n",
"from tempfile import NamedTemporaryFile\n",
"import os\n",
"import subprocess\n",
"from operator import add\n",
"\n",
"\n",
"def default_data_loading(sc, data_path, sampling_ratio, seed):\n",
" \"\"\"\n",
" This function loads training data from a text file, sampling it by the provided\n",
" ratio and random seed, and interprets each line as a tab-separated (id, data) pair\n",
" where 'data' is assumed to be a base64-encoded pickled numpy array. The ids are discarded.\n",
" The data is returned as an RDD of numpy arrays.\n",
" \"\"\"\n",
" # Compute the number of cores in our cluster - used below to heuristically set the number of partitions\n",
" # TypeError: int() argument must be a string or a number, not 'NoneType' ?\n",
" print sc._conf.toDebugString()\n",
" #print sc._conf.getAll()\n",
" try:\n",
" nb_instances = int(sc._conf.get('spark.executor.instances'))\n",
" except Exception as inst:\n",
" print \"[default_data_loading: error] {}. Setting nb_instances to 2.\".format(inst)\n",
" nb_instances = 2\n",
" try:\n",
" nb_executor_cores = int(sc._conf.get('spark.executor.cores'))\n",
" except Exception as inst:\n",
" print \"[default_data_loading: error] {}. Setting nb_executor_cores to 2.\".format(inst)\n",
" nb_executor_cores = 2\n",
"\n",
"\n",
" total_cores = nb_instances * nb_executor_cores\n",
"\n",
" # Load and sample down the dataset\n",
" d = sc.textFile(data_path, total_cores * 3).sample(False, sampling_ratio, seed)\n",
"\n",
" # The data is (id, vector) tab-delimited pairs where each vector is\n",
" # a base64-encoded pickled numpy array\n",
" deserialize_vec = lambda s: pkl.loads(base64.decodestring(s.split('\\t')[1]))\n",
" vecs = d.map(deserialize_vec)\n",
"\n",
" return vecs\n",
"\n",
"\n",
"def main(sc, args, data_load_fn=default_data_loading):\n",
"\n",
" def seqOp(a, b):\n",
" a += np.outer(b, b)\n",
" return a\n",
"\n",
" def combOp(a, b):\n",
" a += b\n",
" return a\n",
"\n",
" # Load data\n",
" d = data_load_fn(sc, args.data, args.sampling_ratio, args.seed)\n",
" d.cache()\n",
"\n",
" # Determine the data dimension\n",
" D = len(d.first())\n",
" print \"d.first: {}, D: {}\".format(d.first(),D)\n",
"\n",
" # Count data points\n",
" count = d.count()\n",
" mu = d.aggregate(np.zeros(D), add, add)\n",
" mu = mu / float(count)\n",
"\n",
" # Compute covariance estimator\n",
" summed_covar = d.treeAggregate(np.zeros((D, D)), seqOp, combOp, depth=args.agg_depth)\n",
"\n",
" A = summed_covar / (count - 1) - np.outer(mu, mu)\n",
" E, P = np.linalg.eigh(A)\n",
"\n",
" params = {\n",
" 'mu': mu, # mean\n",
" 'P': P, # PCA matrix\n",
" 'E': E, # eigenvalues\n",
" 'A': A, # covariance matrix\n",
" 'c': count # sample size\n",
" }\n",
"\n",
" save_hdfs_pickle(params, args.output)\n",
"\n",
"\n",
"def save_hdfs_pickle(m, pkl_path):\n",
" \"\"\"\n",
" Given a python object and a path on hdfs, save the object as a pickle file locally and copy the file\n",
" to the hdfs path.\n",
" \"\"\"\n",
" print 'Saving pickle to temp file...'\n",
" f = NamedTemporaryFile(delete=False)\n",
" pkl.dump(m, f, -1)\n",
" f.close()\n",
"\n",
" print 'Copying pickle file to hdfs...'\n",
" copy_to_hdfs(f, pkl_path)\n",
" os.remove(f.name)\n",
"\n",
"\n",
"def copy_to_hdfs(f, hdfs_path):\n",
" subprocess.call(['hadoop', 'fs', '-copyFromLocal', f.name, hdfs_path])\n",
"\n",
"\n",
"if __name__ == '__main__':\n",
" from argparse import ArgumentParser\n",
" parser = ArgumentParser()\n",
"\n",
" # Data handling parameters\n",
" parser.add_argument('--data', dest='data', type=str, required=True, help='hdfs path to input data')\n",
" parser.add_argument('--data_udf', dest='data_udf', type=str, default=None, help='module name from which to load a data loading UDF')\n",
" parser.add_argument('--seed', dest='seed', type=int, default=None, help='optional random seed')\n",
" parser.add_argument('--sampling_ratio', dest='sampling_ratio', type=float, default=1.0, help='proportion of data to sample for training')\n",
" parser.add_argument('--agg_depth', dest='agg_depth', type=int, default=4, help='depth of tree aggregation to compute covariance estimator')\n",
"\n",
" parser.add_argument('--output', dest='output', type=str, default=None, help='hdfs path to output pickle file of parameters')\n",
"\n",
" args = parser.parse_args()\n",
"\n",
" sc = SparkContext(appName='PCA')\n",
"\n",
" # Load UDF module if provided\n",
" if args.data_udf:\n",
" sc.addPyFile('hdfs://memex/user/skaraman/build-lopq-index/lopq/spark/memex_udf.py')\n",
" sc.addPyFile('hdfs://memex/user/skaraman/build-lopq-index/lopq/spark/deepsentibanktf_udf.py')\n",
" udf_module = __import__(args.data_udf, fromlist=['udf'])\n",
" load_udf = udf_module.udf\n",
" main(sc, args, data_load_fn=load_udf)\n",
" else:\n",
" main(sc, args)\n",
"\n",
" sc.stop()\n"
] | [
0,
0.008130081300813009,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011904761904761904,
0.011494252873563218,
0.010526315789473684,
0,
0,
0.009259259259259259,
0,
0,
0.034482758620689655,
0,
0.014285714285714285,
0,
0.011111111111111112,
0,
0,
0,
0,
0.010526315789473684,
0,
0,
0,
0.0196078431372549,
0,
0,
0.011904761904761904,
0,
0,
0,
0.024691358024691357,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.0196078431372549,
0,
0,
0,
0,
0,
0,
0,
0.011111111111111112,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.009523809523809525,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.009615384615384616,
0.0072992700729927005,
0.01,
0.007042253521126761,
0.006944444444444444,
0,
0.007751937984496124,
0,
0,
0,
0,
0,
0,
0,
0.010869565217391304,
0.00980392156862745,
0,
0,
0,
0,
0,
0,
0
] | 136 | 0.002114 |
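
The PCA job above accumulates a sum of the rows (mu), a sum of outer products (seqOp/combOp) and a count, then forms A = summed_covar/(count - 1) - outer(mu, mu) and eigendecomposes it; note that this scaling differs slightly from the textbook unbiased sample covariance. A minimal local NumPy sketch of the same estimator, without Spark (array shapes and names are illustrative):

import numpy as np

def pca_params(X):
    """Mirror the covariance/PCA computation of the Spark job above on a local array."""
    n, D = X.shape
    mu = X.sum(axis=0) / float(n)            # same as the add/add aggregate over rows
    S = np.zeros((D, D))
    for x in X:                              # seqOp: accumulate outer products
        S += np.outer(x, x)
    A = S / (n - 1) - np.outer(mu, mu)       # covariance estimator used in the job
    E, P = np.linalg.eigh(A)                 # eigenvalues (ascending) and PCA matrix
    return {'mu': mu, 'P': P, 'E': E, 'A': A, 'c': n}

# params = pca_params(np.random.randn(1000, 8))
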
import requests
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry
import logging
import os
from datetime import datetime
try:
import defusedxml.minidom as xml
except ImportError:
import xml.dom.minidom as xml
class instagram:
def __init__(self, cookie):
"""This sets up this class to communicate with Instagram.
Args:
cookie: A dictionary object with the required cookie values (ds_user_id, sessionid, csrftoken).
"""
self.userid = cookie["ds_user_id"]
self.sessionid = cookie["sessionid"]
self.csrftoken = cookie["csrftoken"]
self.mid = cookie["mid"]
self.headers = {
"accept" : "*/*",
"accept-encoding" : "gzip, deflate",
"accept-language" : "en-US",
"content_type" : "application/x-www-form-urlencoded; charset=UTF-8",
"cache-control" : "no-cache",
"cookie" : "ds_user_id=" + self.userid + "; sessionid=" + self.sessionid + "; csrftoken=" + self.csrftoken + "; mid=" + self.mid,
"dnt" : "1",
# "pragma" : "no-cache",
# "referer" : "https://www.instagram.com/",
"user-agent" : "Instagram 10.26.0 (iPhone7,2; iOS 10_1_1; en_US; en-US; scale=2.00; gamut=normal; 750x1334) AppleWebKit/420+",
"x-ig-capabilities": "36oD",
# "x-ig-connection-type" : "WIFI",
# "x-ig-fb-http-engine" : "Liger"
}
self.session = requests.Session()
max_tries = 3
backoff_factor = 0.2
status_forcelist = (500, 502, 503, 504)
retry = Retry(total=max_tries, read=max_tries, connect=max_tries, backoff_factor=backoff_factor, status_forcelist=status_forcelist)
adapter = HTTPAdapter(max_retries=retry)
self.session.mount('http://', adapter)
self.session.mount('https://', adapter)
self.session.headers = self.headers
def getReelTray(self):
"""Get reel tray from API.
Returns:
Response object with reel tray API response
"""
endpoint = "https://i.instagram.com/api/v1/feed/reels_tray/"
response = self.session.get(endpoint, timeout=60)
if response.status_code != requests.codes.ok:
logging.error("Status Code Error." + str(response.status_code))
response.raise_for_status()
return response
def getReelMedia(self, user):
"""Get reel media of a user from API.
Args:
user: User ID
Returns:
Response object with reel media API response
"""
endpoint = "https://i.instagram.com/api/v1/feed/user/" + str(user) + "/reel_media/"
response = self.session.get(endpoint, timeout=60)
if response.status_code != requests.codes.ok:
logging.error("Status Code Error." + str(response.status_code))
response.raise_for_status()
return response
def getStories(self):
return self.getReelTray()
def getUserStories(self, user):
return self.getReelMedia(user)
def getUserIDs(self, json: dict) -> list:
"""Extract user IDs from reel tray JSON.
Args:
json: Reel tray response from IG
Returns:
List of user IDs
"""
users = []
for user in json['tray']:
users.append(user['user']['pk'])
return users
def getFile(self, url: str, dest: str):
"""Download file and save to destination
Args:
url: URL of item to download
dest: File system destination to save item to
Returns:
None
"""
logging.debug("URL: %s", url)
logging.debug("Dest: %s", dest)
try:
if os.path.getsize(dest) == 0:
logging.info("Empty file exists. Removing.")
os.remove(dest)
except FileNotFoundError:
pass
try:
dirpath = os.path.dirname(dest)
os.makedirs(dirpath, exist_ok=True)
with open(dest, "xb") as handle:
response = self.session.get(url, stream=True, timeout=60)
if response.status_code != requests.codes.ok:
logging.error("Status Code Error." + str(response.status_code))
response.raise_for_status()
for data in response.iter_content(chunk_size=4194304):
handle.write(data)
handle.close()
except FileExistsError:
logging.info("File already exists.")
if os.path.getsize(dest) == 0:
logging.info("Error downloading. Removing.")
os.remove(dest)
def formatPath(self, user: str, pk: int, timestamp: int, postid: str, mediatype: int) -> str:
"""Format download path to a specific format/template
Args:
user: User name
pk: User ID
timestamp: UTC Unix timestamp
postid: Post ID
mediatype: Media type as defined by IG
Returns:
None
"""
dirpath = os.path.dirname(__file__)
utcdatetime = datetime.utcfromtimestamp(timestamp).strftime("%Y-%m-%d-%H-%M-%S")
if mediatype == 1:
ext = ".jpg"
type = "stories"
elif mediatype == 2:
ext = ".mp4"
type = "stories"
elif mediatype == 3:
ext = ".mp4"
type = "livestories"
else:
ext = ""
type = "other"
path = os.path.join(dirpath, "downloads", user + "_" + str(pk), type, utcdatetime + "_" + str(timestamp) + "_" + postid + ext)
return path
def downloadReel(self, resp):
"""Download stories of a followed user's tray.
Download the stories of a followed user.
Args:
resp: JSON dictionary of reel from IG API
Returns:
None
"""
try:
for index, item in enumerate(resp['items']):
logging.debug(' ' + str(index))
username = item['user']['username']
userpk = item['user']['pk']
timestamp = item['taken_at']
postid = item['id']
mediatype = item['media_type']
if mediatype == 2: # Video
largest = 0
for versionindex, video in enumerate(item['video_versions']):
itemsize = video['width'] * video['height']
largestsize = item['video_versions'][largest]['width'] * \
item['video_versions'][largest]['height']
if itemsize > largestsize:
largest = versionindex
logging.debug(' V' + str(largest))
url = item['video_versions'][largest]['url']
logging.debug(' ' + url)
elif mediatype == 1: # Image
largest = 0
for versionindex, image in enumerate(item['image_versions2']['candidates']):
itemsize = image['width'] * image['height']
largestsize = item['image_versions2']['candidates'][largest]['width'] * \
item['image_versions2']['candidates'][largest]['height']
if itemsize > largestsize:
largest = versionindex
logging.debug(' I' + str(largest))
url = item['image_versions2']['candidates'][largest]['url']
logging.debug(' ' + url)
else: # Unknown
logging.debug(' E')
url = None
pass
path = self.formatPath(username, userpk, timestamp, postid, mediatype)
self.getFile(url, path)
except KeyError: # JSON 'item' key does not exist for later items in tray as of 6/2/2017
pass
def downloadTray(self, resp):
"""Download stories of logged in user's tray.
Download the stories as available in the tray. The tray contains a list of
reels, a collection of the stories posted by a followed user.
The tray only contains a small set of reels of the first few users. To download
the rest, a reel must be obtained for each user in the tray.
Args:
resp: JSON dictionary of tray from IG API
Returns:
None
"""
for reel in resp['tray']:
self.downloadReel(reel)
def downloadStoryLive(self, resp):
"""Download post-live stories of a followed user's tray.
Download the post-live stories of a followed user.
Args:
resp: JSON dictionary of reel from IG API
Returns:
None
"""
try:
for index,item in enumerate(resp["post_live"]["post_live_items"]):
logging.debug(' ' + str(index))
username = item["user"]["username"]
userpk = item["user"]["pk"]
for bindex,broadcast in enumerate(item["broadcasts"]):
logging.debug(' ' + str(bindex))
timestamp = broadcast["published_time"]
postid = broadcast["media_id"]
dash = broadcast["dash_manifest"]
dashxml = xml.parseString(dash)
elements = dashxml.getElementsByTagName("BaseURL")
for eindex,element in enumerate(elements):
for node in element.childNodes:
if node.nodeType == node.TEXT_NODE:
url = node.data
mediatype = 3
path = self.formatPath(username, userpk, timestamp, postid + "_" + str(eindex), mediatype)
self.getFile(url, path)
except KeyError: # No "post_live" key
logging.debug(' ' + 'No live stories.')
def close(self):
"""Close seesion to IG
Returns:
None
"""
self.session.close()
| [
"import requests\n",
"from requests.adapters import HTTPAdapter\n",
"from requests.packages.urllib3.util.retry import Retry\n",
"import logging\n",
"import os\n",
"from datetime import datetime\n",
"try:\n",
" import defusedxml.minidom as xml\n",
"except ImportError:\n",
" import xml.dom.minidom as xml\n",
"\n",
"class instagram:\n",
" def __init__(self, cookie):\n",
" \"\"\"This sets up this class to communicate with Instagram.\n",
"\n",
" Args:\n",
" cookie: A dictionary object with the required cookie values (ds_user_id, sessionid, csrftoken).\n",
" \"\"\"\n",
" self.userid = cookie[\"ds_user_id\"]\n",
" self.sessionid = cookie[\"sessionid\"]\n",
" self.csrftoken = cookie[\"csrftoken\"]\n",
" self.mid = cookie[\"mid\"]\n",
" self.headers = {\n",
" \"accept\" : \"*/*\",\n",
" \"accept-encoding\" : \"gzip, deflate\",\n",
" \"accept-language\" : \"en-US\",\n",
" \"content_type\" : \"application/x-www-form-urlencoded; charset=UTF-8\",\n",
" \"cache-control\" : \"no-cache\",\n",
" \"cookie\" : \"ds_user_id=\" + self.userid + \"; sessionid=\" + self.sessionid + \"; csrftoken=\" + self.csrftoken + \"; mid=\" + self.mid,\n",
" \"dnt\" : \"1\",\n",
" # \"pragma\" : \"no-cache\",\n",
" # \"referer\" : \"https://www.instagram.com/\",\n",
" \"user-agent\" : \"Instagram 10.26.0 (iPhone7,2; iOS 10_1_1; en_US; en-US; scale=2.00; gamut=normal; 750x1334) AppleWebKit/420+\",\n",
" \"x-ig-capabilities\": \"36oD\",\n",
" # \"x-ig-connection-type\" : \"WIFI\",\n",
" # \"x-ig-fb-http-engine\" : \"Liger\"\n",
" }\n",
" self.session = requests.Session()\n",
" max_tries = 3\n",
" backoff_factor = 0.2\n",
" status_forcelist = (500, 502, 503, 504)\n",
" retry = Retry(total=max_tries, read=max_tries, connect=max_tries, backoff_factor=backoff_factor, status_forcelist=status_forcelist)\n",
" adapter = HTTPAdapter(max_retries=retry)\n",
" self.session.mount('http://', adapter)\n",
" self.session.mount('https://', adapter)\n",
" self.session.headers = self.headers\n",
"\n",
" def getReelTray(self):\n",
" \"\"\"Get reel tray from API.\n",
"\n",
" Returns:\n",
" Response object with reel tray API response\n",
" \"\"\"\n",
" endpoint = \"https://i.instagram.com/api/v1/feed/reels_tray/\"\n",
" response = self.session.get(endpoint, timeout=60)\n",
" if response.status_code != requests.codes.ok:\n",
" logging.error(\"Status Code Error.\" + str(response.status_code))\n",
" response.raise_for_status()\n",
" return response\n",
"\n",
" def getReelMedia(self, user):\n",
" \"\"\"Get reel media of a user from API.\n",
"\n",
" Args:\n",
" user: User ID\n",
"\n",
" Returns:\n",
" Response object with reel media API response\n",
" \"\"\"\n",
" endpoint = \"https://i.instagram.com/api/v1/feed/user/\" + str(user) + \"/reel_media/\"\n",
" response = self.session.get(endpoint, timeout=60)\n",
" if response.status_code != requests.codes.ok:\n",
" logging.error(\"Status Code Error.\" + str(response.status_code))\n",
" response.raise_for_status()\n",
" return response\n",
"\n",
" def getStories(self):\n",
" return self.getReelTray()\n",
"\n",
" def getUserStories(self, user):\n",
" return self.getReelMedia(user)\n",
"\n",
" def getUserIDs(self, json: dict) -> list:\n",
" \"\"\"Extract user IDs from reel tray JSON.\n",
"\n",
" Args:\n",
" json: Reel tray response from IG\n",
"\n",
" Returns:\n",
" List of user IDs\n",
" \"\"\"\n",
" users = []\n",
" for user in json['tray']:\n",
" users.append(user['user']['pk'])\n",
" return users\n",
"\n",
" def getFile(self, url: str, dest: str):\n",
" \"\"\"Download file and save to destination\n",
"\n",
" Args:\n",
" url: URL of item to download\n",
" dest: File system destination to save item to\n",
"\n",
" Returns:\n",
" None\n",
" \"\"\"\n",
" logging.debug(\"URL: %s\", url)\n",
" logging.debug(\"Dest: %s\", dest)\n",
" try:\n",
" if os.path.getsize(dest) == 0:\n",
" logging.info(\"Empty file exists. Removing.\")\n",
" os.remove(dest)\n",
" except FileNotFoundError:\n",
" pass\n",
"\n",
" try:\n",
" dirpath = os.path.dirname(dest)\n",
" os.makedirs(dirpath, exist_ok=True)\n",
" with open(dest, \"xb\") as handle:\n",
" response = self.session.get(url, stream=True, timeout=60)\n",
" if response.status_code != requests.codes.ok:\n",
" logging.error(\"Status Code Error.\" + str(response.status_code))\n",
" response.raise_for_status()\n",
" for data in response.iter_content(chunk_size=4194304):\n",
" handle.write(data)\n",
" handle.close()\n",
" except FileExistsError:\n",
" logging.info(\"File already exists.\")\n",
"\n",
" if os.path.getsize(dest) == 0:\n",
" logging.info(\"Error downloading. Removing.\")\n",
" os.remove(dest)\n",
"\n",
" def formatPath(self, user: str, pk: int, timestamp: int, postid: str, mediatype: int) -> str:\n",
" \"\"\"Format download path to a specific format/template\n",
"\n",
" Args:\n",
" user: User name\n",
" pk: User ID\n",
" timestamp: UTC Unix timestamp\n",
" postid: Post ID\n",
" mediatype: Media type as defined by IG\n",
"\n",
" Returns:\n",
" None\n",
" \"\"\"\n",
" dirpath = os.path.dirname(__file__)\n",
" utcdatetime = datetime.utcfromtimestamp(timestamp).strftime(\"%Y-%m-%d-%H-%M-%S\")\n",
" if mediatype == 1:\n",
" ext = \".jpg\"\n",
" type = \"stories\"\n",
" elif mediatype == 2:\n",
" ext = \".mp4\"\n",
" type = \"stories\"\n",
" elif mediatype == 3:\n",
" ext = \".mp4\"\n",
" type = \"livestories\"\n",
" else:\n",
" ext = \"\"\n",
" type = \"other\"\n",
" path = os.path.join(dirpath, \"downloads\", user + \"_\" + str(pk), type, utcdatetime + \"_\" + str(timestamp) + \"_\" + postid + ext)\n",
" return path\n",
"\n",
" def downloadReel(self, resp):\n",
" \"\"\"Download stories of a followed user's tray.\n",
"\n",
" Download the stories of a followed user.\n",
"\n",
" Args:\n",
" resp: JSON dictionary of reel from IG API\n",
"\n",
" Returns:\n",
" None\n",
" \"\"\"\n",
" try:\n",
" for index, item in enumerate(resp['items']):\n",
" logging.debug(' ' + str(index))\n",
" username = item['user']['username']\n",
" userpk = item['user']['pk']\n",
" timestamp = item['taken_at']\n",
" postid = item['id']\n",
" mediatype = item['media_type']\n",
" if mediatype == 2: # Video\n",
" largest = 0\n",
" for versionindex, video in enumerate(item['video_versions']):\n",
" itemsize = video['width'] * video['height']\n",
" largestsize = item['video_versions'][largest]['width'] * \\\n",
" item['video_versions'][largest]['height']\n",
" if itemsize > largestsize:\n",
" largest = versionindex\n",
" logging.debug(' V' + str(largest))\n",
" url = item['video_versions'][largest]['url']\n",
" logging.debug(' ' + url)\n",
" elif mediatype == 1: # Image\n",
" largest = 0\n",
" for versionindex, image in enumerate(item['image_versions2']['candidates']):\n",
" itemsize = image['width'] * image['height']\n",
" largestsize = item['image_versions2']['candidates'][largest]['width'] * \\\n",
" item['image_versions2']['candidates'][largest]['height']\n",
" if itemsize > largestsize:\n",
" largest = versionindex\n",
" logging.debug(' I' + str(largest))\n",
" url = item['image_versions2']['candidates'][largest]['url']\n",
" logging.debug(' ' + url)\n",
" else: # Unknown\n",
" logging.debug(' E')\n",
" url = None\n",
" pass\n",
"\n",
" path = self.formatPath(username, userpk, timestamp, postid, mediatype)\n",
" self.getFile(url, path)\n",
" except KeyError: # JSON 'item' key does not exist for later items in tray as of 6/2/2017\n",
" pass\n",
"\n",
" def downloadTray(self, resp):\n",
" \"\"\"Download stories of logged in user's tray.\n",
"\n",
" Download the stories as available in the tray. The tray contains a list of\n",
" reels, a collection of the stories posted by a followed user.\n",
"\n",
" The tray only contains a small set of reels of the first few users. To download\n",
" the rest, a reel must be obtained for each user in the tray.\n",
"\n",
" Args:\n",
" resp: JSON dictionary of tray from IG API\n",
"\n",
" Returns:\n",
" None\n",
" \"\"\"\n",
" for reel in resp['tray']:\n",
" self.downloadReel(reel)\n",
"\n",
" def downloadStoryLive(self, resp):\n",
" \"\"\"Download post-live stories of a followed user's tray.\n",
"\n",
" Download the post-live stories of a followed user.\n",
"\n",
" Args:\n",
" resp: JSON dictionary of reel from IG API\n",
"\n",
" Returns:\n",
" None\n",
" \"\"\"\n",
" try:\n",
" for index,item in enumerate(resp[\"post_live\"][\"post_live_items\"]):\n",
" logging.debug(' ' + str(index))\n",
" username = item[\"user\"][\"username\"]\n",
" userpk = item[\"user\"][\"pk\"]\n",
" for bindex,broadcast in enumerate(item[\"broadcasts\"]):\n",
" logging.debug(' ' + str(bindex))\n",
" timestamp = broadcast[\"published_time\"]\n",
" postid = broadcast[\"media_id\"]\n",
" dash = broadcast[\"dash_manifest\"]\n",
" dashxml = xml.parseString(dash)\n",
" elements = dashxml.getElementsByTagName(\"BaseURL\")\n",
" for eindex,element in enumerate(elements):\n",
" for node in element.childNodes:\n",
" if node.nodeType == node.TEXT_NODE:\n",
" url = node.data\n",
" mediatype = 3\n",
" path = self.formatPath(username, userpk, timestamp, postid + \"_\" + str(eindex), mediatype)\n",
" self.getFile(url, path)\n",
" except KeyError: # No \"post_live\" key\n",
" logging.debug(' ' + 'No live stories.')\n",
"\n",
" def close(self):\n",
" \"\"\"Close seesion to IG\n",
"\n",
" Returns:\n",
" None\n",
" \"\"\"\n",
" self.session.close()\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.058823529411764705,
0,
0,
0,
0,
0.009259259259259259,
0,
0,
0,
0,
0,
0,
0.025,
0.02,
0.023809523809523808,
0.023529411764705882,
0.022222222222222223,
0.013157894736842105,
0.02631578947368421,
0,
0,
0.013793103448275862,
0,
0,
0,
0,
0,
0,
0,
0,
0.007142857142857143,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.010869565217391304,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011904761904761904,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.01020408163265306,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011235955056179775,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.007407407407407408,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.023255813953488372,
0,
0.012195121951219513,
0,
0.012048192771084338,
0.0125,
0,
0,
0,
0,
0,
0.022222222222222223,
0,
0.010309278350515464,
0,
0.01020408163265306,
0.021052631578947368,
0,
0,
0,
0,
0,
0.03125,
0,
0,
0,
0,
0.011494252873563218,
0,
0.020618556701030927,
0,
0,
0,
0,
0,
0.012048192771084338,
0,
0,
0.011363636363636364,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.012658227848101266,
0,
0,
0,
0.014084507042253521,
0,
0,
0,
0,
0,
0,
0.015873015873015872,
0,
0,
0,
0,
0.008130081300813009,
0,
0.021739130434782608,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | 272 | 0.002124 |
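
The instagram class above builds one requests.Session and mounts an HTTPAdapter carrying a urllib3 Retry policy, so every request made through the session is retried on transient server errors. A minimal sketch of just that pattern (the retry numbers and example URL are placeholders):

import requests
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry  # same import path as the code above

def make_retrying_session(max_tries=3, backoff_factor=0.2,
                          status_forcelist=(500, 502, 503, 504)):
    """Session whose mounted adapter retries connect/read failures and 5xx responses."""
    retry = Retry(total=max_tries, read=max_tries, connect=max_tries,
                  backoff_factor=backoff_factor, status_forcelist=status_forcelist)
    adapter = HTTPAdapter(max_retries=retry)
    session = requests.Session()
    session.mount('http://', adapter)    # adapter applies to every URL with this prefix
    session.mount('https://', adapter)
    return session

# session = make_retrying_session()
# response = session.get('https://example.com/api', timeout=60)   # placeholder URL
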
from airflow.hooks.base_hook import BaseHook
from airflow import configuration
try:
snakebite_imported = True
from snakebite.client import Client, HAClient, Namenode
except ImportError:
snakebite_imported = False
from airflow.utils import AirflowException
class HDFSHookException(AirflowException):
pass
class HDFSHook(BaseHook):
'''
Interact with HDFS. This class is a wrapper around the snakebite library.
'''
def __init__(self, hdfs_conn_id='hdfs_default', proxy_user=None):
if not snakebite_imported:
raise ImportError(
'This HDFSHook implementation requires snakebite, but '
'snakebite is not compatible with Python 3 '
'(as of August 2015). Please use Python 2 if you require '
'this hook -- or help by submitting a PR!')
self.hdfs_conn_id = hdfs_conn_id
self.proxy_user = proxy_user
def get_conn(self):
'''
Returns a snakebite HDFSClient object.
'''
use_sasl = False
if configuration.get('core', 'security') == 'kerberos':
use_sasl = True
connections = self.get_connections(self.hdfs_conn_id)
client = None
        # When using HAClient, proxy_user must be the same, so it is OK to always take the first one.
        effective_user = self.proxy_user or connections[0].login
if len(connections) == 1:
client = Client(connections[0].host, connections[0].port, use_sasl=use_sasl, effective_user=effective_user)
elif len(connections) > 1:
nn = [Namenode(conn.host, conn.port) for conn in connections]
client = HAClient(nn, use_sasl=use_sasl, effective_user=effective_user)
else:
raise HDFSHookException("conn_id doesn't exist in the repository")
return client
| [
"from airflow.hooks.base_hook import BaseHook\n",
"from airflow import configuration\n",
"\n",
"try:\n",
" snakebite_imported = True\n",
" from snakebite.client import Client, HAClient, Namenode\n",
"except ImportError:\n",
" snakebite_imported = False\n",
"\n",
"from airflow.utils import AirflowException\n",
"\n",
"\n",
"class HDFSHookException(AirflowException):\n",
" pass\n",
"\n",
"\n",
"class HDFSHook(BaseHook):\n",
" '''\n",
" Interact with HDFS. This class is a wrapper around the snakebite library.\n",
" '''\n",
" def __init__(self, hdfs_conn_id='hdfs_default', proxy_user=None):\n",
" if not snakebite_imported:\n",
" raise ImportError(\n",
" 'This HDFSHook implementation requires snakebite, but '\n",
" 'snakebite is not compatible with Python 3 '\n",
" '(as of August 2015). Please use Python 2 if you require '\n",
" 'this hook -- or help by submitting a PR!')\n",
" self.hdfs_conn_id = hdfs_conn_id\n",
" self.proxy_user = proxy_user\n",
"\n",
" def get_conn(self):\n",
" '''\n",
" Returns a snakebite HDFSClient object.\n",
" '''\n",
" use_sasl = False\n",
" if configuration.get('core', 'security') == 'kerberos':\n",
" use_sasl = True\n",
"\n",
" connections = self.get_connections(self.hdfs_conn_id)\n",
" client = None\n",
"\t''' When using HAClient, proxy_user must be the same, so is ok to always take the first '''\n",
"\teffective_user = self.proxy_user or connections[0].login\n",
" if len(connections) == 1:\n",
" client = Client(connections[0].host, connections[0].port, use_sasl=use_sasl, effective_user=effective_user)\n",
" elif len(connections) > 1:\n",
" nn = [Namenode(conn.host, conn.port) for conn in connections]\n",
" client = HAClient(nn, use_sasl=use_sasl, effective_user=effective_user)\n",
" else:\n",
" raise HDFSHookException(\"conn_id doesn't exist in the repository\")\n",
" return client\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.03225806451612903,
0.017241379310344827,
0.029411764705882353,
0.008333333333333333,
0,
0,
0.011904761904761904,
0,
0,
0
] | 50 | 0.001983 |
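
HDFSHook above picks a single-namenode snakebite Client when one connection is configured and an HAClient built from Namenode objects when there are several. A hedged sketch of the same selection outside Airflow (the hostnames and ports are placeholders, and the ls() usage is only illustrative):

from snakebite.client import Client, HAClient, Namenode

def make_hdfs_client(endpoints, use_sasl=False, effective_user=None):
    """Return a snakebite client, falling back to HA mode when several namenodes are given.

    endpoints is a list of (host, port) tuples.
    """
    if len(endpoints) == 1:
        host, port = endpoints[0]
        return Client(host, port, use_sasl=use_sasl, effective_user=effective_user)
    namenodes = [Namenode(host, port) for host, port in endpoints]
    return HAClient(namenodes, use_sasl=use_sasl, effective_user=effective_user)

# client = make_hdfs_client([('namenode1', 8020), ('namenode2', 8020)])
# for entry in client.ls(['/']):    # snakebite yields one dict per path entry
#     print(entry['path'])
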
import json
import csv
"""
def write_to_text(data, filename):
f = open(filename, 'w', encoding = 'utf-8')
for line in data:
f.write(line + '\n')
f.close()
"""
def read_text_file(filename):
ignore = []
ignore.append('[')
ignore.append(']')
ignore.append('')
f = open(filename, encoding='utf-8')
table_original = []
for line in f:
line = line.rstrip('\n')
if line not in ignore:
if 'Getting snapshot pages' not in line:
table_original.append(line.rstrip(','))
return table_original
def write_to_csv(data, filename):
with open(filename, 'w') as csvfile:
w = csv.writer(csvfile, delimiter = ',')
for row in data:
try:
w.writerow(row)
            except Exception:
print(row, 'not written to file.')
print(filename, 'created.')
def extract_url_database(filename):
u = read_text_file(filename)
url_list = [['url', 'timestamp', 'id']]
for article in u:
url_object = []
url_object.append(json.loads(article)['file_url'])
url_object.append(json.loads(article)['timestamp'])
url_object.append(json.loads(article)['file_id'])
url_list.append(url_object)
return(url_list)
# write list to text file, one URL per line
write_to_csv(extract_url_database('whg.txt'), 'whg_url_data.csv')
| [
"import json\n",
"import csv\n",
"\n",
"\"\"\"\n",
"def write_to_text(data, filename):\n",
" f = open(filename, 'w', encoding = 'utf-8')\n",
" for line in data:\n",
" f.write(line + '\\n')\n",
" f.close()\n",
"\"\"\"\n",
"\n",
"def read_text_file(filename):\n",
" ignore = []\n",
" ignore.append('[')\n",
" ignore.append(']')\n",
" ignore.append('')\n",
" f = open(filename, encoding='utf-8')\n",
" table_original = []\n",
" for line in f:\n",
" line = line.rstrip('\\n')\n",
" if line not in ignore:\n",
" if 'Getting snapshot pages' not in line:\n",
" table_original.append(line.rstrip(','))\n",
" return table_original\n",
"\n",
"def write_to_csv(data, filename):\n",
" with open(filename, 'w') as csvfile:\n",
" w = csv.writer(csvfile, delimiter = ',')\n",
" for row in data:\n",
" try:\n",
" w.writerow(row)\n",
" except:\n",
" print(row, 'not written to file.')\n",
" print(filename, 'created.')\n",
"\n",
"def extract_url_database(filename):\n",
" u = read_text_file(filename)\n",
" url_list = [['url', 'timestamp', 'id']]\n",
" for article in u:\n",
" url_object = []\n",
" url_object.append(json.loads(article)['file_url'])\n",
" url_object.append(json.loads(article)['timestamp'])\n",
" url_object.append(json.loads(article)['file_id'])\n",
" url_list.append(url_object)\n",
" return(url_list)\n",
"\n",
"# write list to text file, one URL per line\n",
"write_to_csv(extract_url_database('whg.txt'), 'whg_url_data.csv')\n",
"\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.03333333333333333,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.029411764705882353,
0,
0.04081632653061224,
0,
0,
0,
0.05,
0,
0,
0,
0.027777777777777776,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.015151515151515152,
1
] | 49 | 0.024418 |
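
extract_url_database above calls json.loads three times per line, once for each field. A small sketch of the same extraction that parses each line once and streams rows into the CSV writer (field names are taken from the code above; newline='' assumes Python 3):

import csv
import json

def extract_url_rows(lines):
    """Yield [url, timestamp, id] rows from JSON-lines records."""
    yield ['url', 'timestamp', 'id']
    for line in lines:
        record = json.loads(line)        # parse once instead of three times per line
        yield [record['file_url'], record['timestamp'], record['file_id']]

def write_rows_to_csv(rows, filename):
    with open(filename, 'w', newline='') as csvfile:   # newline='' stops the csv module double-spacing rows on Windows
        csv.writer(csvfile).writerows(rows)

# write_rows_to_csv(extract_url_rows(read_text_file('whg.txt')), 'whg_url_data.csv')
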
#!/usr/bin/python -u
from optparse import OptionParser, OptionGroup
from sys import argv as sys_argv, exit
from swiftclient import Client
def upload(options):
values = {
'username': options.os_username,
'password': options.os_password,
'auth_url': options.os_auth_url,
'project_name': options.os_project_name,
'user_domain_name': options.os_user_domain_name,
'project_domain_name': options.os_project_domain_name,
'storage_url': options.os_storage_url,
}
container = options.container
path = options.upload_path
cli = Client(values)
cli.upload(container, path)
def download(options):
values = {
'username': options.os_username,
'password': options.os_password,
'auth_url': options.os_auth_url,
'project_name': options.os_project_name,
'user_domain_name': options.os_user_domain_name,
'project_domain_name': options.os_project_domain_name,
'storage_url': options.os_storage_url,
}
container = options.container
objectname = options.object
download_path = options.download_path
cli = Client(values)
cli.download(container, objectname, download_path)
def main(arguments=None):
if arguments:
argv = arguments
else:
argv = sys_argv
version = '0.0.1'
parser = OptionParser(version='%%prog %s' % version,
usage='''
Command-line interface to the OpenStack Swift API.
usage: %%prog [--version] [--help]
Mandatory Switch:
[--os-username <auth-user-name>]
[--os-password <auth-password>]
[--os-project-name <auth-project-name>]
[--os-auth-url <auth-url>]
[--os-user-domain-name <auth-user-domain-name>]
[--os-project-domain-name <auth-project-domain-name>]
[--os-storage-url <storage-url>]
[--operation-type <operation-type>]
[--container <container-name>]
Command Specific Switch:
For Upload (Uploads files or directories to the given container from the upload path.):
[--upload-path <upload-path>]
For Download (Downloads files from the given container in the download path.):
[--object <object-name>]
[--download-path <download-path>]
'''.strip('\n') % globals())
parser.add_option('--insecure',
action="store_true", dest="insecure",
default=True,
help='Allow swiftclient to access servers without '
'having to verify the SSL certificate. '
'Defaults to env[SWIFTCLIENT_INSECURE] '
'(set to \'true\' to enable).')
os_grp = OptionGroup(parser, "OpenStack authentication options")
os_grp.add_option('--os-username',
metavar='<auth-user-name>',
help='OpenStack username required to authenticate with OpenStack swift. ')
os_grp.add_option('--os_username',
help='OpenStack username required to authenticate with OpenStack swift. ')
os_grp.add_option('--os-password',
metavar='<auth-password>',
help='OpenStack password required to authenticate with OpenStack swift.')
os_grp.add_option('--os-user-domain-name',
metavar='<user-domain-name>',
help='OpenStack user domain name required to connect with OpenStack swift.')
os_grp.add_option('--os-project-name',
metavar='<project-name>',
help='OpenStack project name required to connect with OpenStack swift.')
os_grp.add_option('--os-project-domain-name',
metavar='<project-domain-name>',
help='OpenStack project domain name required to connect with OpenStack swift.')
os_grp.add_option('--os-auth-url',
metavar='<auth-url>',
help='OpenStack auth URL required to authenticate with OpenStack Identity to get the '
'authentication token.')
os_grp.add_option('--os-storage-url',
metavar='<storage-url>',
help='OpenStack storage URL required to connect with the OpenStack Swift.')
os_grp.add_option('--operation-type',
metavar='<operation-type>',
help='Specified OpenStack swift related operation which can be upload or download.')
os_grp.add_option('--container',
metavar='<container-name>',
help='Specified container name to upload/download object.')
os_grp.add_option('--object',
metavar='<object-name>',
help='Specified object name to be downloaded in the downloaded path.')
os_grp.add_option('--upload-path',
metavar='<upload-path>',
help='Upload path of the file or directory.')
os_grp.add_option('--download-path',
metavar='<download-path>',
help='Download path to download the object.')
(options, args) = parser.parse_args(argv[1:])
try:
if(options.operation_type == 'upload'):
if(options.upload_path is None):
parser.print_help()
exit()
else:
upload(options)
elif(options.operation_type == 'download'):
if (options.object is None and options.download_path is None):
parser.print_help()
exit()
else:
download(options)
else:
parser.print_help()
exit()
except Exception as err:
print(str(err))
if __name__ == '__main__':
main()
| [
"#!/usr/bin/python -u\n",
"\n",
"from optparse import OptionParser, OptionGroup\n",
"from sys import argv as sys_argv, exit\n",
"\n",
"from swiftclient import Client\n",
"\n",
"def upload(options):\n",
"\n",
" values = {\n",
" 'username': options.os_username,\n",
" 'password': options.os_password,\n",
" 'auth_url': options.os_auth_url,\n",
" 'project_name': options.os_project_name,\n",
" 'user_domain_name': options.os_user_domain_name,\n",
" 'project_domain_name': options.os_project_domain_name,\n",
" 'storage_url': options.os_storage_url,\n",
" }\n",
"\n",
" container = options.container\n",
" path = options.upload_path\n",
"\n",
" cli = Client(values)\n",
" cli.upload(container, path)\n",
"\n",
"def download(options):\n",
"\n",
" values = {\n",
" 'username': options.os_username,\n",
" 'password': options.os_password,\n",
" 'auth_url': options.os_auth_url,\n",
" 'project_name': options.os_project_name,\n",
" 'user_domain_name': options.os_user_domain_name,\n",
" 'project_domain_name': options.os_project_domain_name,\n",
" 'storage_url': options.os_storage_url,\n",
" }\n",
"\n",
" container = options.container\n",
" objectname = options.object\n",
" download_path = options.download_path\n",
"\n",
" cli = Client(values)\n",
" cli.download(container, objectname, download_path)\n",
"\n",
"def main(arguments=None):\n",
" if arguments:\n",
" argv = arguments\n",
" else:\n",
" argv = sys_argv\n",
"\n",
" version = '0.0.1'\n",
"\n",
" parser = OptionParser(version='%%prog %s' % version,\n",
" usage='''\n",
"Command-line interface to the OpenStack Swift API.\n",
"\n",
"usage: %%prog [--version] [--help]\n",
"\n",
"Mandatory Switch: \n",
" [--os-username <auth-user-name>]\n",
" [--os-password <auth-password>]\n",
" [--os-project-name <auth-project-name>]\n",
" [--os-auth-url <auth-url>]\n",
" [--os-user-domain-name <auth-user-domain-name>]\n",
" [--os-project-domain-name <auth-project-domain-name>]\n",
" [--os-storage-url <storage-url>]\n",
" [--operation-type <operation-type>]\n",
" [--container <container-name>]\n",
"\n",
"Command Specific Switch:\n",
"\n",
"For Upload (Uploads files or directories to the given container from the upload path.):\n",
" [--upload-path <upload-path>]\n",
"\n",
"For Download (Downloads files from the given container in the download path.):\n",
" [--object <object-name>]\n",
" [--download-path <download-path>] \n",
"'''.strip('\\n') % globals())\n",
" parser.add_option('--insecure',\n",
" action=\"store_true\", dest=\"insecure\",\n",
" default=True,\n",
" help='Allow swiftclient to access servers without '\n",
" 'having to verify the SSL certificate. '\n",
" 'Defaults to env[SWIFTCLIENT_INSECURE] '\n",
" '(set to \\'true\\' to enable).')\n",
"\n",
" os_grp = OptionGroup(parser, \"OpenStack authentication options\")\n",
"\n",
" os_grp.add_option('--os-username',\n",
" metavar='<auth-user-name>',\n",
" help='OpenStack username required to authenticate with OpenStack swift. ')\n",
" os_grp.add_option('--os_username',\n",
" help='OpenStack username required to authenticate with OpenStack swift. ')\n",
" os_grp.add_option('--os-password',\n",
" metavar='<auth-password>',\n",
" help='OpenStack password required to authenticate with OpenStack swift.')\n",
" os_grp.add_option('--os-user-domain-name',\n",
" metavar='<user-domain-name>',\n",
" help='OpenStack user domain name required to connect with OpenStack swift.')\n",
" os_grp.add_option('--os-project-name',\n",
" metavar='<project-name>',\n",
" help='OpenStack project name required to connect with OpenStack swift.')\n",
" os_grp.add_option('--os-project-domain-name',\n",
" metavar='<project-domain-name>',\n",
" help='OpenStack project domain name required to connect with OpenStack swift.')\n",
" os_grp.add_option('--os-auth-url',\n",
" metavar='<auth-url>',\n",
" help='OpenStack auth URL required to authenticate with OpenStack Identity to get the '\n",
" 'authentication token.')\n",
" os_grp.add_option('--os-storage-url',\n",
" metavar='<storage-url>',\n",
" help='OpenStack storage URL required to connect with the OpenStack Swift.')\n",
" os_grp.add_option('--operation-type',\n",
" metavar='<operation-type>',\n",
" help='Specified OpenStack swift related operation which can be upload or download.')\n",
" os_grp.add_option('--container',\n",
" metavar='<container-name>',\n",
" help='Specified container name to upload/download object.')\n",
" os_grp.add_option('--object',\n",
" metavar='<object-name>',\n",
" help='Specified object name to be downloaded in the downloaded path.')\n",
" os_grp.add_option('--upload-path',\n",
" metavar='<upload-path>',\n",
" help='Upload path of the file or directory.')\n",
" os_grp.add_option('--download-path',\n",
" metavar='<download-path>',\n",
" help='Download path to download the object.')\n",
"\n",
" (options, args) = parser.parse_args(argv[1:])\n",
"\n",
" try:\n",
" if(options.operation_type == 'upload'):\n",
" if(options.upload_path is None):\n",
" parser.print_help()\n",
" exit()\n",
" else:\n",
" upload(options)\n",
" elif(options.operation_type == 'download'):\n",
" if (options.object is None and options.download_path is None):\n",
" parser.print_help()\n",
" exit()\n",
" else:\n",
" download(options)\n",
" else:\n",
" parser.print_help()\n",
" exit()\n",
"\n",
" except Exception as err:\n",
" print(str(err))\n",
"\n",
"if __name__ == '__main__':\n",
" main()\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0.047619047619047616,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.043478260869565216,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.038461538461538464,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.05263157894736842,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011363636363636364,
0,
0,
0,
0,
0.017543859649122806,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.010309278350515464,
0,
0.010309278350515464,
0,
0,
0.010416666666666666,
0,
0,
0.010101010101010102,
0,
0,
0.010526315789473684,
0,
0,
0.00980392156862745,
0,
0,
0.009174311926605505,
0,
0,
0,
0.020202020202020204,
0,
0,
0.009345794392523364,
0,
0,
0.012195121951219513,
0,
0,
0.010752688172043012,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.037037037037037035,
0
] | 152 | 0.002443 |
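
The Swift CLI above parses its options with optparse and then branches on --operation-type in an if/elif chain. A minimal sketch of the same dispatch using a handler table instead (option names mirror the script, but the handlers are stand-in stubs rather than real Swift calls; assumes Python 3 for the print function):

from optparse import OptionParser
import sys

def main(argv=None):
    parser = OptionParser(version='%prog 0.0.1')
    parser.add_option('--operation-type', metavar='<operation-type>',
                      help='operation to run: upload or download')
    parser.add_option('--container', metavar='<container-name>',
                      help='container to upload to or download from')
    options, _args = parser.parse_args(argv if argv is not None else sys.argv[1:])

    handlers = {
        'upload': lambda opts: print('would upload to container', opts.container),
        'download': lambda opts: print('would download from container', opts.container),
    }
    handler = handlers.get(options.operation_type)
    if handler is None:
        parser.print_help()
        return 1
    handler(options)
    return 0

# main(['--operation-type', 'upload', '--container', 'demo'])
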
# Copyright (c) 2015 Huawei, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import paste.urlmap
from congress.api import application
from congress.api import versions
def wsgi_app():
mapper = paste.urlmap.URLMap()
mapper['/'] = versions.Versions()
api_resource_mgr = application.ResourceManager()
api_resource_mgr.register_handler(versions.VersionV1Handler(r'/v1[/]?'))
app = application.ApiApplication(api_resource_mgr)
mapper['/v1'] = app
return mapper
| [
"# Copyright (c) 2015 Huawei, Inc. All rights reserved.\n",
"#\n",
"# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n",
"# not use this file except in compliance with the License. You may obtain\n",
"# a copy of the License at\n",
"#\n",
"# http://www.apache.org/licenses/LICENSE-2.0\n",
"#\n",
"# Unless required by applicable law or agreed to in writing, software\n",
"# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n",
"# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n",
"# License for the specific language governing permissions and limitations\n",
"# under the License.\n",
"#\n",
"\n",
"import paste.urlmap\n",
"\n",
"from congress.api import application\n",
"from congress.api import versions\n",
"\n",
"\n",
"def wsgi_app():\n",
"\n",
" mapper = paste.urlmap.URLMap()\n",
" mapper['/'] = versions.Versions()\n",
"\n",
" api_resource_mgr = application.ResourceManager()\n",
" api_resource_mgr.register_handler(versions.VersionV1Handler(r'/v1[/]?'))\n",
" app = application.ApiApplication(api_resource_mgr)\n",
" mapper['/v1'] = app\n",
"\n",
" return mapper\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | 32 | 0 |
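
wsgi_app() above composes WSGI applications by URL prefix with paste.urlmap. A self-contained sketch of the same composition with plain WSGI callables standing in for the congress handlers (the response bodies and port are placeholders):

import paste.urlmap
from wsgiref.simple_server import make_server

def versions_app(environ, start_response):
    start_response('200 OK', [('Content-Type', 'text/plain')])
    return [b'available versions: v1\n']

def v1_app(environ, start_response):
    start_response('200 OK', [('Content-Type', 'text/plain')])
    return [b'v1 API root\n']

mapper = paste.urlmap.URLMap()    # dispatches to the app with the longest matching prefix
mapper['/'] = versions_app
mapper['/v1'] = v1_app

# make_server('localhost', 8080, mapper).serve_forever()
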
from __future__ import print_function
from ase.db.table import dict2forces, hill
from ase.data import atomic_masses, chemical_symbols
from ase.db.core import float_to_time_string, now
import numpy as np
class Summary:
def __init__(self, dct, subscript=None):
self.dct = dct
self.cell = [['{0:.3f}'.format(a) for a in axis] for axis in dct.cell]
forces = dict2forces(dct)
if forces is None:
fmax = None
self.forces = None
else:
fmax = (forces**2).sum(1).max()**0.5
N = len(forces)
self.forces = []
for n, f in enumerate(forces):
if n < 5 or n >= N - 5:
f = tuple('{0:10.3f}'.format(x) for x in f)
symbol = chemical_symbols[dct.numbers[n]]
self.forces.append((n, symbol) + f)
elif n == 5:
self.forces.append((' ...', '',
' ...',
' ...',
' ...'))
self.stress = dct.get('stress')
if self.stress is not None:
self.stress = ', '.join('{0:.3f}'.format(s) for s in self.stress)
if 'masses' in dct:
mass = dct.masses.sum()
else:
mass = atomic_masses[dct.numbers].sum()
formula = hill(dct.numbers)
if subscript:
formula = subscript.sub(r'<sub>\1</sub>', formula)
table = [
('id', dct.id),
('age', float_to_time_string(now() - dct.ctime, True)),
('formula', formula),
('user', dct.user),
('calculator', dct.get('calculator')),
('energy [eV]', dct.get('energy')),
('fmax [eV/Ang]', fmax),
('charge [|e|]', dct.get('charge')),
('mass [au]', mass),
('unique id', dct.unique_id),
('volume [Ang^3]', abs(np.linalg.det(dct.cell)))]
self.table = [(name, value) for name, value in table
if value is not None]
if 'key_value_pairs' in dct:
self.key_value_pairs = sorted(dct.key_value_pairs.items())
else:
self.key_value_pairs = None
if 'keywords' in dct:
self.keywords = ', '.join(sorted(dct.keywords))
else:
self.keywords = None
self.dipole = dct.get('dipole')
if self.dipole is not None:
self.dipole = ', '.join('{0:.3f}'.format(d) for d in self.dipole)
self.data = dct.get('data')
if self.data:
self.data = ', '.join(self.data.keys())
self.constraints = dct.get('constraints')
if self.constraints:
self.constraints = ', '.join(d['name'] for d in self.constraints)
def write(self):
dct = self.dct
width = max(len(name) for name, value in self.table)
for name, value in self.table:
print('{0:{width}}|{1}'.format(name, value, width=width))
print('\nUnit cell in Ang:')
print('axis|periodic| x| y| z')
c = 1
for p, axis in zip(dct.pbc, self.cell):
print(' {0}| {1}|{2[0]:>11}|{2[1]:>11}|{2[2]:>11}'.format(
c, [' no', 'yes'][p], axis))
c += 1
if self.key_value_pairs:
print('\nKey-value pairs:')
width = max(len(key) for key, value in self.key_value_pairs)
for key, value in self.key_value_pairs:
print('{0:{width}}|{1}'.format(key, value, width=width))
if self.keywords:
print('\nKeywords:', self.keywords)
if self.forces:
            print('\nForces in eV/Ang:')
for f in self.forces:
print('{0:4}|{1:2}|{2}|{3}|{4}'.format(*f))
if self.stress:
print('\nStress tensor (xx, yy, zz, zy, zx, yx) in eV/Ang^3:')
print(' ', self.stress)
if self.dipole:
print('\nDipole moment in e*Ang: ({0})'.format(self.dipole))
if self.constraints:
print('\nConstraints:', self.constraints)
if self.data:
print('\nData:', self.data)
| [
"from __future__ import print_function\n",
"\n",
"from ase.db.table import dict2forces, hill\n",
"from ase.data import atomic_masses, chemical_symbols\n",
"from ase.db.core import float_to_time_string, now\n",
"\n",
"import numpy as np\n",
"\n",
"\n",
"class Summary:\n",
" def __init__(self, dct, subscript=None):\n",
" self.dct = dct\n",
" \n",
" self.cell = [['{0:.3f}'.format(a) for a in axis] for axis in dct.cell]\n",
" \n",
" forces = dict2forces(dct)\n",
" if forces is None:\n",
" fmax = None\n",
" self.forces = None\n",
" else:\n",
" fmax = (forces**2).sum(1).max()**0.5\n",
" N = len(forces)\n",
" self.forces = []\n",
" for n, f in enumerate(forces):\n",
" if n < 5 or n >= N - 5:\n",
" f = tuple('{0:10.3f}'.format(x) for x in f)\n",
" symbol = chemical_symbols[dct.numbers[n]]\n",
" self.forces.append((n, symbol) + f)\n",
" elif n == 5:\n",
" self.forces.append((' ...', '',\n",
" ' ...',\n",
" ' ...',\n",
" ' ...'))\n",
" \n",
" self.stress = dct.get('stress')\n",
" if self.stress is not None:\n",
" self.stress = ', '.join('{0:.3f}'.format(s) for s in self.stress)\n",
" \n",
" if 'masses' in dct:\n",
" mass = dct.masses.sum()\n",
" else:\n",
" mass = atomic_masses[dct.numbers].sum()\n",
" \n",
" formula = hill(dct.numbers)\n",
" if subscript:\n",
" formula = subscript.sub(r'<sub>\\1</sub>', formula)\n",
" \n",
" table = [\n",
" ('id', dct.id),\n",
" ('age', float_to_time_string(now() - dct.ctime, True)),\n",
" ('formula', formula),\n",
" ('user', dct.user),\n",
" ('calculator', dct.get('calculator')),\n",
" ('energy [eV]', dct.get('energy')),\n",
" ('fmax [eV/Ang]', fmax),\n",
" ('charge [|e|]', dct.get('charge')),\n",
" ('mass [au]', mass),\n",
" ('unique id', dct.unique_id),\n",
" ('volume [Ang^3]', abs(np.linalg.det(dct.cell)))]\n",
" self.table = [(name, value) for name, value in table\n",
" if value is not None]\n",
"\n",
" if 'key_value_pairs' in dct:\n",
" self.key_value_pairs = sorted(dct.key_value_pairs.items())\n",
" else:\n",
" self.key_value_pairs = None\n",
"\n",
" if 'keywords' in dct:\n",
" self.keywords = ', '.join(sorted(dct.keywords))\n",
" else:\n",
" self.keywords = None\n",
" \n",
" self.dipole = dct.get('dipole')\n",
" if self.dipole is not None:\n",
" self.dipole = ', '.join('{0:.3f}'.format(d) for d in self.dipole)\n",
" \n",
" self.data = dct.get('data')\n",
" if self.data:\n",
" self.data = ', '.join(self.data.keys())\n",
" \n",
" self.constraints = dct.get('constraints')\n",
" if self.constraints:\n",
" self.constraints = ', '.join(d['name'] for d in self.constraints)\n",
" \n",
" def write(self):\n",
" dct = self.dct\n",
" \n",
" width = max(len(name) for name, value in self.table)\n",
" for name, value in self.table:\n",
" print('{0:{width}}|{1}'.format(name, value, width=width))\n",
"\n",
" print('\\nUnit cell in Ang:')\n",
" print('axis|periodic| x| y| z')\n",
" c = 1\n",
" for p, axis in zip(dct.pbc, self.cell):\n",
" print(' {0}| {1}|{2[0]:>11}|{2[1]:>11}|{2[2]:>11}'.format(\n",
" c, [' no', 'yes'][p], axis))\n",
" c += 1\n",
" \n",
" if self.key_value_pairs:\n",
" print('\\nKey-value pairs:')\n",
" width = max(len(key) for key, value in self.key_value_pairs)\n",
" for key, value in self.key_value_pairs:\n",
" print('{0:{width}}|{1}'.format(key, value, width=width))\n",
" \n",
" if self.keywords:\n",
" print('\\nKeywords:', self.keywords)\n",
" \n",
" if self.forces:\n",
" print('\\nForces in ev/Ang:')\n",
" for f in self.forces:\n",
" print('{0:4}|{1:2}|{2}|{3}|{4}'.format(*f))\n",
"\n",
" if self.stress:\n",
" print('\\nStress tensor (xx, yy, zz, zy, zx, yx) in eV/Ang^3:')\n",
" print(' ', self.stress)\n",
"\n",
" if self.dipole:\n",
" print('\\nDipole moment in e*Ang: ({0})'.format(self.dipole))\n",
" \n",
" if self.constraints:\n",
" print('\\nConstraints:', self.constraints)\n",
" \n",
" if self.data:\n",
" print('\\nData:', self.data)\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.1111111111111111,
0,
0.1111111111111111,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.047619047619047616,
0,
0,
0,
0.07692307692307693,
0,
0,
0,
0,
0.07692307692307693,
0,
0,
0,
0.07692307692307693,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.07692307692307693,
0,
0,
0,
0.1111111111111111,
0,
0,
0,
0.07692307692307693,
0,
0,
0,
0.1111111111111111,
0,
0,
0.1111111111111111,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.07692307692307693,
0,
0,
0,
0,
0,
0.058823529411764705,
0,
0,
0.058823529411764705,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.1111111111111111,
0,
0,
0.07692307692307693,
0,
0
] | 125 | 0.010963 |
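
Summary above reduces the force array to fmax with (forces**2).sum(1).max()**0.5, i.e. the largest per-atom force norm. A tiny NumPy check of that reduction (the force values are made up):

import numpy as np

forces = np.array([[0.10, -0.20, 0.05],     # one (fx, fy, fz) row per atom, illustrative values
                   [0.00,  0.30, -0.10]])

fmax = (forces**2).sum(1).max()**0.5        # same expression as in Summary.__init__
print('fmax [eV/Ang]: {0:.3f}'.format(fmax))
# equivalently: np.linalg.norm(forces, axis=1).max()
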
import sys
from metatlas import metatlas_objects as metob
import metatlas.helpers.metatlas_get_data_helper_fun as ma_data
import os
from IPython.display import display
import matplotlib.pyplot as plt
try:
import ipywidgets as widgets
except ImportError:
from IPython.html import widgets
try:
import traitlets
except ImportError:
from IPython.utils import traitlets
from ipywidgets import interact, interactive, fixed, FloatSlider
import copy
data = []
groups = []
file_names = []
compound_names = []
compound_objects = []
files_idx = dict()
compound_idx = dict()
groups_idx = dict()
# one select for the compound
wcompounds = widgets.Select(
description="compounds",
options=[]
)
# have a multiple select for the files
wfiles = widgets.SelectMultiple(
description="files",
options=[]
)
wfname = widgets.Text(
description='Atlas Name',
value='myAtlas',
)
all_files = widgets.Checkbox(
description='Select All Files',
value=False,
)
plot_button = widgets.Button(description='Plot me')
create_atlas_btn = widgets.Button(description="Create Atlas")
rtmin_widget = FloatSlider()
rtpeak_widget = FloatSlider()
rtmax_widget = FloatSlider()
###########################################################################
###
def plot_intensity(cval, fvals, rt_min, rt_max, rt_peak):
for i in range(len(fvals)):
d = data[files_idx[fvals[i]]][compound_idx[cval]]
if len(d['data']['eic']['rt']) > 0:
x = d['data']['eic']['rt']
y = d['data']['eic']['intensity']
plt.plot(x, y, 'k-', ms=1, mew=0, mfc='b', alpha=1.0)
plt.axvline(rt_min, color='b', linewidth=2.0)
plt.axvline(rt_max, color='g', linewidth=2.0)
plt.axvline(rt_peak, color='r', linewidth=2.0)
###########################################################################
###
def create_atlas(b):
identifications = list()
file_names = wfiles.value
compound_name = wcompounds.value
idx2 = compound_idx[compound_name]
atlas = metob.Atlas()
atlas.name = wfname.value
# create an empty rt reference
rt_ref = metob.RtReference()
rt_ref.rt_min = rtmin_widget.value
rt_ref.rt_max = rtmax_widget.value
rt_ref.rt_peak = rtpeak_widget.value
rt_ref.rt_units = data[0][idx2]['identification'].rt_references[0].rt_units
# create an empty mz_reference
mz_ref = metob.MzReference()
mz_ref.mz = data[0][idx2]['identification'].mz_references[0].mz
mz_ref.mz_tolerance = data[0][idx2]['identification'].mz_references[0].mz_tolerance
mz_ref.mz_tolerance_units = data[0][idx2]['identification'].mz_references[0].mz_tolerance_units
mz_ref.detected_polarity = data[0][idx2]['identification'].mz_references[0].detected_polarity
identification = metob.CompoundIdentification()
identification.compoud = compound_name
identification.name = compound_name
identification.rt_references = [rt_ref]
identification.mz_references = [mz_ref]
identifications.append(identification)
atlas.compound_identifications = identifications
#metob.store(atlas)
def select_files(b):
all_files.value = not all_files.value
###########################################################################
##
def plot_button_clicked(b):
plt.cla()
plt.clf()
plt.close()
fvals = list(wfiles.value)
cval = wcompounds.value
global rtmin_widget, rtmax_widget, rtpeak_widget
min_x = list()
max_x = list()
if len(fvals) == 1 and fvals[0] == 'all':
fvals = file_names
elif len(fvals) > 1 and 'all' in fvals:
fvals.remove('all')
#if all_files.value == True:
# fvals = file_names
#else:
# fvals = wfiles.value
for i in range(len(fvals)):
d = data[files_idx[fvals[i]]][compound_idx[cval]]
rt_min = d['identification'].rt_references[0].rt_min
rt_max = d['identification'].rt_references[0].rt_max
rt_peak = d['identification'].rt_references[0].rt_peak
if len(d['data']['eic']['rt']) > 0:
x = d['data']['eic']['rt']
y = d['data']['eic']['intensity']
min_x.append(min(x))
max_x.append(max(x))
plt.plot(x, y, 'k-', ms=1, mew=0, mfc='b', alpha=1.0)
plt.axvline(rt_min, color='b', linewidth=2.0)
plt.axvline(rt_max, color='g', linewidth=2.0)
plt.axvline(rt_peak, color='r', linewidth=2.0)
rtmin_widget.close()
rtpeak_widget.close()
rtmax_widget.close()
rtmin_widget = FloatSlider(min=min(min_x), max=max(max_x), step=0.01, value=rt_min, color='blue')
rtpeak_widget = FloatSlider(min=min(min_x), max=max(max_x), step=0.01, value=rt_peak, color='red')
rtmax_widget = FloatSlider(min=min(min_x), max=max(max_x), step=0.01, value=rt_max, color='green')
interact(plot_intensity,
cval=fixed(cval),
fvals=fixed(fvals),
rt_min=rtmin_widget,
rt_peak=rtpeak_widget,
rt_max=rtmax_widget)
def dill2atlas(fname):
global data, groups, file_names, compound_names, compound_objects, files_idx, compound_idx, groups_idx
data = ma_data.get_dill_data(fname)
groups = ma_data.get_group_names(data)
file_names = ma_data.get_file_names(data)
(compound_names, compound_objects) = ma_data.get_compound_names(data)
files_idx = dict()
for f_idx, f_name in enumerate(file_names):
files_idx[f_name] = f_idx
compound_idx = dict()
for cpd_idx, cpd_name in enumerate(compound_names):
compound_idx[cpd_name] = cpd_idx
groups_idx = dict()
for grp_idx, grp_name in enumerate(groups):
groups_idx[grp_name] = grp_idx
wcompounds.options=compound_names
wfiles.options= ['all'] + file_names
display(widgets.HBox((wfname, create_atlas_btn)))
display(widgets.HBox((wcompounds, wfiles)))
display(plot_button)
plot_button.on_click(plot_button_clicked)
create_atlas_btn.on_click(create_atlas)
all_files.observe(select_files)
| [
"import sys\n",
"\n",
"from metatlas import metatlas_objects as metob\n",
"import metatlas.helpers.metatlas_get_data_helper_fun as ma_data\n",
"import os\n",
"\n",
"\n",
"from IPython.display import display\n",
"import matplotlib.pyplot as plt\n",
"try:\n",
" import ipywidgets as widgets\n",
"except ImportError:\n",
" from IPython.html import widgets\n",
"try:\n",
" import traitlets\n",
"except ImportError:\n",
" from IPython.utils import traitlets\n",
"\n",
"from ipywidgets import interact, interactive, fixed, FloatSlider\n",
"\n",
"\n",
"import copy\n",
"\n",
"\n",
"\n",
"data = []\n",
"groups = []\n",
"file_names = []\n",
"compound_names = []\n",
"compound_objects = []\n",
"\n",
"files_idx = dict()\n",
"compound_idx = dict()\n",
"groups_idx = dict()\n",
"\n",
"\n",
"\n",
"# one select for the compound\n",
"wcompounds = widgets.Select(\n",
" description=\"compounds\",\n",
" options=[]\n",
")\n",
"\n",
"# have a multiple select for the files\n",
"wfiles = widgets.SelectMultiple(\n",
" description=\"files\",\n",
" options=[]\n",
")\n",
"\n",
"wfname = widgets.Text(\n",
" description='Atlas Name',\n",
" value='myAtlas',\n",
")\n",
"\n",
"all_files = widgets.Checkbox(\n",
" description='Select All Files',\n",
" value=False,\n",
")\n",
"\n",
"plot_button = widgets.Button(description='Plot me')\n",
"create_atlas_btn = widgets.Button(description=\"Create Atlas\")\n",
"\n",
"rtmin_widget = FloatSlider()\n",
"rtpeak_widget = FloatSlider()\n",
"rtmax_widget = FloatSlider()\n",
"\n",
"\n",
"###########################################################################\n",
"###\n",
"def plot_intensity(cval, fvals, rt_min, rt_max, rt_peak):\n",
" for i in range(len(fvals)):\n",
" d = data[files_idx[fvals[i]]][compound_idx[cval]]\n",
"\n",
" if len(d['data']['eic']['rt']) > 0:\n",
" x = d['data']['eic']['rt']\n",
" y = d['data']['eic']['intensity']\n",
" plt.plot(x, y, 'k-', ms=1, mew=0, mfc='b', alpha=1.0)\n",
"\n",
" plt.axvline(rt_min, color='b', linewidth=2.0)\n",
" plt.axvline(rt_max, color='g', linewidth=2.0)\n",
" plt.axvline(rt_peak, color='r', linewidth=2.0)\n",
"\n",
"\n",
"###########################################################################\n",
"###\n",
"def create_atlas(b):\n",
" identifications = list()\n",
" file_names = wfiles.value\n",
" compound_name = wcompounds.value\n",
" idx2 = compound_idx[compound_name]\n",
"\n",
" atlas = metob.Atlas()\n",
" atlas.name = wfname.value\n",
"\n",
" # create an empty rt reference\n",
" rt_ref = metob.RtReference()\n",
"\n",
" rt_ref.rt_min = rtmin_widget.value\n",
" rt_ref.rt_max = rtmax_widget.value\n",
" rt_ref.rt_peak = rtpeak_widget.value\n",
" rt_ref.rt_units = data[0][idx2]['identification'].rt_references[0].rt_units\n",
"\n",
" # create an empty mz_reference\n",
" mz_ref = metob.MzReference()\n",
"\n",
" mz_ref.mz = data[0][idx2]['identification'].mz_references[0].mz\n",
" mz_ref.mz_tolerance = data[0][idx2]['identification'].mz_references[0].mz_tolerance\n",
" mz_ref.mz_tolerance_units = data[0][idx2]['identification'].mz_references[0].mz_tolerance_units\n",
" mz_ref.detected_polarity = data[0][idx2]['identification'].mz_references[0].detected_polarity\n",
"\n",
" identification = metob.CompoundIdentification()\n",
" identification.compoud = compound_name\n",
" identification.name = compound_name\n",
" identification.rt_references = [rt_ref]\n",
" identification.mz_references = [mz_ref]\n",
"\n",
" identifications.append(identification)\n",
"\n",
" atlas.compound_identifications = identifications\n",
" #metob.store(atlas)\n",
"\n",
"\n",
"def select_files(b):\n",
" all_files.value = not all_files.value\n",
"\n",
"\n",
"###########################################################################\n",
"##\n",
"def plot_button_clicked(b):\n",
" plt.cla()\n",
" plt.clf()\n",
" plt.close()\n",
"\n",
" fvals = list(wfiles.value)\n",
" cval = wcompounds.value\n",
" global rtmin_widget, rtmax_widget, rtpeak_widget\n",
"\n",
" min_x = list()\n",
" max_x = list()\n",
"\n",
" if len(fvals) == 1 and fvals[0] == 'all':\n",
" fvals = file_names\n",
" elif len(fvals) > 1 and 'all' in fvals:\n",
" fvals.remove('all')\n",
"\n",
"\n",
"\n",
"\n",
"\n",
" #if all_files.value == True:\n",
" # fvals = file_names\n",
" #else:\n",
" # fvals = wfiles.value\n",
"\n",
" for i in range(len(fvals)):\n",
" d = data[files_idx[fvals[i]]][compound_idx[cval]]\n",
" rt_min = d['identification'].rt_references[0].rt_min\n",
" rt_max = d['identification'].rt_references[0].rt_max\n",
" rt_peak = d['identification'].rt_references[0].rt_peak\n",
"\n",
" if len(d['data']['eic']['rt']) > 0:\n",
" x = d['data']['eic']['rt']\n",
" y = d['data']['eic']['intensity']\n",
" min_x.append(min(x))\n",
" max_x.append(max(x))\n",
" plt.plot(x, y, 'k-', ms=1, mew=0, mfc='b', alpha=1.0)\n",
"\n",
" plt.axvline(rt_min, color='b', linewidth=2.0)\n",
" plt.axvline(rt_max, color='g', linewidth=2.0)\n",
" plt.axvline(rt_peak, color='r', linewidth=2.0)\n",
"\n",
" rtmin_widget.close()\n",
" rtpeak_widget.close()\n",
" rtmax_widget.close()\n",
" rtmin_widget = FloatSlider(min=min(min_x), max=max(max_x), step=0.01, value=rt_min, color='blue')\n",
" rtpeak_widget = FloatSlider(min=min(min_x), max=max(max_x), step=0.01, value=rt_peak, color='red')\n",
" rtmax_widget = FloatSlider(min=min(min_x), max=max(max_x), step=0.01, value=rt_max, color='green')\n",
" interact(plot_intensity,\n",
" cval=fixed(cval),\n",
" fvals=fixed(fvals),\n",
" rt_min=rtmin_widget,\n",
" rt_peak=rtpeak_widget,\n",
" rt_max=rtmax_widget)\n",
"\n",
"\n",
"def dill2atlas(fname):\n",
" global data, groups, file_names, compound_names, compound_objects, files_idx, compound_idx, groups_idx\n",
"\n",
" data = ma_data.get_dill_data(fname)\n",
" groups = ma_data.get_group_names(data)\n",
" file_names = ma_data.get_file_names(data)\n",
" (compound_names, compound_objects) = ma_data.get_compound_names(data)\n",
"\n",
" files_idx = dict()\n",
" for f_idx, f_name in enumerate(file_names):\n",
" files_idx[f_name] = f_idx\n",
"\n",
" compound_idx = dict()\n",
" for cpd_idx, cpd_name in enumerate(compound_names):\n",
" compound_idx[cpd_name] = cpd_idx\n",
"\n",
" groups_idx = dict()\n",
" for grp_idx, grp_name in enumerate(groups):\n",
" groups_idx[grp_name] = grp_idx\n",
"\n",
"\n",
" wcompounds.options=compound_names\n",
"\n",
" wfiles.options= ['all'] + file_names\n",
"\n",
"\n",
"\n",
" display(widgets.HBox((wfname, create_atlas_btn)))\n",
" display(widgets.HBox((wcompounds, wfiles)))\n",
" display(plot_button)\n",
"\n",
"\n",
" plot_button.on_click(plot_button_clicked)\n",
" create_atlas_btn.on_click(create_atlas)\n",
" all_files.observe(select_files)\n",
"\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.03333333333333333,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011363636363636364,
0.01,
0.01020408163265306,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.041666666666666664,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.06060606060606061,
0,
0.09090909090909091,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.00980392156862745,
0.009708737864077669,
0.009708737864077669,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.009259259259259259,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.05263157894736842,
0,
0.024390243902439025,
0,
0,
0,
0.018518518518518517,
0,
0,
0,
0,
0.021739130434782608,
0,
0,
1
] | 221 | 0.00685 |
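The first record above scores a notebook helper that wires ipywidgets sliders to a matplotlib EIC plot through interact(), with fixed() shielding the non-widget arguments. A minimal, self-contained sketch of that pattern follows, assuming a Jupyter session with ipywidgets and matplotlib installed; the synthetic peak data and the plot_eic name are illustrative, not taken from the dataset.

# Minimal sketch of the interact + FloatSlider pattern from the record above.
# Assumes a Jupyter environment with ipywidgets and matplotlib; the synthetic
# Gaussian peak stands in for the real extracted-ion chromatogram.
import numpy as np
import matplotlib.pyplot as plt
from ipywidgets import interact, fixed, FloatSlider

x = np.linspace(0, 10, 500)            # retention-time axis (illustrative)
y = np.exp(-(x - 5.0) ** 2)            # one synthetic peak

def plot_eic(x, y, rt_min, rt_peak, rt_max):
    """Plot the trace and mark the three retention-time boundaries."""
    plt.plot(x, y, 'k-')
    plt.axvline(rt_min, color='b', linewidth=2.0)
    plt.axvline(rt_peak, color='r', linewidth=2.0)
    plt.axvline(rt_max, color='g', linewidth=2.0)
    plt.show()

# Slider bounds come from the data itself, mirroring min(min_x)/max(max_x) above;
# fixed() keeps x and y out of the widget set so only the three sliders render.
interact(plot_eic,
         x=fixed(x), y=fixed(y),
         rt_min=FloatSlider(min=float(x.min()), max=float(x.max()), step=0.01, value=4.0),
         rt_peak=FloatSlider(min=float(x.min()), max=float(x.max()), step=0.01, value=5.0),
         rt_max=FloatSlider(min=float(x.min()), max=float(x.max()), step=0.01, value=6.0))

The sample instead keeps module-level sliders and closes and rebuilds them on every plot click so their bounds track whichever files are currently selected.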
#!/usr/bin/env python
'''
Plots the gravitational potential of the earth and moon
assumes the earth is at 0,0 and moon is on x-axis.
Inspired by http://www.wired.com/2014/07/contour-plots-with-python-and-plotly/?mbid=social_twitter
and: https://gist.github.com/rhettallain/1aa12b44d59562ce08fc
Practicing plotly
'''
__author__ = 'julenka'
import numpy as np
import plotly.plotly as py
import math
from plotly.graph_objs import *
massEarth = 5.9729e24
massMoon = 7.3477e22
distanceFromEarthToMoon = 384400 # units is km
G = 6.67384e-11
earthOffset = 100000
earthLocation = [earthOffset,earthOffset]
moonLocation = [earthOffset + distanceFromEarthToMoon/2,
earthOffset + math.sqrt(math.pow(distanceFromEarthToMoon,2) - math.pow(distanceFromEarthToMoon/2, 2))]
# make mesh
step = 10000
x = np.arange(1, 2 * distanceFromEarthToMoon, step)
y = np.arange(1, 2 * distanceFromEarthToMoon, step)
X,Y = np.meshgrid(x,y)
# gravitational potential values
V = X * 0
Vmax = 4e10
for r in range(len(X)):
for c in range(len(Y)):
currentLocation = np.array([X[r,c], Y[r,c]])
distanceToEarth = np.linalg.norm(np.subtract(currentLocation, earthLocation))
gpFromEarth = G * massEarth / distanceToEarth #V(r) = Gm/r
distanceToMoon = np.linalg.norm(np.subtract(currentLocation, moonLocation))
gpFromMoon = G * massMoon / distanceToMoon #V(r) = Gm/r
totalGp = max(0, min(Vmax, gpFromEarth + gpFromMoon))
V[r,c] = totalGp
data=[{'x':x, 'y':y, 'z':V, 'type':'contour'}]
plot_url = py.plot(data, filename='earth-moon gravitational potential') | [
"#!/usr/bin/env python\n",
"'''\n",
"Plots the gravitational potential of the earth and moon\n",
"assumes the earth is at 0,0 and moon is on x-axis.\n",
"\n",
"Inspired by http://www.wired.com/2014/07/contour-plots-with-python-and-plotly/?mbid=social_twitter\n",
"and: https://gist.github.com/rhettallain/1aa12b44d59562ce08fc\n",
"\n",
"Practicing plotly\n",
"'''\n",
"\n",
"__author__ = 'julenka'\n",
"\n",
"import numpy as np\n",
"import plotly.plotly as py\n",
"import math\n",
"from plotly.graph_objs import *\n",
"\n",
"massEarth = 5.9729e24\n",
"massMoon = 7.3477e22\n",
"distanceFromEarthToMoon = 384400 # units is km\n",
"G = 6.67384e-11\n",
"\n",
"earthOffset = 100000\n",
"earthLocation = [earthOffset,earthOffset]\n",
"moonLocation = [earthOffset + distanceFromEarthToMoon/2,\n",
" earthOffset + math.sqrt(math.pow(distanceFromEarthToMoon,2) - math.pow(distanceFromEarthToMoon/2, 2))]\n",
"\n",
"# make mesh\n",
"step = 10000\n",
"x = np.arange(1, 2 * distanceFromEarthToMoon, step)\n",
"y = np.arange(1, 2 * distanceFromEarthToMoon, step)\n",
"X,Y = np.meshgrid(x,y)\n",
"\n",
"\n",
"# gravitational potential values\n",
"V = X * 0\n",
"Vmax = 4e10\n",
"\n",
"for r in range(len(X)):\n",
" for c in range(len(Y)):\n",
" currentLocation = np.array([X[r,c], Y[r,c]])\n",
"\n",
" distanceToEarth = np.linalg.norm(np.subtract(currentLocation, earthLocation))\n",
" gpFromEarth = G * massEarth / distanceToEarth #V(r) = Gm/r\n",
"\n",
" distanceToMoon = np.linalg.norm(np.subtract(currentLocation, moonLocation))\n",
" gpFromMoon = G * massEarth / distanceToMoon #V(r) = Gm/r\n",
"\n",
" totalGp = max(0, min(Vmax, gpFromEarth + gpFromMoon))\n",
" V[r,c] = totalGp\n",
"\n",
"data=[{'x':x, 'y':y, 'z':V, 'type':'contour'}]\n",
"plot_url = py.plot(data, filename='earth-moon gravitational potential')"
] | [
0,
0,
0,
0,
0,
0.010101010101010102,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.02127659574468085,
0,
0,
0,
0.023809523809523808,
0,
0.01680672268907563,
0,
0,
0,
0,
0,
0.08695652173913043,
0,
0,
0,
0,
0,
0,
0,
0,
0.03773584905660377,
0,
0.011627906976744186,
0.029850746268656716,
0,
0.011904761904761904,
0.03076923076923077,
0,
0,
0.04,
0,
0.10638297872340426,
0.014084507042253521
] | 54 | 0.008172 |
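The second record computes a two-body gravitational potential on a mesh with nested Python loops and sends the result to plotly. Below is a vectorized NumPy sketch of the same field: broadcasting replaces the loops, and matplotlib's contour stands in for the online plotly call so the sketch runs without an API key. As in the sample, distances stay in km while G is in SI units, so the values are only relative.

# Vectorized sketch of the potential-field computation from the record above.
# Broadcasting replaces the nested loops; matplotlib stands in for the online
# plotly call. Units follow the sample (km grid, SI G), so values are relative.
import numpy as np
import matplotlib.pyplot as plt

G = 6.67384e-11
mass_earth = 5.9729e24
mass_moon = 7.3477e22
d_em = 384400.0                                   # earth-moon distance, km

earth = np.array([100000.0, 100000.0])
moon = earth + np.array([d_em / 2, np.sqrt(d_em**2 - (d_em / 2)**2)])

x = np.arange(1, 2 * d_em, 10000.0)
y = np.arange(1, 2 * d_em, 10000.0)
X, Y = np.meshgrid(x, y)

def potential(X, Y, loc, mass):
    """G*m/r evaluated at every grid point at once."""
    r = np.hypot(X - loc[0], Y - loc[1])
    return G * mass / r

V = np.clip(potential(X, Y, earth, mass_earth) +
            potential(X, Y, moon, mass_moon), 0, 4e10)   # same cap as Vmax in the sample

plt.contour(X, Y, V, levels=30)
plt.title('Earth-moon gravitational potential (sketch)')
plt.show()

On this roughly 77 x 77 grid the loop version is already fast, but the broadcast form scales to much finer steps without any extra bookkeeping.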
"""
EULA APP
This module provides additional functionality to the EULA app.
Classes:
EULAAcceptedMixin
Functions:
n/a
Created on 23 Oct 2013
@author: michael
"""
from django.core.exceptions import PermissionDenied
from django.core.urlresolvers import reverse_lazy
from django.contrib.auth.decorators import login_required
from django.contrib.contenttypes.models import ContentType
from django.shortcuts import get_object_or_404
from django.utils.decorators import method_decorator
from tunobase.eula import models, utils
class EULAAcceptedMixin(object):
"""Render EUAL to users."""
eula_url = 'eula_sign'
raise_exception = False
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
"""
Confirm that the EULA has been agreed to by the user
and if not render EULA.
"""
self.eula = get_object_or_404(models.EULA).latest_version()
# If the user has accepted the latest EULA
if not models.UserEULA.objects\
.filter(user=request.user, eula=self.eula)\
.exists():
if self.raise_exception:
raise PermissionDenied
else:
if hasattr(self, 'get_object'):
self.object = self.get_object()
eula_url_kwargs = {
'content_type_id': ContentType.objects\
.get_for_model(self.object).id,
'object_pk': self.object.pk
}
eula_url = reverse_lazy(
self.eula_url, kwargs=eula_url_kwargs
)
else:
eula_url = reverse_lazy(self.eula_url)
return utils.redirect_to_eula(
request.get_full_path(),
eula_url
)
return super(EULAAcceptedMixin, self).dispatch(
request,
*args,
**kwargs
)
| [
"\"\"\"\n",
"EULA APP\n",
"\n",
"This module provides additional functionality to the EUAL app.\n",
"\n",
"Classes:\n",
" EULAAcceptedMixin\n",
"\n",
"Functions:\n",
" n/a\n",
"\n",
"Created on 23 Oct 2013\n",
"\n",
"@author: michael\n",
"\n",
"\"\"\"\n",
"from django.core.exceptions import PermissionDenied\n",
"from django.core.urlresolvers import reverse_lazy\n",
"from django.contrib.auth.decorators import login_required\n",
"from django.contrib.contenttypes.models import ContentType\n",
"from django.shortcuts import get_object_or_404\n",
"from django.utils.decorators import method_decorator\n",
"\n",
"from tunobase.eula import models, utils\n",
"\n",
"class EULAAcceptedMixin(object):\n",
" \"\"\"Render EUAL to users.\"\"\"\n",
"\n",
" eula_url = 'eula_sign'\n",
" raise_exception = False\n",
"\n",
" @method_decorator(login_required)\n",
" def dispatch(self, request, *args, **kwargs):\n",
" \"\"\"\n",
" Confirm that the EULA has been agreed to by the user\n",
" and if not render EULA.\n",
"\n",
" \"\"\"\n",
" self.eula = get_object_or_404(models.EULA).latest_version()\n",
"\n",
" # If the user has accepted the latest EULA\n",
" if not models.UserEULA.objects\\\n",
" .filter(user=request.user, eula=self.eula)\\\n",
" .exists():\n",
" if self.raise_exception:\n",
" raise PermissionDenied\n",
" else:\n",
" if hasattr(self, 'get_object'):\n",
" self.object = self.get_object()\n",
" eula_url_kwargs = {\n",
" 'content_type_id': ContentType.objects\\\n",
" .get_for_model(self.object).id,\n",
" 'object_pk': self.object.pk\n",
" }\n",
" eula_url = reverse_lazy(\n",
" self.eula_url, kwargs=eula_url_kwargs\n",
" )\n",
" else:\n",
" eula_url = reverse_lazy(self.eula_url)\n",
"\n",
" return utils.redirect_to_eula(\n",
" request.get_full_path(),\n",
" eula_url\n",
" )\n",
"\n",
" return super(EULAAcceptedMixin, self).dispatch(\n",
" request,\n",
" *args,\n",
" **kwargs\n",
" )\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.030303030303030304,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.015625,
0.015625,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | 70 | 0.000879 |
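The third record is a Django class-based-view mixin that, for logged-in users, redirects to an EULA-signing page until the latest EULA has been accepted, or raises PermissionDenied when raise_exception is set. A usage sketch follows; it assumes a project with the tunobase.eula app installed and an 'eula_sign' URL pattern, and the import path, Article model, and template name are placeholders for illustration.

# Hypothetical view built on the EULAAcceptedMixin from the record above.
# The import path, the Article model and the template name are placeholders;
# a real project would import the mixin from wherever it actually lives.
from django.views.generic import DetailView

from tunobase.eula.mixins import EULAAcceptedMixin   # assumed import path
from myapp.models import Article                     # hypothetical model

class ArticleDetailView(EULAAcceptedMixin, DetailView):
    """Serves an article only after the current EULA has been accepted."""
    model = Article
    template_name = 'myapp/article_detail.html'
    raise_exception = False   # False: redirect to the 'eula_sign' URL; True: 403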
"""Fixtures for tests in this directory."""
import multiprocessing
import multiprocessing.queues
import sys
import httpretty
import pytest
from sphinx import build_main
def run_build_main(docs_dir, html_dir, overflow):
"""Run build_main().
:param str docs_dir: Path to input docs directory.
:param str html_dir: Path to output html directory.
:param iter overflow: Append these args to sphinx-build call.
:return: Value from build_main().
:rtype: int
"""
argv = ('sphinx-build', str(docs_dir), str(html_dir))
if overflow:
argv += overflow
result = build_main(argv)
return result
def run_build_main_post_multiprocessing(docs_dir, html_dir, cached_responses, queue, overflow):
"""Run Sphinx's build_main after setting up httpretty mock responses. Called by multiprocess.Process.
Need to use this instead of httpretty pytest fixtures since forking doesn't exist in Windows and multiprocess runs
in "spawn" mode. This means that everything setup by pytest is lost since subprocesses are generated from scratch on
Windows.
:raise: RuntimeError on Sphinx non-zero exit. This causes multiprocessing.Process().exitcode to be != 0.
:param str docs_dir: Path to input docs directory.
:param str html_dir: Path to output html directory.
:param dict cached_responses: URL keys and serialized JSON values.
:param multiprocessing.queues.Queue queue: Queue to transmit stdout/err back to parent process.
:param iter overflow: Append these args to sphinx-build call.
"""
# Capture stdout/stderr after forking/spawning.
capture = __import__('_pytest').capture
try:
capsys = capture.CaptureFixture(capture.SysCapture)
except TypeError:
capsys = capture.CaptureFixture(capture.SysCapture, None)
getattr(capsys, '_start')()
# Re-run httpretty on Windows (due to lack of forking).
if sys.platform == 'win32':
httpretty.enable()
if cached_responses:
for url, body in cached_responses.items():
httpretty.register_uri(httpretty.GET, url, body=body)
# Run.
result = run_build_main(docs_dir, html_dir, overflow)
stdout, stderr = capsys.readouterr()
queue.put((stdout, stderr))
if result != 0:
raise RuntimeError(result, stdout, stderr)
def pytest_namespace():
"""Add objects to the pytest namespace.
E.g. Returning {'func': lambda: True} allows import pytest; assert pytest.func() is True.
:return: Namespace names and objects.
:rtype: dict
"""
def add_page(root, name, append=''):
"""Add a page to the sample Sphinx docs.
:param py.path.local root: Path to docs root dir.
:param str name: Page name.
:param str append: Append text to RST document body.
:return: Path to new page RST file.
:rtype: py.path.local
"""
root.join('contents.rst').write(' {}\n'.format(name), mode='a')
page = root.join('{}.rst'.format(name))
page.write('.. _{}:\n\n{}\n{}\n\n{}'.format(name, name.capitalize(), '=' * len(name), append))
return page
def build_isolated(docs_dir, html_dir, cached_responses, overflow=None):
"""Run build_main() through multiprocessing.Process.
:param str docs_dir: Path to input docs directory.
:param str html_dir: Path to output html directory.
:param dict cached_responses: URL keys and serialized JSON values.
:param iter overflow: Append these args to sphinx-build call.
:return: Exit code of subprocess, stdout, and stderr.
:rtype: tuple
"""
queue = multiprocessing.Queue()
args = docs_dir, html_dir, cached_responses, queue, overflow
child = multiprocessing.Process(target=run_build_main_post_multiprocessing, args=args)
child.start()
child.join()
result = child.exitcode
try:
stdout, stderr = queue.get(False)
except multiprocessing.queues.Empty:
stdout, stderr = '', ''
return result, stdout, stderr
return dict(add_page=add_page, build_isolated=build_isolated)
@pytest.fixture
def docs(tmpdir):
"""Create sample docs used in this test module.
:param tmpdir: pytest fixture.
:return: Path to docs root.
:rtype: py.path
"""
root = tmpdir.ensure_dir('docs')
# Create Sphinx config.
root.join('conf.py').write("extensions = ['sphinxcontrib.imgur']\nimgur_client_id = 'a0b1c2d3e4f56789'\n")
# Create Sphinx docs.
root.join('contents.rst').write(
'Test\n'
'====\n'
'\n'
'Sample documentation.\n'
'\n'
'.. toctree::\n'
' ignore\n'
)
root.join('ignore.rst').write('.. _ignore:\n\nIgnore\n======\n\nHello World.\n')
return root
| [
"\"\"\"Fixtures for tests in this directory.\"\"\"\n",
"\n",
"import multiprocessing\n",
"import multiprocessing.queues\n",
"import sys\n",
"\n",
"import httpretty\n",
"import pytest\n",
"from sphinx import build_main\n",
"\n",
"\n",
"def run_build_main(docs_dir, html_dir, overflow):\n",
" \"\"\"Run build_main().\n",
"\n",
" :param str docs_dir: Path to input docs directory.\n",
" :param str html_dir: Path to output html directory.\n",
" :param iter overflow: Append these args to sphinx-build call.\n",
"\n",
" :return: Value from build_main().\n",
" :rtype: int\n",
" \"\"\"\n",
" argv = ('sphinx-build', str(docs_dir), str(html_dir))\n",
" if overflow:\n",
" argv += overflow\n",
" result = build_main(argv)\n",
" return result\n",
"\n",
"\n",
"def run_build_main_post_multiprocessing(docs_dir, html_dir, cached_responses, queue, overflow):\n",
" \"\"\"Run Sphinx's build_main after setting up httpretty mock responses. Called by multiprocess.Process.\n",
"\n",
" Need to use this instead of httpretty pytest fixtures since forking doesn't exist in Windows and multiprocess runs\n",
" in \"spawn\" mode. This means that everything setup by pytest is lost since subprocesses are generated from scratch on\n",
" Windows.\n",
"\n",
" :raise: RuntimeError on Sphinx non-zero exit. This causes multiprocessing.Process().exitcode to be != 0.\n",
"\n",
" :param str docs_dir: Path to input docs directory.\n",
" :param str html_dir: Path to output html directory.\n",
" :param dict cached_responses: URL keys and serialized JSON values.\n",
" :param multiprocessing.queues.Queue queue: Queue to transmit stdout/err back to parent process.\n",
" :param iter overflow: Append these args to sphinx-build call.\n",
" \"\"\"\n",
" # Capture stdout/stderr after forking/spawning.\n",
" capture = __import__('_pytest').capture\n",
" try:\n",
" capsys = capture.CaptureFixture(capture.SysCapture)\n",
" except TypeError:\n",
" capsys = capture.CaptureFixture(capture.SysCapture, None)\n",
" getattr(capsys, '_start')()\n",
"\n",
" # Re-run httpretty on Windows (due to lack of forking).\n",
" if sys.platform == 'win32':\n",
" httpretty.enable()\n",
" if cached_responses:\n",
" for url, body in cached_responses.items():\n",
" httpretty.register_uri(httpretty.GET, url, body=body)\n",
"\n",
" # Run.\n",
" result = run_build_main(docs_dir, html_dir, overflow)\n",
" stdout, stderr = capsys.readouterr()\n",
" queue.put((stdout, stderr))\n",
" if result != 0:\n",
" raise RuntimeError(result, stdout, stderr)\n",
"\n",
"\n",
"def pytest_namespace():\n",
" \"\"\"Add objects to the pytest namespace.\n",
"\n",
" E.g. Returning {'func': lambda: True} allows import pytest; assert pytest.func() is True.\n",
"\n",
" :return: Namespace names and objects.\n",
" :rtype: dict\n",
" \"\"\"\n",
" def add_page(root, name, append=''):\n",
" \"\"\"Add a page to the sample Sphinx docs.\n",
"\n",
" :param py.path.local root: Path to docs root dir.\n",
" :param str name: Page name.\n",
" :param str append: Append text to RST document body.\n",
"\n",
" :return: Path to new page RST file.\n",
" :rtype: py.path.local\n",
" \"\"\"\n",
" root.join('contents.rst').write(' {}\\n'.format(name), mode='a')\n",
" page = root.join('{}.rst'.format(name))\n",
" page.write('.. _{}:\\n\\n{}\\n{}\\n\\n{}'.format(name, name.capitalize(), '=' * len(name), append))\n",
" return page\n",
"\n",
" def build_isolated(docs_dir, html_dir, cached_responses, overflow=None):\n",
" \"\"\"Run build_main() through multiprocessing.Process.\n",
"\n",
" :param str docs_dir: Path to input docs directory.\n",
" :param str html_dir: Path to output html directory.\n",
" :param dict cached_responses: URL keys and serialized JSON values.\n",
" :param iter overflow: Append these args to sphinx-build call.\n",
"\n",
" :return: Exit code of subprocess, stdout, and stderr.\n",
" :rtype: tuple\n",
" \"\"\"\n",
" queue = multiprocessing.Queue()\n",
" args = docs_dir, html_dir, cached_responses, queue, overflow\n",
" child = multiprocessing.Process(target=run_build_main_post_multiprocessing, args=args)\n",
" child.start()\n",
" child.join()\n",
" result = child.exitcode\n",
" try:\n",
" stdout, stderr = queue.get(False)\n",
" except multiprocessing.queues.Empty:\n",
" stdout, stderr = '', ''\n",
" return result, stdout, stderr\n",
"\n",
" return dict(add_page=add_page, build_isolated=build_isolated)\n",
"\n",
"\n",
"@pytest.fixture\n",
"def docs(tmpdir):\n",
" \"\"\"Create sample docs used in this test module.\n",
"\n",
" :param tmpdir: pytest fixture.\n",
"\n",
" :return: Path to docs root.\n",
" :rtype: py.path\n",
" \"\"\"\n",
" root = tmpdir.ensure_dir('docs')\n",
"\n",
" # Create Sphinx config.\n",
" root.join('conf.py').write(\"extensions = ['sphinxcontrib.imgur']\\nimgur_client_id = 'a0b1c2d3e4f56789'\\n\")\n",
"\n",
" # Create Sphinx docs.\n",
" root.join('contents.rst').write(\n",
" 'Test\\n'\n",
" '====\\n'\n",
" '\\n'\n",
" 'Sample documentation.\\n'\n",
" '\\n'\n",
" '.. toctree::\\n'\n",
" ' ignore\\n'\n",
" )\n",
" root.join('ignore.rst').write('.. _ignore:\\n\\nIgnore\\n======\\n\\nHello World.\\n')\n",
"\n",
" return root\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.010416666666666666,
0.009433962264150943,
0,
0.008403361344537815,
0.008264462809917356,
0,
0,
0.009174311926605505,
0,
0,
0,
0,
0.01,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.010638297872340425,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.009708737864077669,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.010526315789473684,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.009009009009009009,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0.011764705882352941,
0,
0
] | 142 | 0.000756 |
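The fourth record isolates each Sphinx build in a child process so httpretty mocking also works under Windows' "spawn" start method, and ships the captured stdout/stderr back through a multiprocessing queue. Stripped of the Sphinx and pytest specifics, the pattern reduces to the sketch below; the worker function and its message are illustrative.

# Minimal sketch of the process-plus-queue pattern from the record above:
# run work in a child process, pass its output back, then read the exit code.
import multiprocessing
import queue

def worker(q, name):
    """Stand-in for run_build_main_post_multiprocessing: do work, report back."""
    q.put('built docs for {}'.format(name))

def build_isolated(name):
    q = multiprocessing.Queue()
    child = multiprocessing.Process(target=worker, args=(q, name))
    child.start()
    child.join()
    try:
        message = q.get(False)      # non-blocking, like queue.get(False) in the sample
    except queue.Empty:
        message = ''
    return child.exitcode, message

if __name__ == '__main__':          # guard required under the Windows "spawn" method
    print(build_isolated('sample'))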
# Copyright 2017 Cisco Systems
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
LOG = logging.getLogger(__name__)
# A list of features and their supported microversions. Note that these are
# explicit functioning versions, not a range.
# There should be a minimum of two versions per feature. The first entry in
# this list should always be the lowest possible API microversion for a
# feature i.e. the version at which that feature was introduced. The second
# entry should be the current service version when the feature was added to
# horizon.
# Further documentation can be found at
# https://docs.openstack.org/horizon/latest/contributor/topics/
# microversion_support.html
MICROVERSION_FEATURES = {
"nova": {
"locked_attribute": ["2.9", "2.42"],
"instance_description": ["2.19", "2.42"],
"remote_console_mks": ["2.8", "2.53"],
"servergroup_soft_policies": ["2.15", "2.60"],
"servergroup_user_info": ["2.13", "2.60"],
"multiattach": ["2.60"],
"auto_allocated_network": ["2.37", "2.42"],
"key_types": ["2.2", "2.9"],
"key_type_list": ["2.9"],
},
"cinder": {
"groups": ["3.27", "3.43", "3.48", "3.58"],
"consistency_groups": ["2.0", "3.10"],
"message_list": ["3.5", "3.29"],
"limits_project_id_query": ["3.43", "3.50", "3.55"],
}
}
class MicroVersionNotFound(Exception):
def __init__(self, features):
self.features = features
def __str__(self):
return "Insufficient microversion for %s" % self.features
def get_requested_versions(service, features):
if not features:
return None
# Convert a single feature string into a list for backward compatibility.
if isinstance(features, str):
features = [features]
try:
service_features = MICROVERSION_FEATURES[service]
except KeyError:
LOG.debug("'%s' could not be found in the MICROVERSION_FEATURES dict",
service)
return None
feature_versions = set(service_features[features[0]])
for feature in features[1:]:
feature_versions &= set(service_features[feature])
if not feature_versions:
return None
# Sort version candidates from largest to smallest
feature_versions = sorted(feature_versions, reverse=True,
key=lambda v: [int(i) for i in v.split('.')])
return feature_versions
# NOTE(robcresswell): Since each client implements their own wrapper class for
# API objects, we'll need to allow that to be passed in. In the future this
# should be replaced by some common handling in Oslo.
def get_microversion_for_features(service, features, wrapper_class,
min_ver, max_ver):
"""Retrieves that highest known functional microversion for features"""
feature_versions = get_requested_versions(service, features)
if not feature_versions:
return None
for version in feature_versions:
microversion = wrapper_class(version)
if microversion.matches(min_ver, max_ver):
return microversion
return None
| [
"# Copyright 2017 Cisco Systems\n",
"#\n",
"# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n",
"# not use this file except in compliance with the License. You may obtain\n",
"# a copy of the License at\n",
"#\n",
"# http://www.apache.org/licenses/LICENSE-2.0\n",
"#\n",
"# Unless required by applicable law or agreed to in writing, software\n",
"# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n",
"# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n",
"# License for the specific language governing permissions and limitations\n",
"# under the License.\n",
"\n",
"import logging\n",
"\n",
"LOG = logging.getLogger(__name__)\n",
"\n",
"# A list of features and their supported microversions. Note that these are\n",
"# explicit functioning versions, not a range.\n",
"# There should be a minimum of two versions per feature. The first entry in\n",
"# this list should always be the lowest possible API microversion for a\n",
"# feature i.e. the version at which that feature was introduced. The second\n",
"# entry should be the current service version when the feature was added to\n",
"# horizon.\n",
"# Further documentation can be found at\n",
"# https://docs.openstack.org/horizon/latest/contributor/topics/\n",
"# microversion_support.html\n",
"MICROVERSION_FEATURES = {\n",
" \"nova\": {\n",
" \"locked_attribute\": [\"2.9\", \"2.42\"],\n",
" \"instance_description\": [\"2.19\", \"2.42\"],\n",
" \"remote_console_mks\": [\"2.8\", \"2.53\"],\n",
" \"servergroup_soft_policies\": [\"2.15\", \"2.60\"],\n",
" \"servergroup_user_info\": [\"2.13\", \"2.60\"],\n",
" \"multiattach\": [\"2.60\"],\n",
" \"auto_allocated_network\": [\"2.37\", \"2.42\"],\n",
" \"key_types\": [\"2.2\", \"2.9\"],\n",
" \"key_type_list\": [\"2.9\"],\n",
" },\n",
" \"cinder\": {\n",
" \"groups\": [\"3.27\", \"3.43\", \"3.48\", \"3.58\"],\n",
" \"consistency_groups\": [\"2.0\", \"3.10\"],\n",
" \"message_list\": [\"3.5\", \"3.29\"],\n",
" \"limits_project_id_query\": [\"3.43\", \"3.50\", \"3.55\"],\n",
" }\n",
"}\n",
"\n",
"\n",
"class MicroVersionNotFound(Exception):\n",
" def __init__(self, features):\n",
" self.features = features\n",
"\n",
" def __str__(self):\n",
" return \"Insufficient microversion for %s\" % self.features\n",
"\n",
"\n",
"def get_requested_versions(service, features):\n",
" if not features:\n",
" return None\n",
" # Convert a single feature string into a list for backward compatibility.\n",
" if isinstance(features, str):\n",
" features = [features]\n",
" try:\n",
" service_features = MICROVERSION_FEATURES[service]\n",
" except KeyError:\n",
" LOG.debug(\"'%s' could not be found in the MICROVERSION_FEATURES dict\",\n",
" service)\n",
" return None\n",
"\n",
" feature_versions = set(service_features[features[0]])\n",
" for feature in features[1:]:\n",
" feature_versions &= set(service_features[feature])\n",
" if not feature_versions:\n",
" return None\n",
" # Sort version candidates from larger versins\n",
" feature_versions = sorted(feature_versions, reverse=True,\n",
" key=lambda v: [int(i) for i in v.split('.')])\n",
" return feature_versions\n",
"\n",
"\n",
"# NOTE(robcresswell): Since each client implements their own wrapper class for\n",
"# API objects, we'll need to allow that to be passed in. In the future this\n",
"# should be replaced by some common handling in Oslo.\n",
"def get_microversion_for_features(service, features, wrapper_class,\n",
" min_ver, max_ver):\n",
" \"\"\"Retrieves that highest known functional microversion for features\"\"\"\n",
" feature_versions = get_requested_versions(service, features)\n",
" if not feature_versions:\n",
" return None\n",
"\n",
" for version in feature_versions:\n",
" microversion = wrapper_class(version)\n",
" if microversion.matches(min_ver, max_ver):\n",
" return microversion\n",
" return None\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | 96 | 0 |
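The fifth record intersects the per-feature version lists, sorts the candidates from highest to lowest, and returns the first one that the supplied wrapper class says falls inside the service's [min_ver, max_ver] range. A usage sketch follows; the 'microversions' import name and the SimpleVersion wrapper are assumptions for illustration, since real Horizon callers pass a client-specific version class such as novaclient's APIVersion.

# Usage sketch for get_microversion_for_features from the record above. The
# module name 'microversions' and the SimpleVersion wrapper are assumptions;
# production code passes the relevant client's own API-version class instead.
import microversions                     # hypothetical import of the module above

class SimpleVersion(object):
    """Tiny stand-in for a client API-version wrapper with a matches() method."""
    def __init__(self, version):
        self.version = version
        self.parts = tuple(int(p) for p in version.split('.'))

    def matches(self, min_ver, max_ver):
        return min_ver.parts <= self.parts <= max_ver.parts

# Highest known microversion for nova's 'multiattach' that fits [2.1, 2.65].
chosen = microversions.get_microversion_for_features(
    'nova', 'multiattach', SimpleVersion,
    SimpleVersion('2.1'), SimpleVersion('2.65'))
print(chosen.version if chosen else None)    # 2.60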
import numpy
from chainer.backends import cuda
from chainer.functions.loss import black_out
from chainer import link
from chainer.utils import walker_alias
from chainer import variable
class BlackOut(link.Link):
"""BlackOut loss layer.
.. seealso:: :func:`~chainer.functions.black_out` for more detail.
Args:
in_size (int): Dimension of input vectors.
counts (int list): Number of occurrences of each identifier.
sample_size (int): Number of negative samples.
Attributes:
W (~chainer.Parameter): Weight parameter matrix.
"""
sample_data = None
def __init__(self, in_size, counts, sample_size):
super(BlackOut, self).__init__()
vocab_size = len(counts)
p = numpy.array(counts, dtype=numpy.float32)
self.sampler = walker_alias.WalkerAlias(p)
self.sample_size = sample_size
with self.init_scope():
self.W = variable.Parameter(shape=(vocab_size, in_size))
def to_cpu(self):
super(BlackOut, self).to_cpu()
self.sampler.to_cpu()
def to_gpu(self, device=None):
with cuda._get_device(device):
super(BlackOut, self).to_gpu()
self.sampler.to_gpu()
def forward(self, x, t):
"""Computes the loss value for given input and ground truth labels.
Args:
x (~chainer.Variable): Input of the weight matrix multiplication.
t (~chainer.Variable): Batch of ground truth labels.
Returns:
~chainer.Variable: Loss value.
"""
batch_size = x.shape[0]
if self.sample_data is not None:
# for test
sample_data = self.sample_data
else:
shape = (batch_size, self.sample_size)
sample_data = self.sampler.sample(shape)
samples = variable.Variable(sample_data)
return black_out.black_out(x, t, self.W, samples)
| [
"import numpy\n",
"\n",
"from chainer.backends import cuda\n",
"from chainer.functions.loss import black_out\n",
"from chainer import link\n",
"from chainer.utils import walker_alias\n",
"from chainer import variable\n",
"\n",
"\n",
"class BlackOut(link.Link):\n",
"\n",
" \"\"\"BlackOut loss layer.\n",
"\n",
" .. seealso:: :func:`~chainer.functions.black_out` for more detail.\n",
"\n",
" Args:\n",
" in_size (int): Dimension of input vectors.\n",
" counts (int list): Number of each identifiers.\n",
" sample_size (int): Number of negative samples.\n",
"\n",
" Attributes:\n",
" W (~chainer.Parameter): Weight parameter matrix.\n",
"\n",
" \"\"\"\n",
"\n",
" sample_data = None\n",
"\n",
" def __init__(self, in_size, counts, sample_size):\n",
" super(BlackOut, self).__init__()\n",
" vocab_size = len(counts)\n",
" p = numpy.array(counts, dtype=numpy.float32)\n",
" self.sampler = walker_alias.WalkerAlias(p)\n",
" self.sample_size = sample_size\n",
"\n",
" with self.init_scope():\n",
" self.W = variable.Parameter(shape=(vocab_size, in_size))\n",
"\n",
" def to_cpu(self):\n",
" super(BlackOut, self).to_cpu()\n",
" self.sampler.to_cpu()\n",
"\n",
" def to_gpu(self, device=None):\n",
" with cuda._get_device(device):\n",
" super(BlackOut, self).to_gpu()\n",
" self.sampler.to_gpu()\n",
"\n",
" def forward(self, x, t):\n",
" \"\"\"Computes the loss value for given input and ground truth labels.\n",
"\n",
" Args:\n",
" x (~chainer.Variable): Input of the weight matrix multiplication.\n",
" t (~chainer.Variable): Batch of ground truth labels.\n",
"\n",
" Returns:\n",
" ~chainer.Variable: Loss value.\n",
"\n",
" \"\"\"\n",
"\n",
" batch_size = x.shape[0]\n",
" if self.sample_data is not None:\n",
" # for test\n",
" sample_data = self.sample_data\n",
" else:\n",
" shape = (batch_size, self.sample_size)\n",
" sample_data = self.sampler.sample(shape)\n",
" samples = variable.Variable(sample_data)\n",
" return black_out.black_out(x, t, self.W, samples)\n"
] | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | 67 | 0 |
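The last record's BlackOut link builds a Walker alias sampler over the unigram counts and draws sample_size negative ids per example before delegating to the black_out loss function. The sketch below exercises just that sampling step, assuming Chainer is installed; the counts and the (batch, sample_size) shape are synthetic.

# Sketch of the negative-sampling step behind the BlackOut link above, assuming
# Chainer is installed; the counts and the (batch, sample_size) shape are synthetic.
import numpy as np
from chainer.utils import walker_alias

counts = np.array([10, 4, 7, 25, 3], dtype=np.float32)   # unigram frequencies
sampler = walker_alias.WalkerAlias(counts)               # O(1) draws per sample

# Two negative ids for each of six examples, drawn proportionally to counts,
# the same call BlackOut.forward() makes via self.sampler.sample(shape).
negatives = sampler.sample((6, 2))
print(negatives.shape)   # (6, 2), integer ids into counts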