# -*- coding: utf-8 -*-
"""
@author: Youye
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
#%% Define the ResNet block
class ResNetBlock(nn.Module):
def __init__(self,inter_channel = 128):
super(ResNetBlock, self).__init__()
# RESNET Block Operator
self.conv1 = nn.Conv2d( in_channels=inter_channel , out_channels=inter_channel, kernel_size=(1,1))
self.CN1 = nn.InstanceNorm2d( num_features=inter_channel )
self.BN1 = nn.BatchNorm2d( num_features=inter_channel , affine=False )
self.conv2 = nn.Conv2d( in_channels=inter_channel , out_channels=inter_channel, kernel_size=(1,1))
self.CN2 = nn.InstanceNorm2d( num_features=inter_channel )
self.BN2 = nn.BatchNorm2d( num_features=inter_channel , affine=False )
def forward(self,x):
# define the structure of the ResNetBlock
        identity = x
        x = self.conv1(x)
        x = self.CN1(x)
        x = self.BN1(x)
        x = F.relu(x)
        x = self.conv2(x)
        x = self.CN2(x)
        x = self.BN2(x)
        x = F.relu(x)
        x = x + identity
return x
#%% Define the network structure
class Net(nn.Module):
def __init__(self, numBlocks1 = 5, numBlocks2=19,inter_channel=128):
self.numBlocks1 = numBlocks1 # for inlier predictor
self.numBlocks2 = numBlocks2 # for object weight predictor
super(Net, self).__init__()
# INPUT layer operator
self.convInt = nn.Conv2d( in_channels=1 , out_channels=inter_channel , kernel_size=(1,5) )
# Common ResNetBlock
layers1 = []
for _ in range(0,self.numBlocks1):
layers1.append( ResNetBlock(inter_channel) )
self.ResNet1 = nn.Sequential(*layers1)
# OUTPUT layer operator
self.convInlierPredictor = nn.Conv2d( in_channels=inter_channel , out_channels=1, kernel_size=(1,1) )
def forward(self, x):
# Input Layer
x = self.convInt(x)
# ResNet blocks
x = self.ResNet1(x)
######### inlier predictor routine ################
#x = self.ResNet3(x)
# [ Batch_size , 128 , num_weight, 1 ]
[batch_size, _, numData,_] = x.shape
# inlier predictor
x = self.convInlierPredictor(x)
x = x.view([batch_size,numData])
#x_inlier = self.linearInlierPredictor1(x_inlier)
#x_inlier = self.linearInlierPredictor2(x_inlier)
x = torch.tanh(x)
x = F.relu(x)
return x
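# A minimal shape sketch (assumption: inputs are packed as [batch, 1, num_points, 5],
# so the (1, 5) kernel of convInt collapses the last dimension and the network emits
# one inlier score per point):
#
#   net = Net(numBlocks1=5, inter_channel=128)
#   x = torch.randn(8, 1, 512, 5)   # 8 samples, 512 candidate points, 5 features each
#   scores = net(x)                 # -> shape [8, 512]; tanh followed by relu keeps scores in [0, 1)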
#%% Define the network structure
class AngleNet(nn.Module):
def __init__(self, numBlocks = 20,inter_channel=128):
self.numBlocks = numBlocks
super(AngleNet, self).__init__()
# INPUT layer operator
self.convInt = nn.Conv2d( in_channels=1 , out_channels=inter_channel , kernel_size=(1,5) )
# Common ResNetBlock
layers = []
for _ in range(0,self.numBlocks):
layers.append( ResNetBlock(inter_channel) )
self.ResNet = nn.Sequential(*layers)
# OUTPUT layer operator
self.convOut = nn.Conv2d( in_channels=inter_channel , out_channels=9, kernel_size=(1,1) )
self.SoftMax = nn.LogSoftmax(dim=1)
def forward(self, x):
# Input Layer
x = self.convInt(x)
# ResNet blocks
x = self.ResNet(x)
# [ Batch_size , 128 ,numData, 1 ]
[batch_size, _, numData,_] = x.shape
# [ Batch_size , 9 ,numData, 1 ]
x = self.convOut(x)
x = x[:,:,:,0]
x = self.SoftMax(x)
return x |
"""JS/CSS bundles for theme."""
from flask_assets import Bundle
from invenio_assets import NpmBundle
css = NpmBundle(
Bundle(
'scss/styles.scss',
filters='node-scss, cleancss',
output="gen/cd2hrepo.local.styles.%(version)s.css",
depends=('scss/*.scss', ),
),
Bundle(
'node_modules/angular-loading-bar/build/loading-bar.css',
'node_modules/typeahead.js-bootstrap-css/typeaheadjs.css',
'node_modules/bootstrap-switch/dist/css/bootstrap3'
'/bootstrap-switch.css',
filters='cleancss',
output="gen/cd2hrepo.external.styles.%(version)s.css",
),
depends=('scss/*.scss', ),
output="gen/cd2hrepo.%(version)s.css",
npm={
'bootstrap-sass': '~3.3.5',
'bootstrap-switch': '~3.0.2',
'font-awesome': '~4.7.0',
'typeahead.js-bootstrap-css': '~1.2.1',
}
)
"""Default CSS bundle."""
js = Bundle(
NpmBundle(
'node_modules/almond/almond.js',
'js/settings.js',
filters='uglifyjs',
output="gen/cd2hrepo.external.%(version)s.js",
npm={
'almond': '~0.3.1',
'angular': '~1.4.9',
'jquery': '~1.9.1',
}
),
Bundle(
'js/base.js',
output="gen/cd2hrepo.base.%(version)s.js",
filters='requirejs',
),
filters='jsmin',
output='gen/packed.%(version)s.js',
)
"""Default JavaScript bundle with Almond, JQuery and RequireJS."""
|
import base64
import json
configs = [{
"v": "2",
"ps": "ibm",
"add": "v2ray_ip",
"port": "30080",
"id": "18ad2c9c-a88b-48e8-aa64-5dee0045c282",
"aid": "0",
"net": "kcp",
"type": "wechat-video",
"host": "",
"path": "",
"tls": ""
}, {
"v": "2",
"ps": "cf",
"add": "104.19.96.0",
"port": "443",
"id": "18ad2c9c-a88b-48e8-aa64-5dee0045c282",
"aid": "0",
"net": "ws",
"type": "none",
"host": "v2ray_host",
"path": "ws",
"tls": "tls"
}]
urls = []
for conf in configs:
urls.append("vmess://" + base64.urlsafe_b64encode(json.dumps(conf).encode()).decode())
print(base64.urlsafe_b64encode("\n".join(urls).encode()).decode())
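# Sanity-check sketch: each vmess:// link is just the URL-safe base64 of its JSON
# config, so decoding the part after the scheme gives back the original dict:
#   decoded = json.loads(base64.urlsafe_b64decode(urls[0][len("vmess://"):]))
#   assert decoded == configs[0]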
|
from flask import Flask, request
import db_utils
import email_utils
import git_utils
app = Flask(__name__)
@app.route('/publish', methods=["GET", "POST"])
def publish_to_cloud():
key = request.json['key']
msg = email_utils.fetch_raw_email_from_aws(key)
filepath = '_posts/' + email_utils.fetch_filename(msg)
post = email_utils.format_post(msg)
email = email_utils.fetch_from_address(msg)
repo = db_utils.get_blog_from_email(email)
git_utils.push_file_to_github(filepath, post, repo)
return 'Thanks', 200
@app.route('/register', methods=["GET", "POST"])
def register_user():
key = request.json['key']
blog = request.json['blog']
msg = email_utils.fetch_raw_email_from_aws(key)
email = email_utils.fetch_from_address(msg)
db_utils.register_user(email, blog)
git_utils.update_template(blog)
return 'Registered', 200
if __name__ == '__main__':
app.run()
|
#!/usr/bin/env python3
import re
'''
Example rules:
* light red bags contain 1 bright white bag, 2 muted yellow bags.
* dark orange bags contain 3 bright white bags, 4 muted yellow bags.
* bright white bags contain 1 shiny gold bag.
* muted yellow bags contain 2 shiny gold bags, 9 faded blue bags.
* shiny gold bags contain 1 dark olive bag, 2 vibrant plum bags.
* dark olive bags contain 3 faded blue bags, 4 dotted black bags.
* vibrant plum bags contain 5 faded blue bags, 6 dotted black bags.
* faded blue bags contain no other bags.
* dotted black bags contain no other bags.
Consider again your shiny gold bag and the rules from the above example:
* faded blue bags contain 0 other bags.
* dotted black bags contain 0 other bags.
* vibrant plum bags contain 11 other bags: 5 faded blue bags and 6 dotted black bags.
* dark olive bags contain 7 other bags: 3 faded blue bags and 4 dotted black bags.
So, a single shiny gold bag must contain 1 dark olive bag (and the 7 bags within it) plus 2 vibrant plum bags (and the 11 bags within each of those): 1 + 1*7 + 2 + 2*11 = 32 bags!
'''
g_file = 'input.txt'
#------------------------------------------------------------------------------
def run():
#------------------------------------------------------------------------------
'''
l_rules['shiny gold'] = [ (1, 'dark olive'), (2, 'vibrant plum') ]
'''
l_rules = dict()
for l_line in open(g_file).readlines():
(l_outer, l_inner_list) = parse_rule(l_line)
        if l_inner_list is None:
continue
l_rules[l_outer] = l_inner_list
l_count = count_bags('shiny gold', l_rules)
print("1 shiny gold bag holds {} bags".format(l_count))
'''
count the number of bags contained by this bag type
'''
#------------------------------------------------------------------------------
def count_bags(x_desc, x_rules):
#------------------------------------------------------------------------------
if x_desc not in x_rules: return 0
l_total = 0
for l_tuple in x_rules[x_desc]:
(l_quantity, l_desc) = l_tuple
l_total += l_quantity + l_quantity * count_bags(l_desc, x_rules)
return l_total
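# Worked example (from the rules in the module docstring): with
#   l_rules['shiny gold']   = [(1, 'dark olive'), (2, 'vibrant plum')]
#   l_rules['dark olive']   = [(3, 'faded blue'), (4, 'dotted black')]
#   l_rules['vibrant plum'] = [(5, 'faded blue'), (6, 'dotted black')]
# and no entries for 'faded blue' / 'dotted black',
# count_bags('shiny gold', l_rules) evaluates 1 + 1*7 + 2 + 2*11 = 32.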
'''
Parse a rule, returning:
(<outer bag description>, list())
If list is None, then the outer bag can't hold any bags.
Otherwise, the list contains tuples containing quantity and color of inner bags
'''
#------------------------------------------------------------------------------
def parse_rule(x_rule):
#------------------------------------------------------------------------------
l_match = re.search("(.*) bags contain (.*)", x_rule)
l_inner_list = None
if not l_match:
print("Error. No match for line {}".format(x_rule))
exit(1)
(l_outer, l_inner) = l_match.group(1, 2)
if 'no other bags' in l_inner:
pass
else:
l_inner_list = list()
for l_sub in l_inner.split(','):
            l_curr_match = re.search(r"(\d+) (\w+\s\w+) bag", l_sub)
if not l_curr_match:
print("Error. No match on rule {} sub rule {}".format(x_rule, l_sub))
exit(1)
(l_quantity, l_style) = l_curr_match.group(1, 2)
l_quantity = int(l_quantity)
l_inner_list.append((l_quantity, l_style))
return (l_outer, l_inner_list)
#------------------------------------------------------------------------------
def main():
#------------------------------------------------------------------------------
run()
main() |
from abc import abstractmethod
import logging
import functools
logger = logging.getLogger(__name__)
# GLOBALS
METHODS_MAP_CODE = {}
METHODS_MAP_DATA = {}
class SerializerBase(object):
""" Adds shared functionality for all serializer implementations
"""
def __init_subclass__(cls, *args, **kwargs):
""" This forces all child classes to register themselves as
methods for serializing code or data
"""
super().__init_subclass__(*args, **kwargs)
if cls._for_code:
METHODS_MAP_CODE[cls._identifier] = cls
if cls._for_data:
METHODS_MAP_DATA[cls._identifier] = cls
@property
def identifier(self):
""" Get the identifier of the serialization method
Returns
-------
identifier : str
"""
return self._identifier
def chomp(self, payload):
""" If the payload starts with the identifier, return the remaining block
Parameters
----------
payload : str
Payload blob
"""
s_id, payload = payload.split(b'\n', 1)
if (s_id + b'\n') != self.identifier:
raise TypeError("Buffer does not start with parsl.serialize identifier:{}".format(self.identifier))
return payload
def enable_caching(self, maxsize=128):
""" Add functools.lru_cache onto the serialize, deserialize methods
"""
self.serialize = functools.lru_cache(maxsize=maxsize)(self.serialize)
self.deserialize = functools.lru_cache(maxsize=maxsize)(self.deserialize)
return
@abstractmethod
def serialize(self, data):
pass
@abstractmethod
def deserialize(self, payload):
pass
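# Minimal illustration (hypothetical subclass, not part of this module): a concrete
# serializer only needs the class attributes below; __init_subclass__ then registers
# it in METHODS_MAP_CODE / METHODS_MAP_DATA as soon as the class body is executed.
# The b'99\n' identifier is an assumed placeholder, not a real parsl identifier.
#
#   import json
#
#   class JsonDataSerializer(SerializerBase):
#       _identifier = b'99\n'   # assumed format: bytes ending in b'\n', matching chomp()
#       _for_code = False
#       _for_data = True
#
#       def serialize(self, data):
#           return self.identifier + json.dumps(data).encode('utf-8')
#
#       def deserialize(self, payload):
#           return json.loads(self.chomp(payload))
#
#   assert METHODS_MAP_DATA[b'99\n'] is JsonDataSerializer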
|
#!/usr/bin/env python3
import random
from math import sqrt
import numpy as np
#---------------------------------------------------------------
# Data to be generated
# (1) number of phrases
# (2) number of total words
# (3) words per phrase (2/1)
# (4) number of non-function words
# (5) non-function words per phrase (4/1)
# (6) mean of total words
# (7) std. dev. of total words
# (8) mean of non-function words
# (9) std. dev. of non-function words
# (10) mean of ln(word frequency) (total words)
# (11) std. dev. of ln(word frequency) (total words)
# (12) mean of ln(word frequency) (non-function words)
# (13) std. dev. of ln(word frequency) (non-function words)
# constant term = 1
# scroll time for the current page based on a gamma dist.
#---------------------------------------------------------------
def generate_page_data():
data = []
for i in range(3):
# generating (1) using range of 1-4 --- E(X) = 2.5
num_sentences = random.randint(1,4)
data.append(num_sentences)
# generating (2) --- using range of 30-50 --- E(X) = 40
num_words = random.randint(30,50)
data.append(num_words)
# generating (3) --- dividing (2)/(1) --- E(X) = 16
data.append(round((num_words/num_sentences),4))
# generating (4) --- using range of 15-25 --- E(X) = 20
num_words_nf = random.randint(15,25)
data.append(num_words_nf)
# generating (5) --- dividing (4)/(1) --- E(X) = 8
data.append(round((num_words_nf/num_sentences),4))
# generating (6) --- using range of 3-8 --- E(X) = 5.5
data.append(random.randint(3,8))
# generating (7) --- using range of 1.5-2.5 --- E(X) = 2
data.append(round(random.uniform(1.5,2.5),4))
# generating (8) --- using range of 4-9 --- E(X) = 6.5
data.append(random.randint(4,9))
# generating (9) --- using range of 2-3 --- E(X) = 2.5
data.append(round(random.uniform(2.0,3.0),4))
# generating (10) --- using range of 11.2-16.6 --- E(X) = 13.9
data.append(round(random.uniform(11.2,16.6),4))
# generating (11) --- using range of 1.0-2.0 --- E(X) = 1.5
data.append(round(random.uniform(1.0,2.0),4))
# generating (12) --- using range of 11.2-15.0 --- E(X) = 13.1
data.append(round(random.uniform(11.2,15.0),4))
# generating (13) --- using range of 1.0-2.0 --- E(X) = 1.5
data.append(round(random.uniform(1.0,2.0),4))
# generating constant = 1
data.append(1)
# generating representation
data.append(sum(data))
return np.asarray(data)
def main():
with open("synthetic_data.csv", mode='w') as WRITE_FILE:
num_iterations = int(input("enter the number of iterations: "))
for i in range(num_iterations):
current_data = generate_page_data()
current_data_str = ','.join(str(elements) for elements in current_data)
print(current_data_str)
WRITE_FILE.write(current_data_str + "\n")
WRITE_FILE.close()
print("---------------------------------------------------------")
print("number of iterations producted", str(num_iterations))
print("---------------------------------------------------------")
if __name__ == '__main__':
main()
|
from django.conf.urls import url
from .. import views
urlpatterns = [
url(
regex=r'^users/?$',
view=views.UserList.as_view(),
name='user_list'
),
url(
regex=r'^users/(?P<pk>\d+)/?$',
view=views.UserDetail.as_view(),
name='user_detail'
),
]
|
import os
from flask import Flask
import urllib.parse
from combien import Combien
app = Flask(__name__)
@app.route("/")
def hello():
return "Hello World"
@app.route("/price/<path:url>")
def price(url):
c = Combien(urllib.parse.unquote(url))
return c.price()
if __name__ == "__main__":
app.run(debug=True)
|
from selenium import webdriver
import pickle
import json
import os
from process import process
def do_login(browser, url):
browser.get(url)
cookies = browser.get_cookies()
with open('cookies.pickle', 'wb') as f:
pickle.dump(cookies, f)
input('Login in the browser, then press Enter to continue here...')
cookies = browser.get_cookies()
with open('cookies.pickle', 'wb') as f:
pickle.dump(cookies, f)
def scrape(browser, url):
if not os.path.isfile('cookies.pickle'):
do_login(browser, url)
browser.get('https://musicleague.app')
with open('cookies.pickle', 'rb') as f:
cookies = pickle.load(f)
for cookie in cookies:
browser.add_cookie(cookie)
print('Logged in')
browser.get(url)
# Just get the number of rounds
round_count = len(browser.find_elements_by_link_text('Round Results'))
rounds = []
round_names = [x.text for x in browser.find_elements_by_class_name('round-title')]
assert(round_count == len(round_names))
for round_num, round_name in enumerate(round_names):
browser.get(url)
browser.find_elements_by_link_text('Round Results')[round_num].click()
song_names = [x.text for x in browser.find_elements_by_class_name('name') if x.tag_name=='a' and len(x.text) > 0]
submitters = [x.text[13:] for x in browser.find_elements_by_class_name('submitter') if x.tag_name=='span' and len(x.text) > 0]
vote_containers = browser.find_elements_by_class_name('vote-breakdown')
all_voters = []
all_vote_counts = []
for vote_container in vote_containers:
upvotes = vote_container.find_elements_by_class_name('upvote')
upvotes = upvotes[:len(upvotes)//2] # half are hidden
all_vote_counts.append([ int(upvote.find_element_by_class_name('vote-count').text) for upvote in upvotes ])
all_voters.append([ upvote.find_element_by_class_name('voter').text for upvote in upvotes ])
songs = []
for song_name, submitter, voters, vote_counts in zip(song_names, submitters, all_voters, all_vote_counts):
song = {
'name': song_name,
'submitter': submitter,
'votes': {voter:count for voter, count in zip(voters, vote_counts)}
}
songs.append(song)
rounds.append({
'name': round_name,
'songs': songs
})
with open('data.json', 'w') as f:
json.dump(rounds, f)
return rounds
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('url', help='URL for the music league - https://musicleague.app/l/<base64 crap>/')
parser.add_argument('--login', '-l', action='store_true', help="Login to music league. Try this if the scraper isn't running correctly.")
parser.add_argument('--process', '-p', action='store_true', help="Immediately process the data to csv (semicolon-separated) and print this.")
args = parser.parse_args()
browser = webdriver.Chrome()
if args.login:
do_login(browser, args.url)
data = scrape(browser, args.url)
if args.process:
process(data)
browser.close()
|
import logging
import urllib.parse
logger = logging.getLogger("municipal_finance")
class ApiClient(object):
def __init__(self, get, api_url):
self.get = get
self.api_url = api_url + "/cubes/"
def api_get(self, query):
if query["query_type"] == "aggregate":
url = self.api_url + query["cube"] + "/aggregate"
params = {
"aggregates": query["aggregate"],
"cut": self.format_cut_param(query["cut"]),
"drilldown": "|".join(query["drilldown"]),
"page": 0,
}
if query.get("order"):
params["order"] = query.get("order")
else:
params["order"] = "financial_year_end.year:desc,item.code:asc"
elif query["query_type"] == "facts":
url = self.api_url + query["cube"] + "/facts"
params = {"fields": ",".join(
field for field in query["fields"]), "page": 0}
if query.get("cut"):
params["cut"] = self.format_cut_param(query.get("cut"))
if query.get("order"):
params["order"] = query.get("order")
elif query["query_type"] == "model":
url = self.api_url + query["cube"] + "/model"
params = {}
params["pagesize"] = 20000
logger.debug("API query %s?%s" % (url, urllib.parse.urlencode(params)))
return self.get(url, params)
def format_cut_param(self, cuts):
keypairs = []
for key, vals in cuts.items():
vals_as_strings = []
for val in vals:
if type(val) == str:
vals_as_strings.append('"' + val + '"')
if type(val) == int:
vals_as_strings.append(str(val))
keypairs.append((key, ";".join(vals_as_strings)))
return "|".join("{!s}:{!s}".format(pair[0], pair[1]) for pair in keypairs)
|
#!/usr/bin/env python
"""
Postprocessor subclass.
"""
from collections import namedtuple
import general_utils
import rapid_config
from postproc import postproc
from postproc import postproc_options
from robotmath import transforms
# PARAMS
__a1 = 'A1'
__a2 = 'A2'
__a3 = 'A3'
__a4 = 'A4'
__a5 = 'A5'
__a6 = 'A6'
__e1 = 'E1'
__e2 = 'E2'
__e3 = 'E3'
__e4 = 'E4'
__e5 = 'E5'
__e6 = 'E6'
__x = 'X'
__y = 'Y'
__z = 'Z'
__q1 = 'Q1'
__q2 = 'Q2'
__q3 = 'Q3'
__q4 = 'Q4'
__c1 = 'C1'
__c2 = 'C2'
__c3 = 'C3'
__c4 = 'C4'
__params = 'params'
__identifier = 'identifier'
__value = 'value'
__move_type = 'type'
__move_target = 'target'
__move_speed = 'speed'
__move_zone = 'zone'
__move_tool = 'tool'
__move_wobj = 'wobj'
# STRUCTURES
MOVE = 'MOVE'
MOVE_L = 'MoveL'
MOVE_J = 'MoveJ'
MOVE_ABS_J = 'MoveAbsJ'
__move_structure = namedtuple(
MOVE, [
__move_type,
__move_target,
__move_speed,
__move_zone,
__move_tool,
__move_wobj
]
)
VARIABLE = 'VARIABLE'
__variable_structure = namedtuple(
VARIABLE, [
__params
]
)
JOINTTARGET = 'JOINTTARGET'
__jointtarget_structure = namedtuple(
JOINTTARGET, [
__a1,
__a2,
__a3,
__a4,
__a5,
__a6,
__e1,
__e2,
__e3,
__e4,
__e5,
__e6
]
)
ROBTARGET = 'ROBTARGET'
__robtarget_structure = namedtuple(
ROBTARGET, [
__x,
__y,
__z,
__q1,
__q2,
__q3,
__q4,
__c1,
__c2,
__c3,
__c4,
__e1,
__e2,
__e3,
__e4,
__e5,
__e6
]
)
DIGITAL_OUT = 'DO'
__digital_out_structure = namedtuple(
DIGITAL_OUT, [
__identifier,
__value
]
)
STRUCTURES = {
JOINTTARGET: __jointtarget_structure,
ROBTARGET: __robtarget_structure,
DIGITAL_OUT: __digital_out_structure,
MOVE: __move_structure,
VARIABLE: __variable_structure
}
# TEMPLATES
__jointtarget_template = \
'[' \
'[{}, {}, {}, {}, {}, {}], ' \
'[{}, {}, {}, {}, {}, {}]' \
']'
__robtarget_template = \
'[' \
'[{}, {}, {}], ' \
'[{}, {}, {}, {}], ' \
'[{}, {}, {}, {}], ' \
'[{}, {}, {}, {}, {}, {}]' \
']'
__digital_out_template = \
'\t\tSetDO {}, {};'
__variable_template = \
'\t\t{}'
__move_template = \
'\t\t{} {}, {}, {}, {}\\WObj:={};'
TEMPLATES = {
JOINTTARGET: __jointtarget_template,
ROBTARGET: __robtarget_template,
DIGITAL_OUT: __digital_out_template,
MOVE: __move_template,
VARIABLE: __variable_template
}
# COMMANDS
MOTION_COMMAND = 'motion_command'
_motion_command_fields = [
postproc.AXES,
postproc.EXTERNAL_AXES,
postproc.POSE,
postproc.CONFIGURATION
]
MotionCommand = namedtuple(
MOTION_COMMAND, _motion_command_fields
)
IO_COMMAND = 'io_command'
_io_command_fields = [
postproc.DIGITAL_OUTPUT,
postproc.ANALOG_OUTPUT
]
IOCommand = namedtuple(
IO_COMMAND, _io_command_fields
)
class SimpleRAPIDProcessor(postproc.PostProcessor):
"""
Postprocessor subclass.
"""
def __init__(self):
"""
Initialize specific processor.
"""
# Initialize superclass (generic processor)
super(SimpleRAPIDProcessor, self).__init__(
type_robot='ABB',
type_processor='RAPID',
program_file_extension=rapid_config.DEFAULT_FILE_EXTENSION,
def_program_template=rapid_config.DEFAULT_PROGRAM)
# Initialize internal parameters
self.supported_options = self._set_supported_options()
def _process_program(self, processed_commands, opts): # Implement in base class!
"""
Process a list of instructions and fill a program template.
:param processed_commands: List of processed commands.
:param opts: UserOptions tuple
:return:
"""
# Get program structure and template
if opts.Use_motion_as_variables:
formatted_commands = ',\n'.join(processed_commands)
count = len(processed_commands)
try:
program_template = self._read_program_template() # don't overwrite original
if program_template.count('{}') != 2:
raise IndexError
return program_template.format(count, formatted_commands)
except IndexError:
message = 'To use motion parameters as variables, template requires ' \
'2 placeholders, one for number of motion variables and ' \
'another for the motion variables.'
raise IndexError(message)
else:
formatted_commands = '\n'.join(processed_commands)
try:
program_template = self._read_program_template() # don't overwrite original
return program_template.format(formatted_commands)
except IndexError:
message = 'To use motion parameters as commands, template requires ' \
'1 placeholder for the motion variables.'
raise IndexError(message)
@staticmethod
def _process_command(command, opts):
"""
Process a single command with user options.
:param command: Command tuple
:param opts: UserOptions tuple
:return:
"""
command_type = postproc.get_structure_type(command)
if not opts.Ignore_motion and command_type == MOTION_COMMAND:
return _process_motion_command(command, opts)
elif not opts.Ignore_IOs and command_type == IO_COMMAND:
return _process_io_command(command, opts)
@staticmethod
def _format_command(params_dict):
"""
Processor-specific function. Certain types of commands are very specific
to the processor in use or application, such as EntertainTech, requiring
in some cases both Motion and IO datatypes in a single line of code. This
function allows PostProcessor (processor) subclasses to format the input
params flexibly and as needed.
For this processor:
Can create a MotionCommand namedTuple from optional input parameters.
Can create a IOCommand namedTuple from optional input parameters.
:param params_dict: Dictionary of namedtuple containing all command
parameters (i.e. Axes, ExternalAxes, etc).
:return:
"""
# Try to get a MotionCommand
params = []
for field in _motion_command_fields:
param = params_dict[field] if field in params_dict else None
params.append(param)
if params.count(None) != len(params):
return MotionCommand(*params)
else:
# Try to get an IO command
params = []
for field in _io_command_fields:
param = params_dict[field] if field in params_dict else None
params.append(param)
if params.count(None) != len(params):
return IOCommand(*params)
@staticmethod
def _set_supported_options():
"""
Set the supported options for this processor. Only set to True if the
optional parameter is actually supported by this processor!
:return:
"""
# TODO: implement include_pose and use_linear_motion
return postproc_options.configure_user_options(
ignore_motion=True,
use_motion_as_variables=True,
use_nonlinear_motion=True,
use_linear_motion=True,
include_axes=True,
include_external_axes=True,
include_pose=True,
include_configuration=True
)
def _process_motion_command(command, opts): # Implement in base class!
"""
Process motion command.
:param command: Command tuple
:param opts: UserOptions tuple
:return:
"""
motion_type = None
target_data_type = None
target_data = []
# Interpret linear motion command
if opts.Use_linear_motion:
if command.pose is not None:
motion_type = MOVE_L
target_data_type = ROBTARGET
pose = _convert_pose(command.pose)
params = [general_utils.num_to_str(p, include_sign=False, precision=3)
for p in pose]
target_data.extend(params)
if command.configuration is not None:
configuration = _convert_configuration(command.configuration)
params = [general_utils.num_to_str(p, include_sign=False, precision=3, simplify_ints=True)
for p in configuration]
target_data.extend(params)
else:
target_data.extend(rapid_config.DEFAULT_CONF)
if command.external_axes is not None:
external_axes = [axis if axis is not None else '9E9'
for axis in command.external_axes]
params = [general_utils.num_to_str(p, include_sign=False, precision=3)
for p in external_axes]
target_data.extend(params)
else:
target_data.extend(rapid_config.DEFAULT_EXAX)
else:
raise ValueError('Invalid command')
# Interpret nonlinear motion command
elif opts.Use_nonlinear_motion:
if command.axes is not None:
motion_type = MOVE_ABS_J
target_data_type = JOINTTARGET
axes = command.axes
params = [general_utils.num_to_str(p, include_sign=False, precision=3)
for p in axes]
target_data.extend(params)
if command.external_axes is not None:
external_axes = [axis if axis is not None else '9E9'
for axis in command.external_axes]
params = [general_utils.num_to_str(p, include_sign=False, precision=3)
for p in external_axes]
target_data.extend(params)
else:
target_data.extend(rapid_config.DEFAULT_EXAX)
elif command.pose is not None:
motion_type = MOVE_J
target_data_type = ROBTARGET
pose = _convert_pose(command.pose)
params = [general_utils.num_to_str(p, include_sign=False, precision=3)
for p in pose]
target_data.extend(params)
if command.configuration is not None:
configuration = _convert_configuration(command.configuration)
params = [general_utils.num_to_str(p, include_sign=False, precision=3, simplify_ints=True)
for p in configuration]
target_data.extend(params)
else:
target_data.extend(rapid_config.DEFAULT_CONF)
if command.external_axes is not None:
external_axes = [axis if axis is not None else '9E9'
for axis in command.external_axes]
params = [general_utils.num_to_str(p, include_sign=False, precision=3)
for p in external_axes]
target_data.extend(params)
else:
target_data.extend(rapid_config.DEFAULT_EXAX)
else:
raise ValueError('Invalid command')
else: # User never supplied a motion type
raise ValueError('Invalid motion type')
# Structure and format data, command
formatted_target_data = postproc.fill_template(
target_data,
STRUCTURES[target_data_type],
TEMPLATES[target_data_type])
if opts.Use_motion_as_variables:
formatted_variable = postproc.fill_template(
formatted_target_data,
STRUCTURES[VARIABLE],
TEMPLATES[VARIABLE])
return formatted_variable
else:
motion_data = [
motion_type,
formatted_target_data,
rapid_config.DEFAULT_SPEED,
rapid_config.DEFAULT_ZONE,
rapid_config.DEFAULT_TOOL,
rapid_config.DEFAULT_WOBJ]
formatted_motion = postproc.fill_template(
motion_data,
STRUCTURES[MOVE],
TEMPLATES[MOVE])
return formatted_motion
def _process_io_command(command, opts):
"""
Process io command.
:param command: Command tuple
:param opts: UserOptions tuple
:return:
"""
io_data = [] # empty data container
# Interpret digital output command
if opts.Include_digital_output:
if command.digital_output is not None:
io_type = DIGITAL_OUT
for io in command.digital_output:
formatted_io = postproc.fill_template(
io,
STRUCTURES[io_type],
TEMPLATES[io_type])
io_data.append(formatted_io)
# if command.analog_outputs is not None:
# io_type = ANALOG_OUT
# for io in command.analog_outputs:
# formatted_io = postproc.fill_template(
# io,
# STRUCTURES[io_type],
# TEMPLATES[io_type])
# io_data.append(formatted_io)
if io_data:
formatted_ios = '\n'.join(io_data)
return formatted_ios
def _convert_pose(pose):
"""
Convert a Pose tuple to subclass conventions.
:param pose: Pose tuple
:return:
"""
i_vector = [pose.pose_ix, pose.pose_iy, pose.pose_iz]
j_vector = [pose.pose_jx, pose.pose_jy, pose.pose_jz]
k_vector = [pose.pose_kx, pose.pose_ky, pose.pose_kz]
q1, q2, q3, q4 = transforms.quaternion_by_vectors(i_vector, j_vector, k_vector)
return [pose.pose_x, pose.pose_y, pose.pose_z, q1, q2, q3, q4]
def _convert_configuration(configuration):
"""
Convert a Configuration tuple to subclass conventions.
:param configuration: Configuration tuple
:return:
"""
# TODO: This might not be correct!
c1 = not configuration.configuration_1
c2 = not configuration.configuration_2
c3 = not configuration.configuration_3
c4 = 0 # unused
return [c1, c2, c3, c4]
|
__author__ = 'Tristan Watson'
# Keystroke Dynamic software that covers the following key functionality:
# 1. User File management
# 2. Input gathering and management (including storage)
# 3. Plotting of keystrokes taking into consideration both up events and down events.
import pyHook
import pythoncom
import os
import matplotlib.pyplot as plt
import json
import numpy
import sys
# File is to be opened and closed numerous times. Should be re-written as a class.
userFilePath = ""  # set once a user file is created or loaded
time_between_ups = []
time_between_downs = []
def banner():
print("------------------------------")
print("Keystroke Dynamics Software")
print("Author: Tristan Watson, 2015")
print("------------------------------")
print("Current Working Directory: ", os.getcwd())
def menuOptions():
#Menu
print("Please choose a following option:")
print("1: User Login or Create New")
print("2: Username and Password Input")
print("3: Plot Graph (Based on Username)")
print("4: Help")
print("5: Exit")
def menuHandler():
choice = input("Please enter option choice: ")
if choice == "1":
getUserFileWriteSession()
elif choice == "2":
usernamePasswordInput()
elif choice == "3":
plotMenu()
elif choice == "4":
documentation()
elif choice == "5":
print("Program Quitting")
sys.exit()
else:
print("Please select a valid option (1-5)")
menuHandler()
# For writing events
def getUserFileWriteSession():
print("File Location: ", os.getcwd())
username = input("Enter your username: ")
userFileName = (username + ".txt")
# If directory DNE.
if not os.path.isdir((os.path.join("./", "accounts"))):
# Create it.
os.makedirs("accounts")
if os.path.exists(os.path.join("accounts", userFileName)):
userFile = (os.path.join("accounts", userFileName))
else:
print("No File Exists! Creating New User")
if os.path.exists(os.path.join("accounts", userFileName)):
print("Username exists! Load it or choose different name")
else:
userFile = (os.path.join("accounts", userFileName))
writeFile = open(userFile, "w")
# Have to prime a file ready to be used with JSON
fileSetup = json.dumps([])
writeFile.write(fileSetup)
writeFile.close()
print("User Successfully Created", userFile)
print("Your account has been created: ", userFile)
global userFilePath
userFilePath = userFile
# Used for matplotlib only
def getUserFileReadSession():
userFileName = input("Username:") + ".txt"
if os.path.exists(os.path.join("accounts", userFileName)):
userFile = (os.path.join("accounts", userFileName))
open(userFile, "r")
return "File Loaded Successfully"
else:
print("Username does not exist")
def plotMenu():
print("What would you like to plot?")
print("1. Key Up")
print("2. Key Down")
print("3. Back")
print("4. Quit")
plotMenuHandler()
def plotMenuHandler():
plotChoice = input("Choice: ")
if plotChoice == "1":
timeBetweenUPS()
elif plotChoice == "2":
timeBetweenDOWNS()
elif plotChoice == "3":
menuHandler()
elif plotChoice == "4":
sys.exit()
else:
print("Please Choose Valid Option")
def plotGraph(y):
userInput = ("Enter if you want to plot KeyUp or KeyDowns")
data = y
x = list(range(len(data)))
# Average
average = numpy.mean(data)
# Words Per Minute = (Chr / 5) / Time
wpm = len(data) / 5
# MatPlotLib Handling
plt.title("Time Elapsed Between Down Events")
plt.ylabel("Key Number")
plt.ylabel("Milliseconds")
plt.plot(x, y)
# Format average display box
plt.text(5, 35, ("WPM: ", wpm, "Average", average) ,style='italic',
bbox={'facecolor':'red', 'alpha':0.5, 'pad':10})
plt.show()
def documentation():
print ("The menu works in a way that accepts a corresponding number.")
print ("For example, press 2 to enter information.")
print ("A file must be created or loaded first.")
print ("If not defined, program will exit.")
print ("To end input in option '2'. use ESC character")
print ("Option 3 gives an option to either print out a graph of 'up' or 'down' events")
def userRecordData(eventList):
userFile = userFilePath
#Read File to Grab Sessions
readUserFile = open(userFile, "r")
testFile = readUserFile.read()
#print(testFile)
userSessionList = json.loads(testFile)
readUserFile.close()
# Create New Session and Write To File
writeUserFile = open(userFile, "w")
newUserEventList = eventList
userSessionList.append(newUserEventList)
data = json.dumps(userSessionList)
writeUserFile.write(data)
writeUserFile.close()
def timeBetweenUPS():
# Define the list first
eventFile = open(userFilePath, "r")
eventList = json.loads(eventFile.read())
ups = ([(etype, etime) for etype, etime in eventList[0] if etype == "Up"])
while len(ups) > 1:
#Get the time from the tuple
startTime = ups.pop(0)[1]
betweenTime = ups[0][1] - startTime
time_between_ups.append(betweenTime)
#average = numpy.mean(time_between_downs)
plotGraph(time_between_ups)
def timeBetweenDOWNS():
# Define the list first
eventFile = open(userFilePath, "r")
eventList = json.loads(eventFile.read())
downs = ([(etype, etime) for etype, etime in eventList[0] if etype == "Down"])
while len(downs) > 1:
startTime = downs.pop(0)[1] #Get the time from the tuple
betweenTime = downs[0][1] - startTime
time_between_downs.append(betweenTime)
#average = numpy.mean(time_between_downs)
plotGraph(time_between_downs)
def usernamePasswordInput():
keyLogger = KeyLogger()
hookManager = pyHook.HookManager()
hookManager.KeyDown = keyLogger.keyDownEvent
hookManager.KeyUp = keyLogger.keyUpEvent
hookManager.HookKeyboard()
keyLogger.mainLoop()
# Unhooks the keyboard, no more data recorded, returns to menu
hookManager.UnhookKeyboard()
class KeyLogger(object):
def __init__(self):
self.enterPressed = False
self.eventList = []
def keyDownEvent(self, event):
self.storeEvent("Down", event)
return True
# Fixes Requires Integer Bug (Got Nonetype)
def keyUpEvent(self, event):
self.storeEvent("Up", event)
return True
# Fixes Requires Integer (Got Nonetype)
def mainLoop(self):
while not self.enterPressed:
pythoncom.PumpWaitingMessages()
def storeEvent(self, activity, event):
keystrokeTime = int(event.Time)
#keystrokeCharacter = chr(event.Ascii)
self.eventList.append ((activity, int(keystrokeTime)))
# Chosen to use Escape key (ESC) due to input using a similar method
# Enter Key - KeyCode: 13 Ascii: 13 ScanCode: 28 - ESC = 27 @ Ascii
if event.Ascii == 27:
self.enterPressed = True
userRecordData(self.eventList)
# Starts the program
banner()
#Main Program Loop
while True:
menuOptions()
menuHandler()
|
import cv2
from face_recognizer import FaceRecognizer
LIBRARY_FOLDER_PATH = "#PATH TO THE LIBRARY FOLDER#"
IMAGE_PATH = "#PATH TO THE IMAGE THAT NEEDS TO BE ANALYZED#"
faces_names, image = FaceRecognizer(LIBRARY_FOLDER_PATH).classify(IMAGE_PATH)
cv2.imshow('image', image)
cv2.waitKey(0)
|
import time
import vk
def get_members(api: vk.API, group_id: int, fields: str = "", delay: float = 0.4):
spy_requests = api.groups.getMembers(group_id=group_id, fields=fields)
count = spy_requests["count"]
members = set(spy_requests["items"])
if count > 1000:
for i in range(1, (count // 1000) + 1):
time.sleep(delay)
members.update(
set(api.groups.getMembers(group_id=group_id, fields=fields, offset=i*1000)["items"])
)
return members
|
# To add a new cell, type '# %%'
# To add a new markdown cell, type '# %% [markdown]'
# %%
from IPython import get_ipython
# %%
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
get_ipython().run_line_magic('matplotlib', 'inline')
import numpy as np
train = pd.read_csv('Train1.csv')
train.head()
test = pd.read_csv('test1.csv')
test.head()
train_original=train.copy()
test_original=test.copy()
train.columns = pd.Index(['Loan_ID', 'Gender', 'Married', 'Dependents', 'Education',
'Self_Employed', 'ApplicantIncome', 'CoapplicantIncome', 'LoanAmount',
'Loan_Amount_Term', 'Credit_History', 'Property_Area', 'Loan_Status'],
dtype='object')
test.columns = pd.Index(['Loan_ID', 'Gender', 'Married', 'Dependents', 'Education',
'Self_Employed', 'ApplicantIncome', 'CoapplicantIncome', 'LoanAmount',
'Loan_Amount_Term', 'Credit_History', 'Property_Area'],
dtype='object')
train.shape
# (614, 13)
test.shape
# (367, 12)
# %%
train.isnull().sum()
# %%
train['Gender'].fillna(train['Gender'].mode()[0], inplace=True)
train['Married'].fillna(train['Married'].mode()[0], inplace=True)
train['Dependents'].fillna(train['Dependents'].mode()[0], inplace=True)
train['Self_Employed'].fillna(train['Self_Employed'].mode()[0], inplace=True)
train['Credit_History'].fillna(train['Credit_History'].mode()[0], inplace=True)
train['Loan_Amount_Term'].value_counts()
# %%
train['Loan_Amount_Term'].fillna(train['Loan_Amount_Term'].mode()[0], inplace=True)
train['LoanAmount'].fillna(train['LoanAmount'].median(), inplace=True)
train.isnull().sum()
# %%
test['Gender'].fillna(train['Gender'].mode()[0], inplace=True)
test['Married'].fillna(train['Married'].mode()[0], inplace=True)
test['Dependents'].fillna(train['Dependents'].mode()[0], inplace=True)
test['Self_Employed'].fillna(train['Self_Employed'].mode()[0], inplace=True)
test['Credit_History'].fillna(train['Credit_History'].mode()[0], inplace=True)
test['Loan_Amount_Term'].fillna(train['Loan_Amount_Term'].mode()[0], inplace=True)
test['LoanAmount'].fillna(train['LoanAmount'].median(), inplace=True)
test.isnull().sum()
# %%
train['LoanAmount_log']=np.log(train['LoanAmount'])
train['LoanAmount_log'].hist(bins=20)
test['LoanAmount_log']=np.log(test['LoanAmount'])
train=train.drop('Loan_ID' ,axis=1)
test=test.drop('Loan_ID',axis=1)
X = train.drop('Loan_Status', axis=1)
y = train.Loan_Status
X = pd.get_dummies(X)
train=pd.get_dummies(train)
test=pd.get_dummies(test)
# %%
from sklearn.model_selection import train_test_split
x_train, x_cv, y_train, y_cv = train_test_split(X,y, test_size=0.3)
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
model = LogisticRegression(max_iter=10000)
model.fit(x_train, y_train)
pred_cv = model.predict(x_cv)
accuracy_score(y_cv,pred_cv)
# %%
pred_test = model.predict(test)
# %%
submission = pd.read_csv('Sample1.csv')
submission.head()
submission['Loan_Status']=pred_test
submission['Loan_ID']=test_original['Loan_ID']
submission['Loan_Status'].replace(0, 'N', inplace=True)
submission['Loan_Status'].replace(1, 'Y', inplace=True)
pd.DataFrame(submission, columns=['Loan_ID','Loan_Status']).to_csv('output.csv')
|
# Copyright (c) Facebook, Inc. and its affiliates.
# The following script requires Java 1.8.0 and pycocotools installed.
# The pycocoevalcap can be installed with pip as
# pip install git+https://github.com/flauted/coco-caption.git@python23
# Original pycocoevalcap code is at https://github.com/tylin/coco-caption
# but has no python3 support yet.
import json
import argparse
from builtins import dict
from pycocoevalcap.tokenizer.ptbtokenizer import PTBTokenizer
from pycocoevalcap.bleu.bleu import Bleu
from pycocoevalcap.meteor.meteor import Meteor
from pycocoevalcap.rouge.rouge import Rouge
from pycocoevalcap.cider.cider import Cider
from pycocoevalcap.spice.spice import Spice
class COCOEvalCap:
"""
COCOEvalCap code is adopted from https://github.com/tylin/coco-caption
"""
def __init__(self, img_ids, coco, coco_res):
self.eval_imgs = []
self.eval = dict()
self.img_to_eval = dict()
self.coco = coco
self.coco_res = coco_res
def evaluate(self):
gts = self.coco
res = self.coco_res
# =================================================
# Set up scorers
# =================================================
print("tokenization...")
tokenizer = PTBTokenizer()
gts = tokenizer.tokenize(gts)
res = tokenizer.tokenize(res)
# =================================================
# Set up scorers
# =================================================
print("setting up scorers...")
scorers = [
(Bleu(4), ["Bleu_1", "Bleu_2", "Bleu_3", "Bleu_4"]),
(Meteor(), "METEOR"),
(Rouge(), "ROUGE_L"),
(Cider(), "CIDEr"),
(Spice(), "SPICE"),
]
# =================================================
# Compute scores
# =================================================
for scorer, method in scorers:
print("computing %s score..." % (scorer.method()))
score, scores = scorer.compute_score(gts, res)
if type(method) == list:
for sc, scs, m in zip(score, scores, method):
self.set_eval(sc, m)
self.set_img_to_eval_imgs(scs, gts.keys(), m)
print("%s: %0.3f" % (m, sc))
else:
self.set_eval(score, method)
self.set_img_to_eval_imgs(scores, gts.keys(), method)
print("%s: %0.3f" % (method, score))
self.set_eval_imgs()
# anwen hu 2020/9/16
"""for img_id in res.keys():
# print('res_id', res_id)
hypo = res[img_id]
gt_captions = gts[img_id]
cider = self.img_to_eval[img_id]['CIDEr']
if cider*100 < 20:
print(img_id, cider, hypo)
print(gt_captions)
print('=================')"""
def set_eval(self, score, method):
self.eval[method] = score
def set_img_to_eval_imgs(self, scores, img_ids, method):
for img_id, score in zip(img_ids, scores):
if img_id not in self.img_to_eval:
self.img_to_eval[img_id] = dict()
self.img_to_eval[img_id]["image_id"] = img_id
self.img_to_eval[img_id][method] = score
def set_eval_imgs(self):
self.eval_imgs = [eval for img_id, eval in self.img_to_eval.items()]
def calculate_metrics(img_ids, dataset_dts, dataset_res):
img_to_anns_gts = {id: [] for id in img_ids}
for ann in dataset_dts["annotations"]:
img_to_anns_gts[ann["image_id"]] += [ann]
img_to_anns_res = {id: [] for id in img_ids}
for ann in dataset_res["annotations"]:
img_to_anns_res[ann["image_id"]] += [ann]
eval_obj = COCOEvalCap(img_ids, img_to_anns_gts, img_to_anns_res)
eval_obj.evaluate()
return eval_obj.eval, eval_obj.img_to_eval
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Image captioning metrics")
parser.add_argument("--reference_json", help="Path to reference captions json")
parser.add_argument("--predicted_json", help="Path to predicted captions json")
args = parser.parse_args()
with open(args.reference_json, "r") as f:
captions = json.load(f)
references = []
img_ids = []
for img in captions["images"]:
if img["split"] == "test":
for c in img["sentences"]:
d = {}
d["image_id"] = c["imgid"]
img_ids.append(c["imgid"])
d["caption"] = c["raw"]
references.append(d)
img_ids = list(set(img_ids))
with open(args.predicted_json, "r") as f:
preds = json.load(f)
dataset_dts = {"annotations": references}
dataset_res = {"annotations": preds}
print(calculate_metrics(img_ids, dataset_dts, dataset_res))
|
"""Define fixtures available for all tests."""
from unittest.mock import Mock, patch
from pytest import fixture
MOCK_AREAS_0 = [
{"bank": 0, "name": "Area 1", "sequence": 30, "status": "Ready"},
]
MOCK_AREAS_1 = [
{"bank": 0, "name": "Area 1", "sequence": 31, "status": "Not Ready"},
# A dummy invalid bank to trigger throwing an invalid sensor update
# for test coverage...
{"bank": 98, "name": "Invalid", "sequence": 0, "status": "Ready"},
]
MOCK_AREAS_2 = [
# We return to a ready state
{"bank": 0, "name": "Area 1", "sequence": 32, "status": "Ready"},
]
MOCK_ZONES_0 = [
{"bank": 0, "name": "Front door", "sequence": 1, "status": "Ready"},
{"bank": 1, "name": "Back door", "sequence": 1, "status": "Ready"},
]
MOCK_ZONES_1 = [
{"bank": 0, "name": "Front door", "sequence": 2, "status": "Not Ready"},
{"bank": 1, "name": "Back door", "sequence": 1, "status": "Ready"},
# A dummy invalid bank to trigger throwing an invalid sensor update
# for test coverage...
{"bank": 98, "name": "Invalid", "sequence": 0, "status": "Ready"},
]
MOCK_ZONES_2 = [
# Backdoor sensor was removed
{"bank": 0, "name": "Front door", "sequence": 3, "status": "Ready"},
]
MOCK_RESPONSES = (
{
"areas": MOCK_AREAS_0,
"zones": MOCK_ZONES_0,
},
{
"areas": MOCK_AREAS_1,
"zones": MOCK_ZONES_1,
},
{
"areas": MOCK_AREAS_2,
"zones": MOCK_ZONES_2,
},
)
@fixture
def ultrasync_api(hass):
"""Mock UltraSync for easier testing."""
with patch("ultrasync.UltraSync") as mock_api:
instance = mock_api.return_value
instance.login = Mock(return_value=True)
instance.details = Mock(side_effect=MOCK_RESPONSES)
instance.areas = Mock(return_value=list(MOCK_AREAS_0))
instance.zones = Mock(return_value=list(MOCK_ZONES_0))
yield mock_api
|
#!/usr/bin/env python
"""Inspecting the call stack.
"""
#end_pymotw_header
import inspect
def show_stack():
for level in inspect.stack():
frame, filename, line_num, func, src_code, src_index = level
        print('%s[%d]\n -> %s' % (filename,
                                  line_num,
                                  src_code[src_index].strip(),
                                  ))
        print(inspect.getargvalues(frame))
        print()
def recurse(limit):
local_variable = '.' * limit
if limit <= 0:
show_stack()
return
recurse(limit - 1)
return
if __name__ == '__main__':
recurse(2)
|
from django.conf.urls import url
from .views import (index, upload_resume, upload_profilepic, profile, edit_personalinfo, edit_profile_description,
edit_professionalinfo, add_language, edit_language, delete_language, add_experience, edit_experience,
delete_experience, add_education, edit_education, delete_education, add_technicalskill, edit_technicalskill,
delete_technicalskill, add_project, edit_project, delete_project, edit_email, job_alert, job_alert_results,
modify_job_alert, alerts_list, delete_job_alert, edit_emailnotifications, delete_resume, user_password_change, messages)
app_name = "candidate"
urlpatterns = [
# url(r'^home/$','home'),
url(r'^$', index, name="index"),
url(r'^profile/$', profile, name="profile"),
url(r'personalinfo/edit/$', edit_personalinfo, name="edit_personalinfo"),
url(r'profile_description/edit/$', edit_profile_description, name="edit_profile_description"),
url(r'email/edit/$', edit_email, name="edit_email"),
url(r'professionalinfo/edit/$', edit_professionalinfo, name="edit_professionalinfo"),
# mobile verify
# url(r'^mobile/verify/$', verify_mobile, name="verify_mobile"),
# url(r'^send/mobile_verification_code/$', send_mobile_verification_code, name="send_mobile_verification_code"),
# language urls
url(r'language/add/$', add_language, name="add_language"),
url(r'language/edit/(?P<language_id>[a-zA-Z0-9_-]+)/$',
edit_language, name="edit_language"),
url(r'language/delete/(?P<language_id>[a-zA-Z0-9_-]+)/$',
delete_language, name="delete_language"),
# experience urls
url(r'experience/add/$', add_experience, name="add_experience"),
url(r'experience/edit/(?P<experience_id>[a-zA-Z0-9_-]+)/$',
edit_experience, name="edit_experience"),
url(r'experience/delete/(?P<experience_id>[a-zA-Z0-9_-]+)/$',
delete_experience, name="delete_experience"),
# education urls
url(r'education/add/$', add_education, name="add_education"),
url(r'education/edit/(?P<education_id>[a-zA-Z0-9_-]+)/$',
edit_education, name="edit_education"),
url(r'education/delete/(?P<education_id>[a-zA-Z0-9_-]+)/$',
delete_education, name="delete_education"),
# techskill urls
url(r'technicalskill/add/$', add_technicalskill,
name="add_technicalskill"),
url(r'technicalskill/edit/(?P<technical_skill_id>[a-zA-Z0-9_-]+)/$',
edit_technicalskill, name="edit_technicalskill"),
url(r'technicalskill/delete/(?P<technical_skill_id>[a-zA-Z0-9_-]+)/$',
delete_technicalskill, name="delete_technicalskill"),
# project urls
url(r'project/add/$', add_project, name="add_project"),
url(r'project/edit/(?P<project_id>[a-zA-Z0-9_-]+)/$',
edit_project, name="edit_project"),
url(r'project/delete/(?P<project_id>[a-zA-Z0-9_-]+)/$',
delete_project, name="delete_project"),
# resume urls
url(r'upload_resume/$', upload_resume, name="upload_resume"),
url(r'delete-resume/$', delete_resume, name="delete_resume"),
url(r'upload_profilepic/$', upload_profilepic, name="upload_profilepic"),
url(r'edit_emailnotifications/$', edit_emailnotifications,
name="edit_emailnotifications"),
# job alert
url(r'^alert/create/$', job_alert, name="job_alert"),
url(r'^alert/list/$', alerts_list, name="alerts_list"),
url(r'^alert/list/(?P<page_num>[-\w]+)/$', alerts_list),
url(r'^alert/results/(?P<job_alert_id>[a-zA-Z0-9_-]+)/$',
job_alert_results, name="job_alert_results"),
url(r'^alert/modify/(?P<job_alert_id>[a-zA-Z0-9_-]+)/$',
modify_job_alert, name="modify_job_alert"),
url(r'^alert/delete/(?P<job_alert_id>[a-zA-Z0-9_-]+)/$',
delete_job_alert, name="delete_job_alert"),
url(r'user/password/change/', user_password_change,
name="user_password_change"),
url(r'^messages/$', messages, name="messages"),
]
|
from pydantic import BaseModel
import toml
class TomlModel(BaseModel):
@classmethod
def load(cls, file):
with open(file, "r") as f:
return cls.parse_obj(toml.load(f))
def dump(self, file):
with open(file, "w") as f:
toml.dump(self.dict(), f)
class WebserialConfig(TomlModel):
calibre_library: str = ""
calibre_username: str = ""
calibre_password: str = ""
|
#This file contains a series of functions to generate the necessary
#wires to drive the storage grid. The standards for the wires are laid out
#in make_store_cell
#takes in the output file manager, the number of entries, the number of bits
#and the number of reads
#Matthew Trahms
#EE 526
#4/20/21
def make_store_grid_wires(out_file, entries, bits, reads, num_regfiles):
#make write enables
make_wires(out_file, 'we', (entries, num_regfiles))
#make latch output wires
make_wires(out_file, 'lo', (entries, bits, num_regfiles))
#make latch input wires
make_wires(out_file, 'li', (bits, num_regfiles))
#make buffer output wires
make_wires(out_file, 'bo', (bits, reads, num_regfiles))
#make read enables
make_wires(out_file, 're', (entries, reads, num_regfiles))
return
#generic function to generate a set of string wire names based on a prefix
#and tuple of dimensions, returns a list of strings
#dimensions are in an n-entry tuple treated as an n-dim rectangle
#DO NOT HAVE A VALUE OF 0 FOR ONE OF THE DIMS
def make_wire_names(prefix, dims):
prog = [prefix]
for dim in dims:
lastprog = prog
prog = list()
for wirename in lastprog:
for index in range(dim):
new_wire = wirename + '_' + str(index)
prog.append(new_wire)
return prog
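#example: make_wire_names('we', (2, 2)) expands one dimension at a time and returns
#         ['we_0_0', 'we_0_1', 'we_1_0', 'we_1_1']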
#translates from a list of wire names to a string corresponding to the correct
#syntax for wire declarations
def make_wire_line(wire_names):
output = "wire " + wire_names[0]
wire_names = wire_names[1:]
for name in wire_names:
output += ', '
output += name
output += ';\n'
return output
#creates a set of wires based on a prefix and a set of dimensions
#writes the correct syntax of wires to the output file
def make_wires(output_file, prefix, dims):
names = make_wire_names(prefix, dims)
line = make_wire_line(names)
output_file.write(line)
if __name__ == "__main__":
f = open('make_wire_names_test.txt', 'w')
names = make_wire_names('test', (3,2,1,2))
print(len(names) == (3*2*1*2))
f.write(make_wire_line(names))
f.close()
|
from datetime import datetime
from openapi.db import CrudDB
async def test_upsert(db: CrudDB) -> None:
task = await db.db_upsert(db.tasks, dict(title="Example"), dict(severity=4))
assert task["id"]
assert task["severity"] == 4
assert task["done"] is None
task2 = await db.db_upsert(
db.tasks, dict(title="Example"), dict(done=datetime.now())
)
task2["id"] == task["id"]
assert task2["done"]
async def test_upsert_no_data(db: CrudDB) -> None:
task = await db.db_upsert(db.tasks, dict(title="Example2"))
assert task["id"]
assert task["title"] == "Example2"
|
#13-1 Passing data between pandas and models
import pandas as pd
import numpy as np
#convert a DataFrame to a numpy array
data = pd.DataFrame(~)
data.values #array(~)
df2 = pd.DataFrame(data.values,columns = [~])
data.loc[:,["a","b"]].values #select a subset of columns
dummies = pd.get_dummies(data.category,prefix="~") #turn the ~ column into dummy variables
data_with_dummies = data.drop("~",axis = 1).join(dummies)#join the dummy variables back in
#13-2 Describing models with patsy
#said to be close to R's formula syntax
data = pd.DataFrame({"x0":~,"x1":~,"y":~})
import patsy
y,X = patsy.dmatrices("y~x0+x1",data)
y # the y column
X # the x0, x1 design matrix
np.asarray(y) # as an array
np.asarray(X) # as an array; an intercept column of 1s is included
y,X = patsy.dmatrices("y~x0+x1+0",data)
#adding +0 removes the intercept term
coef,resid,_,_ = np.linalg.lstsq(X,y) #least squares
#13-2-1 Data transformations in patsy formulas
y,X = patsy.dmatrices("y~x0+np.log(np.abs(x1) + 1)",data)
y,X = patsy.dmatrices("y~standardize(x0)+center(x1)",data)
#standardize: z-score the column
#center: subtract the mean
new_data = pd.DataFrame(~)
new_X = patsy.build_design_matrices([X.design_info],new_data)#apply X's design info to new_data
y,X = patsy.dmatrices("y~I(x0+x1)",data) #wrapping in I() makes x0 + x1 mean literal addition
#13-2-2 Categorical data and patsy
data = pd.DataFrame({
    "key1":["a","b",~],
    "key2":[1,0,1,0,~],
    "v2":[1,2,3~]
})
y,X = patsy.dmatrices("v2 ~ key1",data)
X #key1 is dummy-coded
y,X = patsy.dmatrices("v2 ~ key1+0",data) #with +0 both key1[a] and key1[b] get their own dummy column (1 where a, 0 where b, and vice versa)
y,X = patsy.dmatrices("v2 ~ C(key2)",data)#C(key2) treats the numeric key2 as categorical
y,X = patsy.dmatrices("v2 ~ key1 + key2 + key1:key2",data) #key1:key2 adds the interaction term
#13-3 Introduction to statsmodels
#13-3-1 Estimating linear models
import statsmodels.api as sm
import statsmodels.formula.api as smf
#example: build one linear model from random data
def dnorm(mean,variance,size = 1):
    if isinstance(size,int):
        size = size,
    return mean + np.sqrt(variance)*np.random.randn(*size) #*size unpacks the tuple; see https://pycarnival.com/one_asterisk/ for what * means here
#seed the RNG for reproducibility
np.random.seed(12345)
N = 100
X = np.c_[dnorm(0,0.4,size = N),dnorm(0,0.6,size = N),dnorm(0,0.2,size = N)]
eps = dnorm(0,0.1,size = N)
beta = [0.1,0.3,0.5]
y = np.dot(X,beta) + eps
X_model = sm.add_constant(X)
#adds an intercept column of 1s to X
model = sm.OLS(y,X) #linear regression model
results = model.fit()
results.params
print(results.summary()) #prints a full report
results = smf.ols("y~a+b+c",data = data ).fit() #the formula API does it in one step
results.params
results.tvalues
#13-3-2 Estimating time series models
init_x = 4
import random
values = [init_x,init_x]
N = 1000
b0 = 0.8
b1 = -0.4
noise = dnorm(0,0.1,N)
for i in range(N):
    new_x = values[-1] * b0 + values[-2]*b1 + noise[i]
    values.append(new_x)
#an AR(2) process
MAXLAGS = 5
model = sm.tsa.AR(values)
results = model.fit(MAXLAGS) #specify the maximum lag
#13-4 Introduction to scikit-learn
train = pd.read_csv("titanic.UTF")
from sklearn.linear_model import LogisticRegression
model = LogisticRegression()
model.fit(X_train,y_train)
y_predict = model.predict(X_test)
#Cross-validation: split the training data to simulate prediction on held-out samples
from sklearn.linear_model import LogisticRegressionCV
model_cv = LogisticRegressionCV(10) #specify how finely to search the regularization strength
model_cv.fit(X_train,y_train)
#to do it by hand:
from sklearn.model_selection import cross_val_score
model = LogisticRegression(C = 10)
scores = cross_val_score(model,X_train,y_train,cv = 4)
from typing import Optional, Any
from StructNoSQL.utils.types import TYPED_TYPES_TO_PRIMITIVES
def make_dict_key_var_name(key_name: str) -> str:
return f"$key$:{key_name}"
def try_to_get_primitive_default_type_of_item(item_type: Any):
item_default_primitive_type: Optional[type] = getattr(item_type, '_default_primitive_type', None)
if item_default_primitive_type is not None:
return item_default_primitive_type
item_type_name: Optional[str] = getattr(item_type, '_name', None)
if item_type_name is not None:
primitive_from_typed: Optional[type] = TYPED_TYPES_TO_PRIMITIVES.get(item_type_name, None)
if primitive_from_typed is not None:
return primitive_from_typed
return item_type
|
try:
from maya import cmds
except ImportError:
print("Must be in a maya environment!")
raise
from rig.maya.dag import get_positions
def create_line(objects, attach=True, attachParents=[], name=""):
"""
Creates a line between objects, that optionally
attaches to each
"""
if not name:
name = "line_display"
print("Attach is: {}".format(attach))
positions = get_positions(objects)
curve = create_from_points(positions, name=name, degree=1)
# Rig CVs to each object
if attach:
if not attachParents:
attachParents = objects[:]
print("Attaching...")
cvs = get_cvs(curve)
for i in range(len(objects)):
cluster = cmds.cluster(cvs[i])
cmds.parentConstraint(objects[i], cluster, maintainOffset=True)
return curve
def get_cvs(curve):
"""
Given a curve, return its CVs (flattened)
:param str curve: name of curve object
:returns list cvs: list of component cvs
"""
return cmds.ls("{0}.cv[*]".format(curve), flatten=True)
def get_cv_positions(cvs):
"""
Given some components, query their position
in world space
:param list cvs:
:returns list positions:
"""
positions = list()
for cv in cvs:
ws = cmds.xform(cv, query=True, worldSpace=True, translation=True)
positions.append(ws)
return positions
def create_from_points(points, degree=1, name="curve#"):
knotList = [0]
if degree == 1:
knotList.extend(range(1, len(points)))
if degree == 3:
knotList.extend([0])
knotList.extend(range(len(points) - 2))
knotList.extend([knotList[-1], knotList[-1]])
curve = cmds.curve(degree=degree, point=points, knot=knotList)
curve = cmds.rename(curve, name)
return curve
def reorient(curve, downAxis):
    x = 0
    y = 0
    z = 0
    if downAxis in ("x", "-x"):
        z = z + 90
    elif downAxis in ("y", "-y"):
        y = 90
    else:
        x = x + 90
cmds.rotate(x, y, z, get_cvs(curve)) |
from __future__ import print_function
import numpy as np
weights = np.load("pspnet101_voc2012.npy", encoding="latin1", allow_pickle=True).item()  # allow_pickle is required for object arrays on NumPy >= 1.16.3
settable_weights = 0
for layer, value in weights.items():
print(layer)
for attrib, vals in weights[layer].items():
if attrib == "weights":
print("weights: ", vals.shape)
else:
print(attrib)
settable_weights += 1
print("Total settable weights %i" % settable_weights)
|
from sipTransportConnection import SIPTransportConnection
class UDPSIPTransportConnection(SIPTransportConnection):
def __init__(self, bind_address_string, remote_address_string, bind_port_integer, remote_port_integer):
self.twistedProtocol = None
super(UDPSIPTransportConnection, self).__init__(bind_address_string, remote_address_string, bind_port_integer, remote_port_integer)
@property
def is_reliable(self):
return False
@property
def is_stateful(self):
return False
def send_message(self, a_sip_message):
self.twistedProtocol.send_message(a_sip_message)
|
"""
Registrador de eventos.
"""
from logging import INFO, FileHandler, Formatter, StreamHandler, getLogger
from typing import TYPE_CHECKING
from ..auxiliar import Singleton
from ..constantes import LOG_PATH
if TYPE_CHECKING:
from logging import Logger
class LectorLogger(metaclass=Singleton):
"""
Clase que registra eventos del bot.
Hecho con patrón singleton.
"""
def __new__(cls) -> "LectorLogger":
"""
Devuelve la instancia de la clase,
la cual es única y no puede tener duplicados
"""
        if not hasattr(cls, "_instancia"):
            cls._instancia = super(LectorLogger, cls).__new__(cls)
        return cls._instancia
def __init__(self,
*,
nombre_log: str="lector",
nivel_log: int=INFO,
fmt: str="[ %(asctime)s ] [ %(levelname)s ] %(message)s",
fmt_fecha: str="%d-%m-%Y %I:%M:%S %p") -> None:
"""
Crea una instancia de 'LectorLogger'.
"""
super().__init__()
self._formato: str = fmt
self._fmt_fecha: str = fmt_fecha
self._formateador = Formatter(fmt=self.formato, datefmt=self.fmt_fecha)
self.handler_archivo = FileHandler(filename=LOG_PATH, encoding="utf-8")
self.handler_consola = StreamHandler()
self.actualizar_formateador()
self.logger: "Logger" = getLogger(nombre_log)
self.logger.setLevel(nivel_log)
self.logger.addHandler(self.handler_archivo)
self.logger.addHandler(self.handler_consola)
def actualizar_formateador(self) -> None:
"""
Actualiza el formateador para cada handler que el logger tiene.
"""
self.handler_archivo.setFormatter(self.formateador)
self.handler_consola.setFormatter(self.formateador)
@property
def formateador(self) -> Formatter:
"""
Devuelve el formateador en uso.
"""
return self._formateador
@formateador.setter
def formateador(self, nuevo_formateador: Formatter) -> None:
self._formateador = nuevo_formateador
self.actualizar_formateador()
@property
def formato(self) -> str:
"""
Devuelve el formato de los mensajes del log.
"""
return self._formato
@formato.setter
def formato(self, nuevo_formato) -> None:
self._formato = nuevo_formato
self.formateador = Formatter(fmt=self.formato, datefmt=self.fmt_fecha)
@property
def fmt_fecha(self) -> str:
"""
Devuelve el formato de fecha de los mensajes del log.
"""
return self._fmt_fecha
@fmt_fecha.setter
def fmt_fecha(self, nuevo_fmt_fecha: str) -> None:
self._fmt_fecha = nuevo_fmt_fecha
self.formateador = Formatter(fmt=self.formato, datefmt=self.fmt_fecha)
def debug(self, mensaje: str, *args, **kwargs) -> None:
"""
Registra un evento de nivel DEBUG.
"""
self.logger.debug(mensaje, *args, **kwargs)
def info(self, mensaje: str, *args, **kwargs) -> None:
"""
Registra un evento de nivel INFO.
"""
self.logger.info(mensaje, *args, **kwargs)
def warning(self, mensaje: str, *args, **kwargs) -> None:
"""
Registra un evento de nivel WARNING.
"""
self.logger.warning(mensaje, *args, **kwargs)
def error(self, mensaje: str, *args, **kwargs) -> None:
"""
Registra un evento de nivel ERROR.
"""
self.logger.error(mensaje, *args, **kwargs)
def critical(self, message: str, *args, **kwargs) -> None:
"""
Registra un evento de nivel CRITICAL.
"""
self.logger.critical(message, *args, **kwargs)
def exception(self, mensaje, *args, exc_info=True, **kwargs) -> None:
"""
Registra una excepción.
"""
        self.logger.exception(mensaje, *args, exc_info=exc_info, **kwargs)
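# Minimal usage sketch (illustrative):
# logger = LectorLogger()          # always returns the same singleton instance
# logger.info("Bot iniciado")
# logger.formato = "[ %(levelname)s ] %(message)s"  # handlers are re-formatted automatically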
|
import sys
import data_IO
import json
if len(sys.argv) < 3:
print("Number of provided arguments: ", len(sys.argv) - 1)
print("Usage: python testKPIreaderJSON.py <desiredMetrics.json> <outputDir> ")
sys.exit()
kpiFileAddress = sys.argv[1]
outputDir = sys.argv[2]
# Read the desired outputs/metrics from the JSON file:
fp_csvin = data_IO.open_file(kpiFileAddress)
kpihash = json.load(fp_csvin)
fp_csvin.close()
print(kpihash)
obj_json = kpihash
print(json.dumps(obj_json, indent=4))
fkjson = data_IO.open_file(outputDir + "/kpi.json","w")
fkjson.write(json.dumps(obj_json, indent=4))
fkjson.close()
|
# -*- coding: utf-8 -*-
"""
updater enumerations module.
"""
from pyrin.core.decorators import class_property
from pyrin.core.enumerations import CoreEnum
class UpdaterCategoryEnum(CoreEnum):
"""
updater category enum.
"""
CONTENT_RATE = 'content_rate'
COUNTRY = 'country'
GENRE = 'genre'
LANGUAGE = 'language'
META_SCORE = 'meta_score'
POSTER_NAME = 'poster_name'
ORIGINAL_TITLE = 'original_title'
PRODUCTION_YEAR = 'production_year'
IMDB_RATE = 'imdb_rate'
RUNTIME = 'runtime'
STORYLINE = 'storyline'
TITLE = 'title'
ACTORS = 'actors'
DIRECTORS = 'directors'
@class_property
def persons(self):
"""
gets all enumeration values related to persons.
:rtype: tuple[str]
"""
return self.ACTORS, self.DIRECTORS
|
from django.shortcuts import render
from django.http import HttpResponse
def index(request) -> HttpResponse:
"""FAQs index view
"""
return render(request, "faqs/index.html")
def section(request, section_title: str) -> HttpResponse:
"""FAQs section view - FAQ lists for participants, organizers, photographers
"""
ctx = {
"section_title": section_title,
}
return render(request, "faqs/section.html", context=ctx)
|
class Solution(object):
def numberToWords(self, num):
"""
:type num: int
:rtype: str
"""
to19 = 'One Two Three Four Five Six Seven Eight Nine Ten Eleven Twelve ' \
'Thirteen Fourteen Fifteen Sixteen Seventeen Eighteen Nineteen'.split()
tens = 'Twenty Thirty Forty Fifty Sixty Seventy Eighty Ninety'.split()
        def words(n):
            if n < 20:
                return to19[n-1:n] #n=0 will give an empty list
            elif n < 100:
                return [tens[n//10-2]] + words(n%10)
            elif n < 1000:
                return [to19[n//100-1]] + ['Hundred'] + words(n%100)
            for order, word in enumerate(('Thousand', 'Million', 'Billion'), 1):
                if n < 1000**(order+1):
                    return words(n//1000**order) + [word] + words(n%1000**order)
            # e.g. 'Thousand' below:
            # if n < 1000000:
            #     return words(n//1000) + ['Thousand'] + words(n%1000)
        return 'Zero' if num==0 else ' '.join(words(num))
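# Example (worked by hand):
# Solution().numberToWords(1234567)
# -> "One Million Two Hundred Thirty Four Thousand Five Hundred Sixty Seven"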
|
import pytest
from pygraphblas import *
from pygraphblas import lib
def test_options_set():
opts = options_get()
iz = lambda name, typ: isinstance(opts.get(name), typ)
assert iz("nthreads", int)
assert iz("chunk", float)
assert iz("burble", int)
assert iz("format", int)
assert iz("hyper_switch", float)
assert iz("bitmap_switch", list)
assert opts["burble"] == 0
options_set(nthreads=4)
options_set(chunk=4096)
options_set(burble=1)
options_set(format=lib.GxB_BY_COL)
options_set(hyper_switch=1.0)
options_set(bitmap_switch=[1, 2, 3, 4, 5, 6, 7, 8])
news = options_get()
ez = lambda name, v: news.get(name) == v
assert ez("nthreads", 4)
assert ez("chunk", 4096)
assert ez("burble", 1)
assert ez("format", lib.GxB_BY_COL)
assert ez("hyper_switch", 1.0)
assert ez("bitmap_switch", [1, 2, 3, 4, 5, 6, 7, 8])
options_set(**opts)
assert opts == options_get()
|
class Base1(object):
def __init__(self):
self.str1 = "anum"
print "Base1"
class Base2(object):
def __init__(self):
self.str2 = "sharma"
print "Base2"
class Derived(Base1, Base2):
def __init__(self):
# Calling constructors of Base1
# and Base2 classes
Base1.__init__(self)
Base2.__init__(self)
print "Derived"
def printString(self):
print(self.str1, self.str2)
ob = Derived()
ob.printString()
"""
Base1
Base2
Derived
('anum', 'sharma')
"""
|
import time
import Box2D
import Box2D.b2
import gym
from gym import spaces
from gym.utils import colorize, seeding, EzPickle
import numpy as np
env = gym.make('Box2D:BipedalWalkerHardcore-v3')
observations = env.reset()
reward = 0
import neural_network_NE_agent_2
neural_network_NE_agent_2.Evolution.simulate_generation(self=neural_network_NE_agent_2.Evolution, observation=None, is_dead=True, score=0, first_time=True)
first_time = False
done = False
for i in range(10000):
for q in range(neural_network_NE_agent_2.genomes):
start_timer = time.time()
while not done:
output_signal = neural_network_NE_agent_2.Evolution.simulate_generation(self=neural_network_NE_agent_2.Evolution, observation=observations, is_dead=False, score=reward, first_time=False)
observations, reward, done, info = env.step(output_signal[0])
done = False
observations = env.reset()
neural_network_NE_agent_2.Evolution.simulate_generation(self=neural_network_NE_agent_2.Evolution, observation=None, is_dead=True, score=0, first_time=False)
|
FULLNODE = "http://node.deviceproof.org:14265"
# FULLNODE = "http://node10.puyuma.org:14265"
SEED = 'AMRWQP9BUMJALJHBXUCHOD9HFFD9LGTGEAWMJWWXSDVOF9PI9YGJAPBQLQUOMNYEQCZPGCTHGVNNAPGHA'
|
from sklearn.model_selection import train_test_split
from utils.data_util import get_stocks
def prepare_data(company_symbol, result_feature, features, forecast_out, test_size, random_state):
"""
Method will shift data values by given 'forecast_out' amount. Basically, we will be predicting 'n' values
in the future by shifting the values 'n' backwards. So, we will use the data that comes 'n' days ago to
predict today and further.
Parameter
-------
_dataset = Stock Dataset
Returns
-------
X_train : Set of features (For training the model)
X_test : Set of features (For evaluating the model)
y_train : Set of label (For training the model)
y_test : Set of label (For evaluation the model)
x_forecast: Forecast out (It will be used to predict 'n' days ahead
"""
dataset = get_stocks(company_symbol, features, result_feature)
dataset['Prediction'] = dataset[result_feature].shift(-forecast_out)
X = dataset.loc[:, features].values
X = X[:-forecast_out]
full_dataset = dataset.drop(columns='Prediction', axis=1)
y = dataset.loc[:, 'Prediction'].values
y = y[:-forecast_out]
x_forecast = dataset.loc[:, features].values
x_forecast = x_forecast[-forecast_out:]
X_train, X_test, y_train, y_test = train_test_split(X,
y,
test_size=test_size,
random_state=random_state)
return X_train, X_test, y_train, y_test, x_forecast, full_dataset
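# Minimal usage sketch (illustrative; the symbol and column names are assumptions
# about what get_stocks returns, not taken from the project itself):
# X_train, X_test, y_train, y_test, x_forecast, full_dataset = prepare_data(
#     company_symbol="AAPL", result_feature="Close",
#     features=["Open", "High", "Low", "Close"],
#     forecast_out=30, test_size=0.2, random_state=42)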
def get_model_performance(regression_model, X_test, y_test):
"""
    Method will use the passed regression model to make predictions on the test data and report the accuracy metrics.
Returns
-------
performance_results = A dictionary which includes performance metrics:
variance_score
max_error_value
mean_abs_error_value
mean_square_error_value
r2_value
"""
from sklearn.metrics import explained_variance_score, max_error, mean_absolute_error, mean_squared_error, r2_score
# Predicting the Test set results
y_pred = regression_model.predict(X_test)
# Performance Metrics:
variance_score = explained_variance_score(y_test, y_pred)
max_error_value = max_error(y_test, y_pred)
mean_abs_error_value = mean_absolute_error(y_test, y_pred)
mean_square_error_value = mean_squared_error(y_test, y_pred)
r2_value = r2_score(y_test, y_pred)
# Metric Dictionary
performance_results = {
"variance_score": variance_score,
"max_error_value": max_error_value,
"mean_abs_error_value": mean_abs_error_value,
"mean_square_error_value": mean_square_error_value,
"r2_value": r2_value
}
return performance_results
def get_future_predictions(regression_model, x_forecast):
"""
    Method will use the passed regression model to predict future stock prices.
    Parameter
    -------
    regression_model = A regression model that has been created and fit beforehand
Returns
-------
forecast_stocks = List of future stock prices
"""
forecast_stocks = regression_model.predict(x_forecast)
return forecast_stocks
|
import os
from glob import glob
import hickle
import numpy as np
from datasets.phys import Phys
from utils.config import _C as C
from utils.misc import tprint
plot = False
class PHYRE(Phys):
def __init__(self, data_root, split, template, image_ext='.jpg'):
super().__init__(data_root, split, template, image_ext)
protocal = C.PHYRE_PROTOCAL
fold = C.PHYRE_FOLD
template = C.TEMPLATE
env_list = open(f'{data_root}/splits/{protocal}_{split}_{template}_fold_{fold}.txt', 'r').read().split('\n')
self.video_list = sum([sorted(glob(f'{data_root}/images/{env.replace(":", "/")}/*.npy')) for env in env_list],
[])
self.anno_list = [(v[:-4] + '_boxes.hkl').replace('images', 'labels') for v in self.video_list]
# video_info_name = f'for_plot.npy'
video_info_name = f'{data_root}/{protocal}_{split}_{template}_{self.input_size}_{self.pred_size}_fold_{fold}_info.npy'
if os.path.exists(video_info_name):
print(f'loading info from: {video_info_name}')
self.video_info = np.load(video_info_name)
else:
self.video_info = np.zeros((0, 2), dtype=np.int32)
for idx, video_name in enumerate(self.video_list):
tprint(f'loading progress: {idx}/{len(self.video_list)}')
num_im = hickle.load(video_name.replace('images', 'labels').replace('.npy', '_boxes.hkl')).shape[0]
if plot:
# we will pad sequence so no check
num_sw = 1
else:
assert self.input_size == 1
num_sw = min(1, num_im - self.seq_size + 1)
if num_sw <= 0:
continue
video_info_t = np.zeros((num_sw, 2), dtype=np.int32)
video_info_t[:, 0] = idx # video index
video_info_t[:, 1] = np.arange(num_sw) # sliding window index
self.video_info = np.vstack((self.video_info, video_info_t))
np.save(video_info_name, self.video_info)
def _parse_image(self, video_name, vid_idx, img_idx):
data = np.load(video_name)
return data.reshape(1, 1, data.shape[0], data.shape[1])
def _parse_label(self, anno_name, vid_idx, img_idx):
boxes = hickle.load(anno_name)[img_idx:img_idx + self.seq_size, :, 1:]
if_destroyed = boxes[:, :, -1]
boxes = boxes[:, :, :-1]
gt_masks = np.zeros((self.pred_size, boxes.shape[1], C.RPIN.MASK_SIZE, C.RPIN.MASK_SIZE))
if C.RPIN.MASK_LOSS_WEIGHT > 0:
anno_name = anno_name.replace('boxes.', 'masks.')
gt_masks = hickle.load(anno_name)
gt_masks = gt_masks[img_idx:img_idx + self.seq_size].astype(np.float32)
gt_masks = gt_masks[self.input_size:]
if plot:
boxes = np.concatenate([boxes] + [boxes[[-1]] for _ in range(self.seq_size - boxes.shape[0])], axis=0)
gt_masks = np.concatenate(
[gt_masks] + [gt_masks[[-1]] for _ in range(self.pred_size - gt_masks.shape[0])], axis=0
)
return boxes, if_destroyed, gt_masks
|
"""Helpers for working with Recurly's recurly.js packge"""
from django.template.loader import render_to_string
from django_recurly.conf import SUBDOMAIN, DEFAULT_CURRENCY
from django_recurly.utils import recurly, dump
def get_signature(obj):
return recurly.js.sign(obj)
def get_config(subdomain=SUBDOMAIN, currency=DEFAULT_CURRENCY):
return render_to_string("django_recurly/config.js", {
"subdomain": subdomain,
"currency": currency,
})
def get_signed_form_options(protected_params={}, unprotected_params={}):
from django_recurly.utils import dict_merge
# Protected params
data = dict_merge({}, protected_params)
data['signature'] = get_signature(data)
# Unprotected params (overridden by existing protected params)
data = dict_merge({}, unprotected_params, data)
data['json'] = dump(data, js=True)
return data
def get_subscription_form(plan_code, user, target_element='#recurly-container', protected_params={}, unprotected_params={}):
from django_recurly.utils import dict_merge
# Protected params
protected_data = {
'plan_code': plan_code,
'subscription': {
'plan_code': plan_code,
},
'account': {
'username': user.username,
},
}
dict_merge(protected_data, protected_params)
# Unprotected params
unprotected_data = {
'target': target_element
}
dict_merge(unprotected_data, unprotected_params)
data = get_signed_form_options(protected_data, unprotected_data)
return render_to_string("django_recurly/build_subscription_form.js", data)
def get_billing_info_update_form(user, account, target_element='#recurly-container', protected_params={}, unprotected_params={}):
from django_recurly.utils import dict_merge
# Protected params
protected_data = {
'account_code': account.account_code,
'account': {
'account_code': account.account_code,
'username': account.username,
},
'addressRequirement': 'none',
}
dict_merge(protected_data, protected_params)
# Unprotected params
unprotected_data = {
'target': target_element,
'distinguish_contact_from_billing_info': False,
'account': account.to_dict(js=True),
'billing_info': account.billing_info.to_dict(js=True)
}
dict_merge(unprotected_data, unprotected_params)
data = get_signed_form_options(protected_data, unprotected_data)
return render_to_string("django_recurly/build_billing_info_update_form.js", data)
|
from .conf import *
__version__ = "1.1"
|
import FWCore.ParameterSet.Config as cms
muonIsolations = cms.EDProducer("ValeMapFloatMerger",
src = cms.VInputTag(cms.InputTag("goodMuonIsolations"), cms.InputTag("goodTrackIsolations"), cms.InputTag("goodStandAloneMuonTrackIsolations"))
)
|
{%- set klass = cookiecutter.project_slug.capitalize() -%}
{%- set obj = cookiecutter.project_slug.lower() -%}
{%- set is_open_source = cookiecutter.open_source_license != 'Not open source' -%}
# -*- coding: utf-8 -*-
#
# This file is part of the {{ cookiecutter.project_name }} project
#
# Copyright (c) {% now 'local', '%Y' %} {{ cookiecutter.full_name }}
{% if is_open_source -%}
# Distributed under the {{ cookiecutter.open_source_license }}. See LICENSE for more info.
{% endif %}
"""Tango server class for {{ klass }}"""
import asyncio
import urllib.parse
from connio import connection_for_url
from tango import GreenMode
from tango.server import Device, attribute, command, device_property
import {{ cookiecutter.project_slug }}.core
class {{ klass }}(Device):
green_mode = GreenMode.Asyncio
url = device_property(dtype=str)
async def init_device(self):
await super().init_device()
self.connection = connection_for_url(self.url, concurrency="async")
self.{{ obj }} = {{ cookiecutter.project_slug }}.core.{{ klass }}(self.connection)
@attribute(dtype=str, label="ID")
def idn(self):
return self.{{ obj }}.get_idn()
@attribute(dtype=float, unit="bar", label="Pressure")
async def pressure(self):
# example processing the result
pressure = await self.{{ obj }}.get_pressure()
return pressure / 1000
@attribute(dtype=float, unit="bar", label="Pressure set point")
async def pressure_setpoint(self):
# example processing the result
setpoint = await self.{{ obj }}.get_pressure_setpoint()
return setpoint / 1000
    @pressure_setpoint.setter
    def pressure_setpoint(self, value):
        # example returning the coroutine back to tango
        # (assumes the core object exposes a matching set_pressure_setpoint)
        return self.{{ obj }}.set_pressure_setpoint(value * 1000)
@command
def turn_on(self):
        # example returning the coroutine back to the calling function
return self.{{ obj }}.turn_on()
if __name__ == "__main__":
import logging
fmt = "%(asctime)s %(levelname)s %(name)s %(message)s"
logging.basicConfig(level="DEBUG", format=fmt)
{{ klass }}.run_server()
|
from .bloom_filter import BloomFilter, Response
__all__ = ["BloomFilter", "Response"] |
"""
This file solves the second Advent of Code 2020 puzzle.
https://adventofcode.com/2020/day/2
"""
def parse_line(line: str) -> dict:
"""
This function inelegantly gets all the parts of the password policy and password and stores
them in a dictionary.
:param line: The line containing the password policy and password to parse through.
:return: A dict with all the parts of the line with specific keys.
"""
ret_val = {}
line = line.split(':')
min_max = line[0][:-2].split('-')
ret_val['min'] = int(min_max[0])
ret_val['max'] = int(min_max[1])
ret_val['char'] = line[0][-1]
ret_val['data'] = line[1]
return ret_val
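# Example (worked by hand): parse_line("1-3 a: abcde")
# -> {'min': 1, 'max': 3, 'char': 'a', 'data': ' abcde'}
# The leading space kept in 'data' is what makes the 1-based indexing in
# check_compliance line up with the puzzle's position rule.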
def check_compliance(password: dict) -> bool:
"""
Takes a line as returned by 'parse_line' and returns a boolean value for if the
password is compliant with the policy.
:param password: A line, freshly parsed by the 'parse_line' function.
:return: True if the password is compliant.
"""
# The following two (commented) lines solve part one of the day's puzzle.
# n = password['data'].count(password['char'])
# return password['min'] <= n <= password['max']
interest = password['data'][password['min']] + password['data'][password['max']]
return interest.count(password['char']) == 1
def parse_input(path: str) -> int:
"""
Parses the puzzle input file and calculates how many lines are compliant with the policy.
:param path: The path to the input puzzle file.
    :return: The number of compliant passwords.
"""
with open(path) as inp:
compliance_list = [check_compliance(parse_line(line)) for line in inp]
return sum(compliance_list)
input_path = "puzzle_input.py"
print(parse_input(input_path))
|
from .order.suggested import SuggestedOrder
from .portfolio import Portfolio
from .price_parser import PriceParser
class PortfolioHandler(object):
def __init__(
self, initial_cash, events_queue,
price_handler, position_sizer, risk_manager
):
"""
The PortfolioHandler is designed to interact with the
backtesting or live trading overall event-driven
architecture. It exposes two methods, on_signal and
on_fill, which handle how SignalEvent and FillEvent
objects are dealt with.
Each PortfolioHandler contains a Portfolio object,
which stores the actual Position objects.
The PortfolioHandler takes a handle to a PositionSizer
object which determines a mechanism, based on the current
Portfolio, as to how to size a new Order.
The PortfolioHandler also takes a handle to the
RiskManager, which is used to modify any generated
Orders to remain in line with risk parameters.
"""
self.initial_cash = initial_cash
self.events_queue = events_queue
self.price_handler = price_handler
self.position_sizer = position_sizer
self.risk_manager = risk_manager
self.portfolio = Portfolio(price_handler, initial_cash)
def _create_order_from_signal(self, signal_event):
"""
Take a SignalEvent object and use it to form a
SuggestedOrder object. These are not OrderEvent objects,
as they have yet to be sent to the RiskManager object.
At this stage they are simply "suggestions" that the
RiskManager will either verify, modify or eliminate.
"""
if signal_event.suggested_quantity is None:
quantity = 0
else:
quantity = signal_event.suggested_quantity
order = SuggestedOrder(
signal_event.ticker,
signal_event.action,
quantity=quantity,
strategy_name=signal_event.strategy_name
)
return order
def _place_orders_onto_queue(self, order_list):
"""
Once the RiskManager has verified, modified or eliminated
any order objects, they are placed onto the events queue,
to ultimately be executed by the ExecutionHandler.
"""
for order_event in order_list:
self.events_queue.put(order_event)
def _convert_fill_to_portfolio_update(self, fill_event):
"""
Upon receipt of a FillEvent, the PortfolioHandler converts
the event into a transaction that gets stored in the Portfolio
object. This ensures that the broker and the local portfolio
are "in sync".
In addition, for backtesting purposes, the portfolio value can
be reasonably estimated in a realistic manner, simply by
modifying how the ExecutionHandler object handles slippage,
transaction costs, liquidity and market impact.
"""
action = fill_event.action
ticker = fill_event.ticker
quantity = fill_event.quantity
price = fill_event.price
commission = fill_event.commission
# Create or modify the position from the fill info
if 'ICBC' in self.portfolio.positions:
print(' 期初资产:{0}; 现金:{1}; 数量:{2}'.format(self.portfolio.equity / PriceParser.PRICE_MULTIPLIER, self.portfolio.cur_cash / PriceParser.PRICE_MULTIPLIER, self.portfolio.positions['ICBC'].quantity))
else:
print(' 期初资产:{0}; 现金:{1}; 数量:{2}'.format(self.portfolio.equity / PriceParser.PRICE_MULTIPLIER, self.portfolio.cur_cash / PriceParser.PRICE_MULTIPLIER, 0))
self.portfolio.transact_position(
action, ticker, quantity,
price, commission
)
print(' {0} {1} 数量:{2}; 价格:{3}; 手续费:{4}'.format(ticker, action, quantity, price / PriceParser.PRICE_MULTIPLIER, commission / PriceParser.PRICE_MULTIPLIER))
if 'ICBC' in self.portfolio.positions:
print(' 期末资产:{0}; 现金:{1}; 数量:{2}'.format(self.portfolio.equity / PriceParser.PRICE_MULTIPLIER, self.portfolio.cur_cash / PriceParser.PRICE_MULTIPLIER, self.portfolio.positions['ICBC'].quantity))
else:
print(' 期末资产:{0}; 现金:{1}; 数量:{2}'.format(self.portfolio.equity / PriceParser.PRICE_MULTIPLIER, self.portfolio.cur_cash / PriceParser.PRICE_MULTIPLIER, 0))
def on_signal(self, signal_event):
"""
This is called by the backtester or live trading architecture
to form the initial orders from the SignalEvent.
These orders are sized by the PositionSizer object and then
sent to the RiskManager to verify, modify or eliminate.
Once received from the RiskManager they are converted into
full OrderEvent objects and sent back to the events queue.
"""
# Create the initial order list from a signal event
initial_order = self._create_order_from_signal(signal_event)
# Size the quantity of the initial order
sized_order = self.position_sizer.size_order(
self.portfolio, initial_order
)
# Refine or eliminate the order via the risk manager overlay
order_events = self.risk_manager.refine_orders(
self.portfolio, sized_order
)
# Place orders onto events queue
self._place_orders_onto_queue(order_events)
def on_fill(self, fill_event):
"""
This is called by the backtester or live trading architecture
to take a FillEvent and update the Portfolio object with new
or modified Positions.
In a backtesting environment these FillEvents will be simulated
by a model representing the execution, whereas in live trading
they will come directly from a brokerage (such as Interactive
Brokers).
"""
self._convert_fill_to_portfolio_update(fill_event)
def update_portfolio_value(self):
"""
Update the portfolio to reflect current market value as
based on last bid/ask of each ticker.
"""
self.portfolio._update_portfolio()
|
from .utils import encode_attr
from .control import Control
class Item(Control):
def __init__(self, text=None, id=None, secondary_text=None, url=None, new_window=None,
icon=None, icon_color=None, icon_only=None, split=None, divider=None, onclick=None, items=[],
width=None, height=None, padding=None, margin=None,
visible=None, disabled=None):
Control.__init__(self, id=id,
width=width, height=height, padding=padding, margin=margin,
visible=visible, disabled=disabled)
self.text = text
self.secondary_text = secondary_text
self.url = url
self.new_window = new_window
self.icon = icon
self.icon_color = icon_color
self.icon_only = icon_only
self.split = split
self.divider = divider
self.onclick = onclick
self._items = []
if items and len(items) > 0:
for item in items:
self.add_item(item)
def _getControlName(self):
return "item"
def add_item(self, item):
if isinstance(item, Item):
self._items.append(item)
else:
self._items.append(Item(str(item)))
# onclick
@property
def onclick(self):
return None
@onclick.setter
def onclick(self, handler):
self._add_event_handler("click", handler)
# items
@property
def items(self):
return self._items
# text
@property
def text(self):
return self._get_attr("text")
@text.setter
def text(self, value):
self._set_attr("text", value)
# secondary_text
@property
def secondary_text(self):
return self._get_attr("secondaryText")
@secondary_text.setter
def secondary_text(self, value):
self._set_attr("secondaryText", value)
# url
@property
def url(self):
return self._get_attr("url")
@url.setter
def url(self, value):
self._set_attr("url", value)
# new_window
@property
def new_window(self):
return self._get_attr("newWindow")
@new_window.setter
def new_window(self, value):
assert value == None or isinstance(value, bool), "value must be a boolean"
self._set_attr("newWindow", value)
# icon
@property
def icon(self):
return self._get_attr("icon")
@icon.setter
def icon(self, value):
self._set_attr("icon", value)
# icon_color
@property
def icon_color(self):
return self._get_attr("iconColor")
@icon_color.setter
def icon_color(self, value):
self._set_attr("iconColor", value)
# icon_only
@property
def icon_only(self):
return self._get_attr("iconOnly")
@icon_only.setter
def icon_only(self, value):
assert value == None or isinstance(value, bool), "icon_only must be a boolean"
self._set_attr("iconOnly", value)
# split
@property
def split(self):
return self._get_attr("split")
@split.setter
def split(self, value):
assert value == None or isinstance(value, bool), "split must be a boolean"
self._set_attr("split", value)
# divider
@property
def divider(self):
return self._get_attr("divider")
@divider.setter
def divider(self, value):
assert value == None or isinstance(value, bool), "divider must be a boolean"
self._set_attr("divider", value)
def _getChildren(self):
return self._items
class Button(Control):
def __init__(self, text=None, id=None, primary=None, compound=None, action=None, toolbar=None,
split=None, secondary_text=None, url=None, new_window=None,
title=None, icon=None, icon_color=None, data=None, onclick=None, items=[],
width=None, height=None, padding=None, margin=None,
visible=None, disabled=None):
Control.__init__(self, id=id,
width=width, height=height, padding=padding, margin=margin,
visible=visible, disabled=disabled)
self.primary = primary
self.compound = compound
self.action = action
self.toolbar = toolbar
self.split = split
self.text = text
self.secondary_text = secondary_text
self.url = url
self.new_window = new_window
self.title = title
self.icon = icon
self.icon_color = icon_color
self.data = data
self.onclick = onclick
self._items = []
if items and len(items) > 0:
for item in items:
self.add_item(item)
def _getControlName(self):
return "button"
def add_item(self, item):
assert isinstance(item, Item), 'button can hold items only'
self._items.append(item)
# onclick
@property
def onclick(self):
return None
@onclick.setter
def onclick(self, handler):
self._add_event_handler("click", handler)
# primary
@property
def primary(self):
return self._get_attr("primary")
@primary.setter
def primary(self, value):
assert value == None or isinstance(value, bool), "primary must be a boolean"
self._set_attr("primary", value)
# compound
@property
def compound(self):
return self._get_attr("compound")
@compound.setter
def compound(self, value):
assert value == None or isinstance(value, bool), "compound must be a boolean"
self._set_attr("compound", value)
# action
@property
def action(self):
return self._get_attr("action")
@action.setter
def action(self, value):
assert value == None or isinstance(value, bool), "action must be a boolean"
self._set_attr("action", value)
# toolbar
@property
def toolbar(self):
return self._get_attr("toolbar")
@toolbar.setter
def toolbar(self, value):
assert value == None or isinstance(value, bool), "toolbar must be a boolean"
self._set_attr("toolbar", value)
# split
@property
def split(self):
return self._get_attr("split")
@split.setter
def split(self, value):
assert value == None or isinstance(value, bool), "split must be a boolean"
self._set_attr("split", value)
# text
@property
def text(self):
return self._get_attr("text")
@text.setter
def text(self, value):
self._set_attr("text", value)
# secondary_text
@property
def secondary_text(self):
return self._get_attr("secondaryText")
@secondary_text.setter
def secondary_text(self, value):
self._set_attr("secondaryText", value)
# url
@property
def url(self):
return self._get_attr("url")
@url.setter
def url(self, value):
self._set_attr("url", value)
# new_window
@property
def new_window(self):
return self._get_attr("newWindow")
@new_window.setter
def new_window(self, value):
assert value == None or isinstance(value, bool), "new_window must be a boolean"
self._set_attr("newWindow", value)
# title
@property
def title(self):
return self._get_attr("title")
@title.setter
def title(self, value):
self._set_attr("title", value)
# icon
@property
def icon(self):
return self._get_attr("icon")
@icon.setter
def icon(self, value):
self._set_attr("icon", value)
# icon_color
@property
def icon_color(self):
return self._get_attr("iconColor")
@icon_color.setter
def icon_color(self, value):
self._set_attr("iconColor", value)
# data
@property
def data(self):
return self._get_attr("data")
@data.setter
def data(self, value):
self._set_attr("data", value)
def _getChildren(self):
return self._items |
# Do not edit this file directly.
# It was auto-generated by: code/programs/reflexivity/reflexive_refresh
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_file")
def mpmcQueue():
http_archive(
name = "mpmc_queue",
build_file = "//bazel/deps/mpmc_queue:build.BUILD",
sha256 = "675004f332c74390c16efea98f30ebc636a2855434bdbfa24eaa703501a6ae0f",
strip_prefix = "MPMCQueue-5883e32b07e8a60c22d532d9120ea5c11348aea9",
urls = [
"https://github.com/Unilang/MPMCQueue/archive/5883e32b07e8a60c22d532d9120ea5c11348aea9.tar.gz",
],
)
|
# Advent of Code - Day 10
def parse(input):
return [[char for char in row] for row in input]
def corrupt_closer(open_and_shut, row):
"""Returns boolean indicating corruptness and first corrupt closer"""
openers = open_and_shut.keys()
closers = open_and_shut.values()
openers_stack = []
for char in row:
if char in openers:
openers_stack.append(char)
elif char in closers:
if not openers_stack:
return True, char
else:
last_unclosed_opener = openers_stack.pop()
if not open_and_shut[last_unclosed_opener] == char:
return True, char
return False, None
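# Example (worked by hand, using the third sample line below):
# corrupt_closer({"(": ")", "[": "]", "{": "}", "<": ">"}, "{([(<{}[<>[]}>{[]{[(<()>")
# -> (True, "}")   # a ']' was expected where the '}' appears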
def score_invalid_closers(invalid_closers):
points = {")": 3, "]": 57, "}": 1197, ">": 25137}
return sum([points[closer] for closer in invalid_closers])
def result_part1(input):
data = parse(input)
open_and_shut = {"(": ")", "[": "]", "{": "}", "<": ">"}
invalid_closers = []
for row in data:
corrupt, char = corrupt_closer(open_and_shut, row)
if corrupt:
invalid_closers.append(char)
return score_invalid_closers(invalid_closers)
def incorrect_open_close(open_and_shut, row):
opener_keys = open_and_shut.keys()
closer_keys = open_and_shut.values()
openers_stack = []
invalid_closer = []
for char in row:
if char in opener_keys:
openers_stack.append(char)
elif char in closer_keys:
if not openers_stack:
invalid_closer.append(char)
else:
last_unclosed_opener = openers_stack.pop()
if not open_and_shut[last_unclosed_opener] == char:
invalid_closer.append(char)
return invalid_closer, openers_stack
def score_unmatched_openers(matching_pairs, unmatched_openers):
points = {")": 1, "]": 2, "}": 3, ">": 4}
missing_closers = [matching_pairs[opener] for opener in unmatched_openers]
missing_closers.reverse()
total_score = 0
for closer in missing_closers:
total_score *= 5
total_score += points[closer]
return total_score
def result_part2(input):
data = parse(input)
open_and_shut = {"(": ")", "[": "]", "{": "}", "<": ">"}
scores = []
for row in data:
invalid_closer, unmatched_openers = incorrect_open_close(open_and_shut, row)
if invalid_closer:
continue
else:
scores.append(score_unmatched_openers(open_and_shut, unmatched_openers))
scores.sort(reverse=True)
middle_score = scores[int(len(scores) / 2)]
return middle_score
sample_input = [
"[({(<(())[]>[[{[]{<()<>>",
"[(()[<>])]({[<{<<[]>>(",
"{([(<{}[<>[]}>{[]{[(<()>",
"(((({<>}<{<{<>}{[]{[]{}",
"[[<[([]))<([[{}[[()]]]",
"[{[{({}]{}}([{[{{{}}([]",
"{<[[]]>}<{[{[{[]{()[[[]",
"[<(<(<(<{}))><([]([]()",
"<{([([[(<>()){}]>(<<{{",
"<{([{{}}[<[[[<>{}]]]>[]]",
]
input = sample_input
# print(parse(input))
print(result_part1(input))
print(result_part2(input))
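# For the sample input above, the puzzle's worked example gives 26397 for part 1
# and 288957 for part 2.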
|
import sys
from PyQt5 import QtCore, QtWidgets
from PyQt5.QtWidgets import QMainWindow, QLabel, QGridLayout, QWidget
from PyQt5.QtCore import QSize
class HelloWindow(QMainWindow):
def __init__(self):
QMainWindow.__init__(self)
self.setMinimumSize(QSize(280, 120))
self.setWindowTitle("Olá, Mundo! Exemplo PyQT5")
centralWidget = QWidget(self)
self.setCentralWidget(centralWidget)
        gridLayout = QGridLayout()  # parent is assigned via setLayout below
centralWidget.setLayout(gridLayout)
title = QLabel("Olá Mundo para PyQt", self)
title.setAlignment(QtCore.Qt.AlignCenter)
gridLayout.addWidget(title, 0, 0)
if __name__ == "__main__":
app = QtWidgets.QApplication(sys.argv)
mainWin = HelloWindow()
mainWin.show()
sys.exit( app.exec_() )
|
# Copyright 2021 MosaicML. All Rights Reserved.
from __future__ import annotations
from dataclasses import MISSING, fields
from enum import Enum
from typing import TYPE_CHECKING, List, NamedTuple, Optional, Type, get_type_hints
import yahp as hp
from yahp.utils.interactive import query_with_options
from yahp.utils.iter_helpers import ensure_tuple
from yahp.utils.type_helpers import HparamsType, get_default_value, is_field_required, safe_issubclass
if TYPE_CHECKING:
from yahp.types import JSON, HparamsField
try:
from ruamel_yaml import YAML # type: ignore
from ruamel_yaml.comments import CommentedMap, CommentedSeq # type: ignore
except ImportError as _:
from ruamel.yaml import YAML # type: ignore
from ruamel.yaml.comments import CommentedMap, CommentedSeq # type: ignore
def _to_json_primitive(val: HparamsField) -> JSON:
if isinstance(val, Enum):
return val.name
if val is None or isinstance(val, (str, float, int, dict)):
# if dict, assuming already a json dict
return val
if isinstance(val, list):
return [_to_json_primitive(x) for x in val]
raise TypeError(f"Cannot convert value of type {type(val)} into a JSON primitive")
def _add_commenting(
cm: CommentedMap,
comment_key: str,
eol_comment: str,
typing_column: int,
choices: Optional[List[str]] = None,
) -> None:
if choices:
eol_comment = f"{eol_comment} Options: {', '.join(choices)}."
if typing_column + len(eol_comment) <= 120:
cm.yaml_add_eol_comment(eol_comment, key=comment_key, column=typing_column)
else:
cm.yaml_set_comment_before_after_key(key=comment_key, before=eol_comment)
cm.fa.set_block_style()
class CMOptions(NamedTuple):
add_docs: bool
typing_column: int
interactive: bool
def _process_abstract_hparams(hparams: Type[hp.Hparams], path_with_fname: List[str], is_list: bool, options: CMOptions):
"""Generate a template for an abstract :class:`~yahp.hparams.Hparams`.
If in interactive mode (as specified in ``options``), then a CLI prompt is used to determine which
concrete subclass should be enumerated. Otherwise, all are dumped.
Args:
hparams (Type[hp.Hparams]):
The parent of the abstract :class:`~yahp.hparams.Hparams` object.
path_with_fname (List[str]):
The path from the root :class:`~yahp.hparams.Hparams` to the abstract field.
is_list (bool): Whether the abstract field is a list.
options (CMOptions): CMOptions from :meth:`to_commented_map`.
Returns:
The generated template for the field, as a
:class:`~ruamel.yaml.comments.CommentedSeq` if ``is_list``,
otherwise, a :class:`~ruamel.yaml.comments.CommentedMap``
"""
field_name = path_with_fname[-1]
possible_sub_hparams = hparams.hparams_registry[field_name]
possible_keys = list(possible_sub_hparams.keys())
if options.interactive:
leave_blank_option = "(Leave Blank)"
dump_all_option = "(Dump all)"
name = f"Field {'.'.join(path_with_fname)}:"
if is_list:
interactive_response = query_with_options(
name=name,
options=[leave_blank_option] + possible_keys + [dump_all_option],
default_response=dump_all_option,
multiple_ok=True,
)
if leave_blank_option in interactive_response:
possible_keys = []
elif dump_all_option not in interactive_response:
possible_keys = interactive_response
else:
interactive_response = query_with_options(
name=name,
options=possible_keys + [dump_all_option],
default_response=dump_all_option,
multiple_ok=False,
)
if dump_all_option != interactive_response:
possible_keys = [interactive_response]
# filter possible_sub_hparams to those in possible_keys
possible_sub_hparams = {k: v for (k, v) in possible_sub_hparams.items() if k in possible_keys}
sub_hparams = CommentedSeq() if is_list else CommentedMap()
for sub_key, sub_type in possible_sub_hparams.items():
sub_map = to_commented_map(
cls=sub_type,
path=list(path_with_fname) + [sub_key],
options=options,
)
if is_list:
sub_item = CommentedMap()
sub_item[sub_key] = sub_map
sub_hparams.append(sub_item)
if options.add_docs:
_add_commenting(sub_item,
comment_key=sub_key,
eol_comment=sub_type.__name__,
typing_column=options.typing_column)
continue
sub_hparams[sub_key] = sub_map
if options.add_docs:
_add_commenting(sub_hparams,
comment_key=sub_key,
eol_comment=sub_type.__name__,
typing_column=options.typing_column)
return sub_hparams
def to_commented_map(
cls: Type[hp.Hparams],
options: CMOptions,
path: List[str],
) -> YAML:
"""Converts a Hparams class into a CommentedMap YAML template.
.. note::
This function should not be called directly.
Instead, use :meth:`~yahp.hparams.Hparams.dump` or
:meth:`~yahp.hparams.Hparams.dumps`.
Args:
        cls (Type[hp.Hparams]): The class to generate into a template
        options (CMOptions): Options for generating the CommentedMap
path (List[str]): Path to ``cls`` from the root.
Returns:
YAML: YAML template for ``cls``.
"""
# TODO(averylamp) accept existing fields to create a new template from an existing one
output = CommentedMap()
field_types = get_type_hints(cls)
for f in fields(cls):
if not f.init:
continue
path_with_fname = list(path) + [f.name]
ftype = HparamsType(field_types[f.name])
helptext = f.metadata.get("doc")
helptext_suffix = f" Description: {helptext}." if helptext is not None else ""
required = is_field_required(f)
default = get_default_value(f)
default_suffix = ""
optional_prefix = " (Required)"
if not required:
optional_prefix = " (Optional)"
if default is None or safe_issubclass(default, (int, float, str, Enum)):
default_suffix = f" Defaults to {default}."
elif safe_issubclass(default, hp.Hparams):
default_suffix = f" Defaults to {type(default).__name__}."
# Don't print the default, it's too big
if default == MISSING and "template_default" in f.metadata:
default = f.metadata["template_default"]
choices = []
if not ftype.is_hparams_dataclass:
if default != MISSING:
output[f.name] = _to_json_primitive(default)
elif ftype.is_list:
output[f.name] = CommentedSeq()
if ftype.is_enum:
# If an enum list, then put all enum options in the list
output[f.name].extend([x.name for x in ftype.type])
else:
output[f.name] = None
# it's a dataclass, or list of dataclasses
elif f.name not in cls.hparams_registry:
# non-abstract hparams
if default is None:
output[f.name] = None
else:
if default == MISSING:
output[f.name] = [(to_commented_map(
cls=ftype.type,
path=path_with_fname,
options=options,
))]
else:
output[f.name] = [x.to_dict() for x in ensure_tuple(default)]
if not ftype.is_list:
output[f.name] = output[f.name][0]
else:
inverted_hparams = {v: k for (k, v) in cls.hparams_registry[f.name].items()}
choices = [x.__name__ for x in cls.hparams_registry[f.name].values()]
if default is None:
output[f.name] = None
elif default == MISSING:
output[f.name] = _process_abstract_hparams(cls, path_with_fname, ftype.is_list, options)
else:
if ftype.is_list:
output[f.name] = [{inverted_hparams[type(x)]: x.to_dict()} for x in ensure_tuple(default)]
else:
output[f.name] = {inverted_hparams[type(default)]: default.to_dict()}
if options.add_docs:
_add_commenting(cm=output,
comment_key=f.name,
eol_comment=f"{str(ftype): >20}{optional_prefix}.{helptext_suffix}{default_suffix}",
typing_column=options.typing_column,
choices=choices)
return output
|
import sqlite3
import os
history_limit = 30
home = os.path.join(os.path.expanduser('~'), ".piepresto")
os.makedirs(home, exist_ok=True)
lite_db = os.path.join(home, "history.db")
history_table = """
CREATE TABLE IF NOT EXISTS history(
sql TEXT PRIMARY KEY,
update_time datetime default current_timestamp
)
"""
get_history = """
SELECT sql FROM history ORDER BY update_time DESC LIMIT {}
""".format(history_limit)
upsert_history = """
INSERT INTO history(sql) VALUES (?) ON CONFLICT(sql) DO UPDATE SET update_time = current_timestamp
"""
class DBLite():
def __init__(self):
self.connection = sqlite3.connect(lite_db)
self.connection.execute(history_table)
def history(self):
cursor = self.connection.execute(get_history)
return [row[0] for row in cursor]
def upsert(self, stmt):
self.connection.execute(upsert_history, (stmt,))
self.connection.commit()
def close(self):
if self.connection:
self.connection.close()
def __del__(self):
self.close()
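# Minimal usage sketch (illustrative):
# db = DBLite()
# db.upsert("SELECT 1")
# print(db.history())   # most recent statements first, capped at history_limit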
|
import abc
from nesim.frame import Frame
from typing import Dict, List
from pathlib import Path
from nesim.devices.send_receiver import SendReceiver
from nesim.devices.cable import DuplexCableHead
from nesim.devices.device import Device
class MultiplePortDevice(Device, metaclass=abc.ABCMeta):
"""Representa un dispositivo que contiene múltiples puertos.
Parameters
----------
name : str
Nombre del dispositivo
ports_count : int
Cantidad de puertos
signal_time : int
``Signal time`` de la simulación
"""
def __init__(self, name: str, ports_count: int, signal_time: int):
self.signa_time = signal_time
self._updating = False
ports = {}
for i in range(ports_count):
ports[f'{name}_{i+1}'] = self.create_send_receiver(i)
self.ports_buffer = [[] for _ in range(ports_count)]
self.mac_table: Dict[int, str] = {}
super().__init__(name, ports)
@property
def is_active(self):
"""bool : Estado del switch"""
return any([sr.is_active for sr in self.ports.values()])
def save_log(self, path=''):
output_folder = Path(path)
output_folder.mkdir(parents=True, exist_ok=True)
output_path = output_folder / Path(f'{self.name}.txt')
with open(str(output_path), 'w+') as file:
header = f'| {"Time (ms)": ^10} |'
for port in self.ports.keys():
header += f' {port: ^11} |'
header_len = len(header)
header += f'\n| {"": ^10} |'
for port in self.ports.keys():
header += f' {"Rece . Sent": ^11} |'
file.write(f'{"-" * header_len}\n')
file.write(f'{header}\n')
file.write(f'{"-" * header_len}\n')
file.write('\n'.join(self.logs))
file.write(f'\n{"-" * header_len}\n')
def special_log(self, time: int, received: List[int], sent: List[int]):
"""
Representación especial para los logs de los switch.
Parameters
----------
time : int
            Tiempo de ejecución de la simulación.
received : List[int]
Lista de bits recibidos por cada puerto.
sent : List[int]
Lista de bits enviados por cada puerto.
"""
log_msg = f'| {time: ^10} |'
for bit_re, bit_se in zip(received, sent):
if bit_re == '-' and bit_se == '-':
log_msg += f' {"---" : ^11} |'
else:
log_msg += f' {bit_re :>4} . {bit_se: <4} |'
self.logs.append(log_msg)
def broadcast(self, from_port, data):
"""Envia un frame por todos los puertos.
Parameters
----------
from_port : str
Puerto del cual se transmite la información.
data : List[List[int]]
Frame a ser enviado.
"""
for port, send_receiver in self.ports.items():
if port != from_port and send_receiver.cable_head is not None:
send_receiver.send(data)
def reset(self):
pass
def update(self, time: int)-> None:
for send_receiver in self.ports.values():
send_receiver.update()
super().update(time)
def receive(self) -> None:
"""
Ordena a todos los puertos a recibir la información que les
        esté llegando. (Leer del cable)
"""
for send_receiver in self.ports.values():
if send_receiver.cable_head is not None:
send_receiver.receive()
received = [self.get_port_value(p) for p in self.ports]
sent = [self.get_port_value(p, False) for p in self.ports]
self.special_log(self.sim_time, received, sent)
@abc.abstractmethod
def on_frame_received(self, frame: Frame, port: str) -> None:
"""Este método se ejecuta cada vez que se recibe un frame en
uno de los puertos.
Parameters
----------
frame : Frame
Frame recibido.
port : str
Puerto por el cual llegó el frame.
"""
def handle_buffer_data(self, port: str) -> None:
"""Se encarga de procesar los datos en el buffer de un puerto.
Parameters
----------
port : str
Nombre del puerto
"""
data = self.ports_buffer[port]
frame = Frame(data)
if not frame.is_valid:
return
self.on_frame_received(frame, port + 1)
self.ports_buffer[port] = []
def get_port_value(self, port_name: str, received: bool = True):
"""
Devuelve el valor del cable conectado a un puerto dado. En caso de no
tener un cable conectado devuelve ``'-'``.
Parameters
----------
port_name : str
Nombre del puerto.
"""
send_receiver = self.ports[port_name]
bit = None
if send_receiver.cable_head is not None:
if received:
bit = send_receiver.cable_head.receive_value
else:
bit = send_receiver.cable_head.send_value
return str(bit) if bit is not None else '-'
def receive_on_port(self, port: str, bit: int):
"""Guarda el bit recibido en un puerto y procesa los datos del mismo.
Parameters
----------
port : str
Nombre del puerto.
bit : int
Bit recibido
"""
self.ports_buffer[port].append(bit)
self.handle_buffer_data(port)
def create_send_receiver(self, port: str):
"""Crea un ``SendReceiver``.
Parameters
----------
port : str
Puerto al que será asignado el ``SendReceiver``.
Returns
-------
SendReceiver
``SendReceiver`` creado.
"""
send_receiver = SendReceiver(self.signa_time, None)
send_receiver.on_receive.append(
lambda bit : self.receive_on_port(port, bit)
)
return send_receiver
def connect(self, cable_head: DuplexCableHead, port_name: str):
send_receiver = self.ports[port_name]
if send_receiver.cable_head is not None:
raise ValueError(f'Port {port_name} is currently in use.')
send_receiver.cable_head = cable_head
def disconnect(self, port_name: str):
self.ports_buffer[list(self.ports.keys()).index(port_name)] = []
self.ports[port_name].disconnect()
|
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import webapp2
from frontend.handlers import cracas_dashboard
from frontend.handlers import crash_config
from frontend.handlers import crash_handler
from frontend.handlers import cracas_result_feedback
from frontend.handlers import fracas_dashboard
from frontend.handlers import fracas_result_feedback
from frontend.handlers import triage_analysis
from frontend.handlers import update_component_config
frontend_web_pages_handler_mappings = [
('/config', crash_config.CrashConfig),
('/update-component-config',
update_component_config.UpdateComponentConfig),
('/cracas-dashboard', cracas_dashboard.CracasDashBoard),
('/cracas-result-feedback',
cracas_result_feedback.CracasResultFeedback),
('/fracas-dashboard', fracas_dashboard.FracasDashBoard),
('/fracas-result-feedback',
fracas_result_feedback.FracasResultFeedback),
('/triage-analysis',
triage_analysis.TriageAnalysis),
('/_ah/push-handlers/crash/fracas', crash_handler.CrashHandler),
('/_ah/push-handlers/crash/cracas', crash_handler.CrashHandler),
('/_ah/push-handlers/crash/clusterfuzz', crash_handler.CrashHandler),
]
frontend_app = webapp2.WSGIApplication(
frontend_web_pages_handler_mappings, debug=False)
|
from dataclasses import dataclass, field
from enum import Enum
from typing import Optional
from xsdata.models.datatype import XmlDuration
__NAMESPACE__ = "NISTSchema-SV-IV-atomic-duration-enumeration-2-NS"
class NistschemaSvIvAtomicDurationEnumeration2Type(Enum):
P2030_Y06_M26_DT21_H55_M47_S = XmlDuration("P2030Y06M26DT21H55M47S")
P1979_Y03_M06_DT16_H39_M48_S = XmlDuration("P1979Y03M06DT16H39M48S")
P1987_Y06_M06_DT18_H56_M03_S = XmlDuration("P1987Y06M06DT18H56M03S")
P1977_Y04_M02_DT05_H48_M43_S = XmlDuration("P1977Y04M02DT05H48M43S")
P1995_Y02_M01_DT05_H15_M19_S = XmlDuration("P1995Y02M01DT05H15M19S")
P2019_Y06_M07_DT15_H23_M38_S = XmlDuration("P2019Y06M07DT15H23M38S")
P1976_Y12_M13_DT09_H35_M31_S = XmlDuration("P1976Y12M13DT09H35M31S")
P1989_Y03_M16_DT04_H44_M26_S = XmlDuration("P1989Y03M16DT04H44M26S")
P1993_Y12_M14_DT04_H03_M02_S = XmlDuration("P1993Y12M14DT04H03M02S")
@dataclass
class NistschemaSvIvAtomicDurationEnumeration2:
class Meta:
name = "NISTSchema-SV-IV-atomic-duration-enumeration-2"
namespace = "NISTSchema-SV-IV-atomic-duration-enumeration-2-NS"
value: Optional[NistschemaSvIvAtomicDurationEnumeration2Type] = field(
default=None,
metadata={
"required": True,
}
)
|
from rest_framework import serializers
from bullet_point.models import BulletPoint, Endorsement, Flag, Vote
from user.serializers import UserSerializer
from utils.http import get_user_from_request
class EndorsementSerializer(serializers.ModelSerializer):
bullet_point = serializers.PrimaryKeyRelatedField(
many=False,
read_only=True
)
created_by = UserSerializer(
read_only=False,
default=serializers.CurrentUserDefault()
)
class Meta:
fields = [
'bullet_point',
'created_by',
'created_date',
]
model = Endorsement
class FlagSerializer(serializers.ModelSerializer):
bullet_point = serializers.PrimaryKeyRelatedField(
many=False,
read_only=True
)
created_by = UserSerializer(
read_only=False,
default=serializers.CurrentUserDefault()
)
class Meta:
fields = [
'bullet_point',
'created_by',
'created_date',
'reason',
]
model = Flag
class BulletPointSerializer(serializers.ModelSerializer):
tail_created_by = serializers.SerializerMethodField()
tail_editors = serializers.SerializerMethodField()
created_by = UserSerializer(
read_only=False,
default=serializers.CurrentUserDefault()
)
editors = serializers.SerializerMethodField()
score = serializers.SerializerMethodField()
user_vote = serializers.SerializerMethodField()
promoted = serializers.SerializerMethodField()
paper_slug = serializers.SerializerMethodField()
endorsements = EndorsementSerializer(read_only=True, many=True)
flags = FlagSerializer(read_only=True, many=True)
class Meta:
model = BulletPoint
exclude = []
read_only_fields = [
'is_head',
'is_tail',
'previous',
'tail',
]
def get_tail_created_by(self, obj):
if obj.is_tail:
tail = obj
else:
tail = obj.tail
return UserSerializer(tail.created_by).data
def get_tail_editors(self, obj):
if obj.is_tail:
tail = obj
else:
tail = obj.tail
return self.get_editors(tail)
def get_editors(self, obj):
return UserSerializer(obj.editors, many=True).data
def get_score(self, obj):
return obj.calculate_score()
def get_user_vote(self, obj):
user = get_user_from_request(self.context)
if user and not user.is_anonymous:
vote = obj.votes.filter(created_by=user)
if vote.exists():
return BulletPointVoteSerializer(vote.last()).data
return False
return False
def get_promoted(self, obj):
if self.context.get('exclude_promoted_score', False):
return None
return obj.get_promoted_score()
def get_paper_slug(self, obj):
if obj.paper:
return obj.paper.slug
class BulletPointTextOnlySerializer(serializers.ModelSerializer):
paper = serializers.PrimaryKeyRelatedField(many=False, read_only=True)
class Meta:
model = BulletPoint
fields = [
'is_head',
'is_public',
'ordinal',
'paper',
'plain_text',
'text',
]
read_only_fields = fields
class BulletPointVoteSerializer(serializers.ModelSerializer):
bullet_point = serializers.SerializerMethodField()
class Meta:
fields = [
'id',
'created_by',
'created_date',
'vote_type',
'bullet_point',
]
model = Vote
def get_bullet_point(self, obj):
if self.context.get('include_bullet_data', False):
serializer = BulletPointSerializer(obj.bulletpoint)
return serializer.data
return None
|
# Loads configuration file
from ConfigParser import ConfigParser
cfg = ConfigParser()
cfg.readfp(open('apps.cfg'))
# General variables
COINSCOPED_API_ADDR = cfg.get('general', 'coinscoped_api_addr')
COINSCOPED_API_PORT = int(cfg.get('general', 'coinscoped_api_port'))
# Nethealth
MAINNET_PORT = 8333
TESTNET_PORT = 18333
HALF_TARGET = 0.5
TARGET = float(cfg.get('nethealth', 'target'))
WIPE_TIME = int(cfg.get('nethealth', 'wipe_time'))
WIPE_DELAY = int(cfg.get('nethealth', 'wipe_delay'))
CARBON_SERVER = cfg.get('nethealth', 'carbon_server')
CARBON_PICKLE_PORT = int(cfg.get('nethealth', 'carbon_pickle_port'))
UPDATE_DELAY = int(cfg.get('nethealth', 'update_delay'))
|
# Generated by Django 2.1.5 on 2019-02-11 16:40
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [("iert_news", "0004_auto_20190211_1631")]
operations = [migrations.RenameModel(old_name="news", new_name="new")]
|
import os
import warnings
import pytest
from ...utils import LightkurveDeprecationWarning, LightkurveError
from ... import PACKAGEDIR, KeplerTargetPixelFile, TessTargetPixelFile
from .. import read
def test_read():
# define paths to k2 and tess data
k2_path = os.path.join(PACKAGEDIR, "tests", "data", "test-tpf-star.fits")
tess_path = os.path.join(PACKAGEDIR, "tests", "data", "tess25155310-s01-first-cadences.fits.gz")
# Ensure files are read in as the correct object
k2tpf = read(k2_path)
assert(isinstance(k2tpf, KeplerTargetPixelFile))
tesstpf = read(tess_path)
assert(isinstance(tesstpf, TessTargetPixelFile))
# Open should fail if the filetype is not recognized
try:
read(os.path.join(PACKAGEDIR, "data", "lightkurve.mplstyle"))
except LightkurveError:
pass
# Can you instantiate with a path?
assert(isinstance(KeplerTargetPixelFile(k2_path), KeplerTargetPixelFile))
assert(isinstance(TessTargetPixelFile(tess_path), TessTargetPixelFile))
# Can open take a quality_bitmask argument?
assert(read(k2_path, quality_bitmask='hard').quality_bitmask == 'hard')
def test_open():
"""Does the deprecated `open` function still work?"""
from .. import open
with warnings.catch_warnings(): # lk.open is deprecated
warnings.simplefilter("ignore", LightkurveDeprecationWarning)
# define paths to k2 and tess data
k2_path = os.path.join(PACKAGEDIR, "tests", "data", "test-tpf-star.fits")
tess_path = os.path.join(PACKAGEDIR, "tests", "data", "tess25155310-s01-first-cadences.fits.gz")
# Ensure files are read in as the correct object
k2tpf = open(k2_path)
assert(isinstance(k2tpf, KeplerTargetPixelFile))
tesstpf = open(tess_path)
assert(isinstance(tesstpf, TessTargetPixelFile))
        # Opening should fail if the filetype is not recognized
        with pytest.raises(LightkurveError):
            open(os.path.join(PACKAGEDIR, "data", "lightkurve.mplstyle"))
# Can you instantiate with a path?
assert(isinstance(KeplerTargetPixelFile(k2_path), KeplerTargetPixelFile))
assert(isinstance(TessTargetPixelFile(tess_path), TessTargetPixelFile))
# Can open take a quality_bitmask argument?
assert(open(k2_path, quality_bitmask='hard').quality_bitmask == 'hard')
def test_filenotfound():
"""Regression test for #540; ensure lk.read() yields `FileNotFoundError`."""
with pytest.raises(FileNotFoundError):
read("DOESNOTEXIST")
|
import re
from eth_utils import (
is_string,
is_list_like,
)
from .events import (
construct_event_topic_set,
construct_event_data_set,
)
from web3.utils.validation import (
validate_address,
)
def construct_event_filter_params(event_abi,
contract_address=None,
argument_filters=None,
topics=None,
fromBlock=None,
toBlock=None,
address=None):
filter_params = {}
if topics is None:
topic_set = construct_event_topic_set(event_abi, argument_filters)
else:
topic_set = [topics] + construct_event_topic_set(event_abi, argument_filters)
if len(topic_set) == 1 and is_list_like(topic_set[0]):
filter_params['topics'] = topic_set[0]
else:
filter_params['topics'] = topic_set
if address and contract_address:
if is_list_like(address):
filter_params['address'] = address + [contract_address]
elif is_string(address):
filter_params['address'] = [address, contract_address]
else:
raise ValueError(
"Unsupported type for `address` parameter: {0}".format(type(address))
)
elif address:
filter_params['address'] = address
elif contract_address:
filter_params['address'] = contract_address
if 'address' not in filter_params:
pass
elif is_list_like(filter_params['address']):
for addr in filter_params['address']:
validate_address(addr)
else:
validate_address(filter_params['address'])
if fromBlock is not None:
filter_params['fromBlock'] = fromBlock
if toBlock is not None:
filter_params['toBlock'] = toBlock
data_filters_set = construct_event_data_set(event_abi, argument_filters)
return data_filters_set, filter_params
class Filter:
callbacks = None
running = None
stopped = False
poll_interval = None
filter_id = None
def __init__(self, web3, filter_id):
self.web3 = web3
self.filter_id = filter_id
self.callbacks = []
super(Filter, self).__init__()
def __str__(self):
return "Filter for {0}".format(self.filter_id)
def format_entry(self, entry):
"""
Hook for subclasses to change the format of the value that is passed
into the callback functions.
"""
return entry
def is_valid_entry(self, entry):
"""
Hook for subclasses to implement additional filtering layers.
"""
return True
def _filter_valid_entries(self, entries):
return filter(self.is_valid_entry, entries)
def get_new_entries(self):
self._ensure_not_running("get_new_entries")
log_entries = self._filter_valid_entries(self.web3.eth.getFilterChanges(self.filter_id))
return self._format_log_entries(log_entries)
def get_all_entries(self):
self._ensure_not_running("get_all_entries")
log_entries = self._filter_valid_entries(self.web3.eth.getFilterLogs(self.filter_id))
return self._format_log_entries(log_entries)
class BlockFilter(Filter):
pass
class TransactionFilter(Filter):
pass
ZERO_32BYTES = '[a-f0-9]{64}'
def construct_data_filter_regex(data_filter_set):
return re.compile((
'^' +
'|'.join((
'0x' + ''.join(
(ZERO_32BYTES if v is None else v[2:] for v in data_filter)
)
for data_filter in data_filter_set
)) +
'$'
))
class LogFilter(Filter):
data_filter_set = None
data_filter_set_regex = None
log_entry_formatter = None
def __init__(self, *args, **kwargs):
self.log_entry_formatter = kwargs.pop(
'log_entry_formatter',
self.log_entry_formatter,
)
if 'data_filter_set' in kwargs:
self.set_data_filters(kwargs.pop('data_filter_set'))
super(LogFilter, self).__init__(*args, **kwargs)
def _ensure_not_running(self, method_name):
if self.running:
raise ValueError(
"Cannot call `{0}` on a filter object which is actively watching"
.format(method_name)
)
def _format_log_entries(self, log_entries=None):
if log_entries is None:
log_entries = []
formatted_log_entries = [
self.format_entry(log_entry) for log_entry in log_entries
]
return formatted_log_entries
def format_entry(self, entry):
if self.log_entry_formatter:
return self.log_entry_formatter(entry)
return entry
def set_data_filters(self, data_filter_set):
self.data_filter_set = data_filter_set
if any(data_filter_set):
self.data_filter_set_regex = construct_data_filter_regex(
data_filter_set,
)
def is_valid_entry(self, entry):
if not self.data_filter_set_regex:
return True
return bool(self.data_filter_set_regex.match(entry['data']))
class ShhFilter(Filter):
pass
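# Illustrative sketch (not part of the original module) of how the helpers above
# fit together. `web3`, `event_abi`, `contract_address`, and `filter_id` are
# assumed to exist already; in particular, `filter_id` would come from an
# eth_newFilter request registered with the node elsewhere.
def _example_log_filter(web3, event_abi, contract_address, filter_id):
    # Build the eth_newFilter parameters (topics/address) plus the filters for
    # the non-indexed arguments carried in the log's data section.
    data_filters, filter_params = construct_event_filter_params(
        event_abi,
        contract_address=contract_address,
    )
    # `filter_params` is what would be sent to the node when registering the
    # filter; `data_filters` drives the client-side post-filtering below.
    log_filter = LogFilter(web3, filter_id)
    log_filter.set_data_filters(data_filters)
    # Entries whose data section does not match the argument filters are
    # dropped by LogFilter.is_valid_entry before being returned.
    return log_filter.get_new_entries()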
|
from app import app
def run():
print("""
+===================================+
¦ Parkwood Vale Harriers Webapp ¦
+===================================+
¦ Stop the application by either ¦
¦ closing the console window, or ¦
¦ pressing CTRL+C. ¦
+===================================+
¦ Made by Christopher Stevens ¦
¦ GITHUB/PUBLIC VERSION ¦
+===================================+
""")
app.run(debug=True, use_reloader=False)
if __name__ == "__main__":
run() |
default_app_config = "pinaxcon.registrasion.apps.RegistrasionConfig"
|
from control import *
emergency() |
#!/usr/bin/env python
#
# (c) Copyright Rosetta Commons Member Institutions.
# (c) This file is part of the Rosetta software suite and is made available under license.
# (c) The Rosetta software is developed by the contributing members of the Rosetta Commons.
# (c) For more information, see http://www.rosettacommons.org. Questions about this can be
# (c) addressed to University of Washington CoMotion, email: [email protected].
## @file /GUIs/pyrosetta_toolkit/modules/DesignBreakdown.py
## @brief Class for analyzing design results from a fasta of sequences
## @author Jared Adolf-Bryfogle ([email protected])
#Rosetta Imports
from rosetta import *
#Python Imports
import re
import os
import sqlite3
import sys
from optparse import OptionParser, IndentedHelpFormatter
#Tkinter Imports
from tkinter import *
import tkinter.filedialog
import tkinter.simpledialog
#Toolkit Imports
from jade2.pyrosetta_toolkit.modules.prettytable.prettytable import *
from jade2.pyrosetta_toolkit.modules.definitions.restype_definitions import definitions
class DesignBreakdown:
"""
This class functions in organizing results for a Rosetta design run. Perhaps you can do this using features. Probably. Anyhow, this will work for the GUI.
It can output a text file, a database, and run an R script to graph the results.
It also has some GUI functions, but can be run as an independant script.
FASTA should have > format: pdbID region pdbpath OR pdbID pdbpath for whole structure comparisons. (region designated as start:end:chain)
LIMITATIONS:
1) Currently, it does not deal with extensions or deletions in the designed poses.
2) Currently, No DNA, polymers, or other non-cannonicals.
3) Currently, Does not print or output decoys with mutations of x, y, z at positions a, b, c
However, it does output a raw_data table in the SQLITE3 database, which will allow advanced querys to get this answer.
"""
def __init__(self, fasta_path, reference_path, output_directory=False, region=False):
self.output_directory = output_directory
self.sequences = []; # List of SequenceInfo objects
self.reference_path = reference_path
self.reference_sequence = None
self.reference_pose = Pose()
self.main_region = region
self.regions = dict()
self.load_sequences(fasta_path)
self.results = SequenceResults()
self.aa_codes = definitions().get_all_one_letter_codes()
#Calculate - If return number is 0, exit.
if not self.calculate_results():
return
#Data Output
if not self.output_directory:
self.output_directory = os.path.dirname(fasta_path)+'/'+os.path.basename(fasta_path)+"_RESULTS"
if not os.path.exists(self.output_directory):
os.mkdir(self.output_directory)
print("Outputting results to: "+self.output_directory)
def run_outputs(self):
self.output_basic_table()
self.output_prettytable()
self.output_database()
self.output_plots()
def load_sequences(self, fasta_path):
"""
Opens Fasta file and appends a list of SequenceInfo objects.
"""
FILE = open(fasta_path, 'r')
for line in FILE:
if re.search(">", line):
info = line.strip()
info = info[1:]
infoSP = info.split()
Seq = SequenceInfo()
if len(infoSP)<2:continue
elif len(infoSP)==2:
Seq.set_pdbID(infoSP[0])
Seq.set_pdbpath(infoSP[1])
elif len(infoSP)==3:
Seq.set_pdbID(infoSP[0])
Seq.set_region(infoSP[1])
self.regions[infoSP[1]]=""
Seq.set_pdbpath(infoSP[2])
self.sequences.append(Seq)
continue
line = line.strip()
if not line:continue
print(line)
self.sequences[-1].set_sequence(line)
FILE.close()
print("Sequences Loaded")
def calculate_results(self):
print("Calculating Results")
pose_from_file(self.reference_pose, self.reference_path)
print(self.reference_pose)
self.reference_sequence = self.reference_pose.sequence()
print(self.reference_sequence)
for i in range(0, self.reference_pose.total_residue()):
self.results.add_reference_residue(i+1, self.reference_sequence[i])
if not self.sequences:return 0
#Check to make sure reference pose matches length of all sequences
#Check that all sequences are the same length
if not self.regions:
            if not self.are_sequences_same_length():
print("Sequences are not same length, and no region was specified in fasta. Cannot continue")
return 0
            if (self.reference_pose.total_residue() != self.sequences[0].get_length()):
                print("Sequence length of Fasta does not match sequence length of reference pose.")
                region = self.main_region
                if not region:
                    region = tkinter.simpledialog.askstring(title="Region", prompt ="Please enter a region: start end chain for PDB numbering or start end for Rosetta numbering")
                if not region:return 0
                regionSP = region.split()
                #Make sure it's entered correctly. If not, try one more time before returning.
                if len(regionSP) not in (2, 3):
                    print("Please re-enter region.")
                    region = tkinter.simpledialog.askstring(title="Region", prompt ="Please enter a region: start end chain for PDB numbering or start end for Rosetta numbering")
                    if not region:return
                    regionSP = region.split()
                    if len(regionSP) not in (2, 3):print("Region not recognized. Returning."); return
                if len(regionSP)==3:
                    self.regions[":".join(regionSP)]=""
                    for Seq in self.sequences:
                        Seq.set_region(":".join(regionSP))
                elif len(regionSP)==2:
                    if self.reference_pose.pdb_info().pose2pdb(int(regionSP[0])).split()[1]== self.reference_pose.pdb_info().pose2pdb(int(regionSP[1])).split()[1]:
                        print("One chain specified.")
                        chain = self.reference_pose.pdb_info().pose2pdb(int(regionSP[0])).split()[1]
                        pdb_start = self.reference_pose.pdb_info().pose2pdb(int(regionSP[0])).split()[0]
                        pdb_end = self.reference_pose.pdb_info().pose2pdb(int(regionSP[1])).split()[0]
                        self.regions[":".join(regionSP)]=""
                        for Seq in self.sequences:
                            Seq.set_region(":".join(regionSP))
                    else:
                        print("Multiple chains in region found. Splitting sequences to match regions.")
                        self.split_region_and_fix_sequences(int(regionSP[0]), int(regionSP[1]))
#Calculate Results
if not self.regions:
l = len(self.sequences[0].get_sequence())
for Seq in self.sequences:
                for i in range(1, l+1):
residue = Seq.get_residue(i)
self.results.add_residue(i, residue, Seq.get_pdbpath())
else:
if not self.are_sequences_for_regions_same_length():
return 0
for region in self.regions:
print(region)
regionSP = region.split(":")
for Seq in self.sequences:
#print Seq.get_sequence()
if Seq.get_region()==region:
#This is so that if there are missing numbers in the PDB between start:end in the region:
start = self.reference_pose.pdb_info().pdb2pose(Seq.get_chain(), Seq.get_start_residue())
for i in range(0, Seq.get_length()):
print(i)
self.results.add_residue(start+i, Seq.get_residue(Seq.get_start_residue()+i), Seq.get_pdbpath())
return 1
def output_basic_table(self):
"""
Outputs a basic table of all data for importing into Excel, R, or other script. Tab delimited.
"""
resnums = self.results.get_all_residue_numbers()
reference_line = "#\t"
resnum_line = "\t"
conserved_line = "#\t"
OUTFILE = open(self.output_directory+"/RAW_DESIGN_TABLE.txt", 'w')
OUTFILE.write("# TOTAL_SEQUENCES "+repr(len(self.sequences))+"\n")
for num in resnums:
pdb_num = self.reference_pose.pdb_info().pose2pdb(num)
SP = pdb_num.split()
            pdb_num = SP[0]+SP[1]; #Now it is resnum+chain like 10A 11B etc.
resnum_line = resnum_line+pdb_num+"\t"
if self.reference_sequence:
reference_line = reference_line+self.results.get_reference_residue(num)+"\t"
conserved_line = conserved_line+self.results.get_percent_string(num, self.results.get_reference_residue(num))+"\t"
if self.reference_sequence:
OUTFILE.write(reference_line+"\n")
OUTFILE.write(conserved_line+"\n")
OUTFILE.write(resnum_line+"\n")
for aa in self.aa_codes:
line = aa+"\t"
for num in resnums:
line=line+self.results.get_percent_string(num, aa)+"\t"
OUTFILE.write(line+"\n")
print("Raw file written to RAW_DESIGN_TABLE.txt")
OUTFILE.close()
def output_prettytable(self):
OUTFILE = open(self.output_directory+"/PRETTY_DESIGN_TABLE.txt", 'w')
OUTFILE.write("# TOTAL_SEQUENCES "+repr(len(self.sequences))+"\n")
resnums = self.results.get_all_residue_numbers()
main_row = ["residue"]
conserved_row = ["conserved"]
reference_row = ["reference"]
if not self.regions:
for num in resnums:
main_row.append(self.get_correct_pdb_number_string(num))
if self.reference_sequence:
reference_row.append(self.results.get_reference_residue(num))
conserved_row.append(self.results.get_percent_string(num, self.results.get_reference_residue(num)))
table = PrettyTable(main_row)
if self.reference_sequence:
table.add_row(reference_row)
table.add_row(conserved_row)
for aa in self.aa_codes:
row = [aa]
for num in resnums:
row.append(self.results.get_percent_string(num, aa))
table.add_row(row)
out_string = table.get_string()
OUTFILE.write(out_string)
else:
for region in self.regions:
OUTFILE.write('# REGION '+region+"\n")
for num in resnums:
if not self.check_if_rosetta_resnum_is_part_of_region(num, region):
continue
main_row.append(self.get_correct_pdb_number_string(num))
if self.reference_sequence:
reference_row.append(self.results.get_reference_residue(num))
conserved_row.append(self.results.get_percent_string(num, self.results.get_reference_residue(num)))
table = PrettyTable(main_row)
if self.reference_sequence:
table.add_row(reference_row)
table.add_row(conserved_row)
for aa in self.aa_codes:
row = [aa]
for num in resnums:
if not self.check_if_rosetta_resnum_is_part_of_region(num, region):
continue
row.append(self.results.get_percent_string(num, aa))
table.add_row(row)
out_string = table.get_string()
OUTFILE.write(out_string)
print("PrettyTable file written to PRETTY_DESIGN_TABLE.txt")
OUTFILE.close()
def output_database(self):
self.db_path = self.output_directory+"/SQL_DESIGN_TABLE.db"
db = sqlite3.connect(self.db_path)
cur = db.cursor()
resnums = self.results.get_all_residue_numbers()
with db:
#Hard to look at, easy to execute queries on data:
#Like this: select * from design_data where prob>=.3 and type='design'. Awesomeness.
cur.execute("CREATE TABLE IF NOT EXISTS design_data(id integer PRIMARY KEY, region TEXT, type TEXT, pdb_position TEXT, rosetta_position INT, aa TEXT, prob REAL, freq INT, total_sequences INT, ref_name TEXT, decoys TEXT)")
if not self.regions:
i=0
for num in resnums:
i+=1
if self.reference_sequence:
cur.execute("INSERT INTO design_data VALUES(NULL, ?,?,?,?,?,?,?,?,?,?)", \
("full", "reference", self.get_correct_pdb_number_string(num), num, self.results.get_reference_residue(num), 1.00, 1, self.get_total_sequences(), self.reference_pose.pdb_info().name(), self.reference_pose.pdb_info().name()))
for aa in self.aa_codes:
i+=1
cur.execute("INSERT INTO design_data VALUES(NULL, ?,?,?,?,?,?,?,?,?,?)", \
("full", "design", self.get_correct_pdb_number_string(num), num, aa, self.results.get_percent(num, aa),self.results.get_freq(num, aa), \
self.get_total_sequences(), self.reference_pose.pdb_info().name(), ":".join(self.results.get_decoys_with_aa(num, aa))))
else:
i = 0
for region in self.regions:
for num in resnums:
i+=1
if not self.check_if_rosetta_resnum_is_part_of_region(num, region):
continue
cur.execute("INSERT INTO design_data VALUES(NULL, ?,?,?,?,?,?,?,?,?,?)", \
(region, "reference", self.get_correct_pdb_number_string(num), num, self.results.get_reference_residue(num), 1.00, 1, self.get_total_sequences(region), self.reference_pose.pdb_info().name(), self.reference_pose.pdb_info().name()))
for aa in self.aa_codes:
i+=1
cur.execute("INSERT INTO design_data VALUES(NULL, ?,?,?,?,?,?,?,?,?,?)", \
(region, "design", self.get_correct_pdb_number_string(num), num, aa, self.results.get_percent(num, aa),self.results.get_freq(num, aa), \
self.get_total_sequences(), self.reference_pose.pdb_info().name(),":".join(self.results.get_decoys_with_aa(num, aa))))
#Raw data table
#So you can query for combinations and get decoys with specific mutations at positions and above a probablity.
cur.execute("create table if not exists raw_data(id integer PRIMARY KEY, pdb_position TEXT, rosetta_position INT, aa TEXT, decoy TEXT)")
if not self.regions:
l = len(self.sequences[0].get_sequence())
x=0
for Seq in self.sequences:
                    for i in range(1, l+1):
x+=1
residue = Seq.get_residue(i)
cur.execute("INSERT INTO raw_data VALUES(NULL, ?,?,?,?)", \
(self.get_correct_pdb_number_string(i), i, residue, Seq.get_pdbpath()))
else:
x=0
for region in self.regions:
regionSP = region.split(":")
for Seq in self.sequences:
if Seq.get_region()==region:
#This is so that if there are missing numbers in the PDB between start:end in the region:
start = self.reference_pose.pdb_info().pdb2pose(Seq.get_chain(), Seq.get_start_residue())
for i in range(0, Seq.get_length()):
x+=1
num = start+i
cur.execute("INSERT INTO raw_data VALUES(NULL, ?,?,?,?)", \
(self.get_correct_pdb_number_string(num), num, Seq.get_residue(Seq.get_start_residue()+i), Seq.get_pdbpath()))
print("Database written to SQL_DESIGN_TABLE.db")
def output_plots(self):
script = self.location()+"/R_Scripts/DesignBreakdown.R"
os.system("Rscript "+script+' '+self.db_path+' '+self.output_directory)
print("Plots written to PLOTS.pdb for each region.")
### Helper Functions ###
def location(self):
"""
Allows the script to be self-aware of it's path.
So that it can be imported/ran from anywhere.
"""
p = os.path.abspath(__file__)
pathSP = os.path.split(p)
return pathSP[0]
def are_sequences_same_length(self):
"""
Determine if all items of the sequence list are the same number of residues
"""
return all(x.get_length() == self.sequences[0].get_length() for x in self.sequences)
def are_sequences_for_regions_same_length(self):
"""
Assertion that sequences are the same length given their region.
"""
#Setup dictionary for checking
region_Seq_map = dict()
for region in self.regions:
if region not in region_Seq_map:
region_Seq_map[region]=[]
for Seq in self.sequences:
region_Seq_map[Seq.get_region()].append(Seq)
#Check length for each region in dictionary.
same_length=True
for region in self.regions:
#for x in region_Seq_map[region]:
#print x.get_sequence()
if not all(x.get_length() == region_Seq_map[region][0].get_length() for x in region_Seq_map[region]):
print("Sequences for region "+region+" are not the same length.")
same_length=False
return same_length
def check_if_rosetta_resnum_is_part_of_region(self, resnum, region):
region_start = region.split(":")[0]
region_end = region.split(":")[1]
region_chain = region.split(":")[2]
pdb_num = self.reference_pose.pdb_info().pose2pdb(resnum)
SP = pdb_num.split()
pdb_num = SP[0]; pdb_chain = SP[1]
        if (int(region_start) <= int(pdb_num) <= int(region_end)) and pdb_chain==region_chain:
return True
else:
return False
def get_correct_pdb_number_string(self, resnum):
"""
Gets PDB numbering from pose numbering and switches order of chain and num. chain_num->num_chain
"""
pdb_num = self.reference_pose.pdb_info().pose2pdb(resnum)
SP = pdb_num.split()
        pdb_num_string = SP[0]+SP[1]; #Now it is resnum+chain like 10A 11B etc.
return pdb_num_string
def split_region_and_fix_sequences(self, start, end):
"""
Splits a Rosetta numbered region into PDB regions for each chain. Adds regions to self.regions, splits Seq in self.sequences.
"""
pass
def get_total_sequences(self, region=False):
if not region:
return len(self.sequences)
else:
l = 0
for Seq in self.sequences:
                if Seq.get_region()==region:
l+=1
return l
class SequenceResults:
"""
Simple class for holding, calculating, + accessing result data
Residue Numbers are in Rosetta numbering.
"""
def __init__(self):
self.data = dict()
self.reference = dict()
def add_residue(self, resnum, one_letter_code, decoy):
if resnum not in self.data:
self.data[resnum]=dict()
self.data[resnum][one_letter_code]=dict()
self.data[resnum][one_letter_code]['freq']=1
self.data[resnum][one_letter_code]['decoys']=[]
self.data[resnum][one_letter_code]['decoys'].append(decoy);#This is to keep track of which decoys have which mutation.
else:
if one_letter_code not in self.data[resnum]:
self.data[resnum][one_letter_code]=dict()
self.data[resnum][one_letter_code]['freq']=0
self.data[resnum][one_letter_code]['decoys']=[]
self.data[resnum][one_letter_code]['freq']+=1
self.data[resnum][one_letter_code]['decoys'].append(decoy)
def add_reference_residue(self, resnum, one_letter_code):
self.reference[resnum]=one_letter_code
def get_freq(self, resnum, one_letter_code):
try:
x = self.data[resnum][one_letter_code]['freq']
return x
except KeyError:
return 0
def get_total(self, resnum):
total = 0
for code in self.data[resnum]:
freq = self.get_freq(resnum, code)
total = total +freq
return total
def get_percent(self, resnum, one_letter_code):
total = self.get_total(resnum)
freq = self.get_freq(resnum, one_letter_code)
percent = float(freq)/float(total)
return percent
def get_percent_string(self, resnum, one_letter_code):
return "%.2f"%self.get_percent(resnum, one_letter_code)
def get_reference_residue(self, resnum):
return self.reference[resnum]
def get_all_residues_observed(self, resnum):
return sorted(self.data[resnum].keys())
def get_all_residue_numbers(self):
return sorted(self.data.keys())
def get_decoys_with_aa(self, resnum, one_letter_code):
"""
Returns all decoys with a specific mutation at a position.
"""
try:
return self.data[resnum][one_letter_code]['decoys']
except KeyError:
return []
def get_decoys_with_joint_aa(self, resnum_one_letter_code_pair):
"""
Will output decoys that have x, y, z mutations at positions a, b, c
"""
pass
### reference Comparison Functions ###
def get_all_mutated_positions(self):
mutated_positions = []
for resnum in self.data:
if resnum not in self.reference:
print("Position in data does not match position in reference")
if self.get_percent(resnum, self.reference[resnum])==1.0:
pass
else:
mutated_positions.append(resnum)
if mutated_positions:return mutated_positions
else:print("No mutations found")
def get_all_reference_percent_observed(self):
"""
Returns array of tripplets of [postion, one_letter_code, percent] of reference amino acid found.
"""
tripplet_array = []
for resnum in self.reference:
if resnum not in self.data:
print("Position in reference does not match any position in data")
percent = self.get_percent(resnum, self.reference[resnum])
tripplet = [resnum, self.reference[resnum], percent]
tripplet_array.append(tripplet)
return tripplet_array
class SequenceInfo:
"""
Simple class for holding + accessing sequence metadata
"""
def __init__(self):
self.start = ""
def get_sequence(self):
return self.sequence
def get_length(self):
return len(self.sequence)
def get_pdbID(self):
return self.pdbID
def get_pdbpath(self):
return self.pdbpath
def get_region(self):
return self.region
def get_start_residue(self):
return self.start
def get_end_residue(self):
return self.end
def get_chain(self):
return self.chain
def get_residue(self, resnum):
"""
If region is given, resnum is residue number of PDB
If not, resnum in Rosetta resnum
"""
if self.start:
index_num = resnum-self.start
print(index_num)
one_letter_code = self.sequence[index_num]
return one_letter_code
else:
#Rosetta Resnum
one_letter_code = self.sequence[int(resnum)-1]
return one_letter_code
def set_sequence(self, sequence):
print(sequence)
self.sequence = sequence
def set_pdbID(self, pdbID):
self.pdbID = pdbID
def set_pdbpath(self, pdbpath):
self.pdbpath = pdbpath
def set_region(self, region):
self.region = region
rSP = region.split(":")
self.start = int(rSP[0])
self.end = int(rSP[1])
self.chain = rSP[2]
if __name__ == '__main__':
"""
For testing and use outside of GUI.
"""
Tk()
rosetta.init()
parser = OptionParser()
args = sys.argv
parser.add_option("--fasta", "-f",
help = "Path to FASTA file for design comparisons"
)
parser.add_option("--outpath","-o",
default="/RESULTS",
help = "Full output directory path. Default is fasta file path /Results"
)
parser.add_option("--reference", "-r",
default=None,
help = "Reference pose for numbering and comparison (Required)"
)
parser.add_option("--region", "-g",
default=None,
help = "Region - if none is given in Fasta + Not whole structure used for comparison (region designated as start:end:chain)"
)
(options, args) = parser.parse_args(args=args[1:])
if not options.fasta or not os.path.exists(options.fasta):
sys.exit("Please specify a FASTA file to use for calculations.")
if options.outpath == "/RESULTS":
options.outpath = os.path.dirname(options.fasta)+"/RESULTS"
if not options.reference:
sys.exit("Reference pdb required.")
breakdown = DesignBreakdown(options.fasta, options.reference, options.outpath, options.region)
breakdown.run_outputs()
|
import streamlit as st
from streamlit_player import st_player
import cv2
import numpy as np
import tempfile
import time
from PIL import Image
############################################################
############################################################
import os
import collections
# comment out below line to enable tensorflow logging outputs
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import time
import tensorflow as tf
import core.yolov4
import core.utils as utils
from core.yolov4 import filter_boxes
from tensorflow.python.saved_model import tag_constants
from core.config import cfg
from PIL import Image
import cv2
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.compat.v1 import ConfigProto
from tensorflow.compat.v1 import InteractiveSession
from deep_sort import preprocessing, nn_matching
from deep_sort.detection import Detection
from deep_sort.tracker import Tracker
from tools import generate_detections as gdet
max_cosine_distance = 0.4
nn_budget = None
nms_max_overlap = 1.0
# initialize deep sort
model_filename = 'model_data/mars-small128.pb'
encoder = gdet.create_box_encoder(model_filename, batch_size=1)
# calculate cosine distance metric
metric = nn_matching.NearestNeighborDistanceMetric("cosine", max_cosine_distance, nn_budget)
# initialize tracker
tracker = Tracker(metric)
# load configuration for object detector
config = ConfigProto()
config.gpu_options.allow_growth = True
session = InteractiveSession(config=config)
STRIDES = np.array(cfg.YOLO.STRIDES)
ANCHORS = utils.get_anchors(cfg.YOLO.ANCHORS_TINY, True)
NUM_CLASS = len(utils.read_class_names(cfg.YOLO.CLASSES))
XYSCALE = cfg.YOLO.XYSCALE
FRAMEWORK = 'tf'
input_size = 416
video_path = './data/video/fall_sample2.mp4'
saved_model_loaded = tf.saved_model.load('./checkpoints/yolov4-416', tags=[tag_constants.SERVING])
infer = saved_model_loaded.signatures['serving_default']
############################################################
############################################################
DEMO_VIDEO = 'demo_video.mp4'
st.title('Fall Detection Application Using YOLO')
st.markdown(
"""
<style>
[data-testid="stSidebar"][aria-expanded="true"] > div:first-child {
width: 300px;
}
[data-testid="stSidebar"][aria-expanded="false"] > div:first-child {
width: 300px;
margin-left: -300px;
}
</style>
""",
unsafe_allow_html=True,
)
st.sidebar.title('Menu')
# st.sidebar.subheader('Parameters')
@st.cache()
def image_resize(image, width=None, height=None, inter=cv2.INTER_AREA):
# initialize the dimensions of the image to be resized and
# grab the image size
dim = None
(h, w) = image.shape[:2]
# if both the width and height are None, then return the
# original image
if width is None and height is None:
return image
# check to see if the width is None
if width is None:
# calculate the ratio of the height and construct the
# dimensions
r = height / float(h)
dim = (int(w * r), height)
# otherwise, the height is None
else:
# calculate the ratio of the width and construct the
# dimensions
r = width / float(w)
dim = (width, int(h * r))
# resize the image
resized = cv2.resize(image, dim, interpolation=inter)
# return the resized image
return resized
app_mode = st.sidebar.selectbox('Please Select',
['About', 'Sample Videos', 'Help', 'Run on Video']
)
if app_mode =='About':
st.markdown('''
This is an application for fall detection of individuals based on the **YOLO V.4** object detection algorithm.
The method used in this algorithm is suitable for detecting falls from a standing position or while walking. \n
This method is based on the proposed method in **Lu, K. L., & Chu, E. T. H. (2018).
An image-based fall detection system for the elderly. Applied Sciences, 8(10), 1995.**
''')
st.markdown(
"""
<style>
[data-testid="stSidebar"][aria-expanded="true"] > div:first-child {
width: 300px;
}
[data-testid="stSidebar"][aria-expanded="false"] > div:first-child {
width: 300px;
margin-left: -300px;
}
</style>
""",
unsafe_allow_html=True,
)
st.image('TEAM_LOGO.jpg')
elif app_mode == 'Sample Videos':
st.video('demo1.mp4', format='video/mp4', start_time=0)
st.video('demo2.mp4', format='video/mp4', start_time=0)
st.video('demo3.mp4', format='video/mp4', start_time=0)
st.video('demo4.mp4', format='video/mp4', start_time=0)
elif app_mode == 'Help':
st.markdown('''
    - The Ratio Factor is multiplied by the height of the person's bounding box measured
    1.5 seconds before each moment. If the current height of the bounding box is less than
    that product, the algorithm reports a falling-down occurrence.
    The suggested value is 5.5, but values between 5 and 7 are good choices. Higher values
    lead to more conservative results. \n
''')
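    # Worked example of the rule above (illustrative numbers only): with Ratio = 5.5,
    # a person whose bounding box was 400 px tall 1.5 s ago is flagged as fallen once
    # the current box height drops below (5.5 / 8) * 400 = 275 px. The division by 8
    # comes from the check applied in the 'Run on Video' branch below.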
st.markdown(
"""
<style>
[data-testid="stSidebar"][aria-expanded="true"] > div:first-child {
width: 300px;
}
[data-testid="stSidebar"][aria-expanded="false"] > div:first-child {
width: 300px;
margin-left: -300px;
}
</style>
""",
unsafe_allow_html=True,
)
####################################################################
####################################################################
elif app_mode == 'Run on Video':
st.set_option('deprecation.showfileUploaderEncoding', False)
st.sidebar.markdown('---')
ratio = st.sidebar.slider('Ratio', min_value=1.0, max_value=8.0, value=5.5, step=0.5)
st.sidebar.markdown('---')
st.markdown(' ## Output')
st.markdown(
"""
<style>
[data-testid="stSidebar"][aria-expanded="true"] > div:first-child {
width: 300px;
}
[data-testid="stSidebar"][aria-expanded="false"] > div:first-child {
width: 300px;
margin-left: -300px;
}
</style>
""",
unsafe_allow_html=True,
)
stframe = st.empty()
video_file_buffer = st.sidebar.file_uploader("Upload a video", type=['mp4'])
tffile = tempfile.NamedTemporaryFile(delete=False)
if not video_file_buffer:
vid = cv2.VideoCapture(DEMO_VIDEO)
tffile.name = DEMO_VIDEO
else:
tffile.write(video_file_buffer.read())
vid = cv2.VideoCapture(tffile.name)
width = int(vid.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps_input = int(vid.get(cv2.CAP_PROP_FPS))
# codec = cv2.VideoWriter_fourcc(*FLAGS.output_format)
codec = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter('output_res.avi', codec, fps_input, (width, height))
st.sidebar.text('Input Video')
st.sidebar.video(tffile.name)
fps = 0
i = 0
kpi1, kpi2, kpi3 = st.beta_columns(3)
with kpi1:
kpi1 = st.markdown("**Frame Rate**")
kpi1_text = st.markdown("0")
with kpi2:
st.markdown("**Tracked Individuals**")
kpi2_text = st.markdown("0")
with kpi3:
st.markdown("**Fall Detection Status**")
kpi3_text = st.markdown('')
kpi3_text.write(f"<h1 style='text-align: center; color: green;'>{'No Fall'}</h1>", unsafe_allow_html=True)
st.markdown("<hr/>", unsafe_allow_html=True)
###################################################
###################################################
frame_num = 0
# while video is running
# DEFINING A DICTIONARY FOR TRACKING
id_Locs = collections.defaultdict(list) # FOR METHOD THREE
id_ylocs = collections.defaultdict(list) # FOR METHOD ONE
yLocs = []
falls = 0
track_dict = dict()
frame_list = []
while vid.isOpened():
i += 1
ret, frame = vid.read()
        if ret:
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            image = Image.fromarray(frame)
        else:
            print('Video has ended or failed, try a different video format!')
            break
frame_num += 1
frame_size = frame.shape[:2]
image_data = cv2.resize(frame, (input_size, input_size))
image_data = image_data / 255.
image_data = image_data[np.newaxis, ...].astype(np.float32)
start_time = time.time()
batch_data = tf.constant(image_data)
pred_bbox = infer(batch_data)
for key, value in pred_bbox.items():
boxes = value[:, :, 0:4]
pred_conf = value[:, :, 4:]
boxes, scores, classes, valid_detections = tf.image.combined_non_max_suppression(
boxes=tf.reshape(boxes, (tf.shape(boxes)[0], -1, 1, 4)),
scores=tf.reshape(
pred_conf, (tf.shape(pred_conf)[0], -1, tf.shape(pred_conf)[-1])),
max_output_size_per_class=50,
max_total_size=50,
iou_threshold=0.3,
score_threshold=0.2
)
# convert data to numpy arrays and slice out unused elements
num_objects = valid_detections.numpy()[0]
bboxes = boxes.numpy()[0]
bboxes = bboxes[0:int(num_objects)]
scores = scores.numpy()[0]
scores = scores[0:int(num_objects)]
classes = classes.numpy()[0]
classes = classes[0:int(num_objects)]
# format bounding boxes from normalized ymin, xmin, ymax, xmax ---> xmin, ymin, width, height
original_h, original_w, _ = frame.shape
bboxes = utils.format_boxes(bboxes, original_h, original_w)
# store all predictions in one parameter for simplicity when calling functions
pred_bbox = [bboxes, scores, classes, num_objects]
# read in all class names from config
class_names = utils.read_class_names(cfg.YOLO.CLASSES)
# by default allow all classes in .names file
# allowed_classes = list(class_names.values())
# custom allowed classes (uncomment line below to customize tracker for only people)
allowed_classes = ['person']
# loop through objects and use class index to get class name, allow only classes in allowed_classes list
names = []
deleted_indx = []
for i in range(num_objects):
class_indx = int(classes[i])
class_name = class_names[class_indx]
if class_name not in allowed_classes:
deleted_indx.append(i)
else:
names.append(class_name)
names = np.array(names)
count = len(names)
# cv2.putText(frame, "Objects being tracked: {}".format(count), (5, 35), cv2.FONT_HERSHEY_COMPLEX_SMALL, 2,
# (0, 255, 0), 2)
# print("Objects being tracked: {}".format(count))
# delete detections that are not in allowed_classes
bboxes = np.delete(bboxes, deleted_indx, axis=0)
scores = np.delete(scores, deleted_indx, axis=0)
# encode yolo detections and feed to tracker
features = encoder(frame, bboxes)
detections = [Detection(bbox, score, class_name, feature) for bbox, score, class_name, feature in
zip(bboxes, scores, names, features)]
# initialize color map
cmap = plt.get_cmap('tab20b')
colors = [cmap(i)[:3] for i in np.linspace(0, 1, 20)]
# run non-maxima supression
boxs = np.array([d.tlwh for d in detections])
scores = np.array([d.confidence for d in detections])
classes = np.array([d.class_name for d in detections])
indices = preprocessing.non_max_suppression(boxs, classes, nms_max_overlap, scores)
detections = [detections[i] for i in indices]
# Call the tracker
tracker.predict()
tracker.update(detections)
# update tracks
for track in tracker.tracks:
if not track.is_confirmed() or track.time_since_update > 1:
continue
bbox = track.to_tlbr()
class_name = track.get_class()
# draw bbox on screen
color = colors[int(track.track_id) % len(colors)]
color = [i * 255 for i in color]
cv2.rectangle(frame, (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])), color, 2)
cv2.rectangle(frame, (int(bbox[0]), int(bbox[1] - 30)),
(int(bbox[0]) + (len(class_name) + len(str(track.track_id))) * 17, int(bbox[1])), color, -1)
# cv2.circle(frame, (int((bbox[0] + bbox[2]) / 2), int((bbox[1] + bbox[3]) / 2)), 5, color, -1)
# cv2.circle(frame, (int((bbox[0] + bbox[2]) / 2), int((bbox[1] + bbox[3]) / 2)), 15, (0, 255, 0), -1)
cv2.putText(frame, class_name + "-" + str(track.track_id), (int(bbox[0]), int(bbox[1] - 10)), 0, 0.75,
(255, 255, 255), 2)
#################################################
## PAPER METHOD FOR FALL DETECTION #############
#################################################
frameRate = 25
id_Locs[track.track_id].append([int(bbox[3] - bbox[1]), int(bbox[2] - bbox[0])])
for key, value in id_Locs.items():
                if len(value) > int(np.floor(frameRate * 1.5)):  # 1.5 seconds after detecting a person
# if value[-1][0] < (7/8) * value[-1 * int(np.floor(frameRate * 1.5))][0]:
# if value[-1][0] < (5.5 / 8) * value[-1 * int(np.floor(frameRate * 1.5))][0]:
if value[-1][0] < (ratio / 8) * value[-1 * int(np.floor(frameRate * 1.5))][0]:
print("Fall Detected")
cv2.putText(frame, "Person " + str(key) + " Fell Down", (70, 250), cv2.FONT_HERSHEY_PLAIN, 2,
(0, 0, 255), 3)
falls += 1
########################################################
# if enable, then print details about each track
# print("Tracker ID: {}, Class: {}, BBox Coords (xmin, ymin, xmax, ymax): {}".format(str(track.track_id),
# class_name, (
# int(bbox[0]),
# int(bbox[1]),
# int(bbox[2]),
# int(bbox[3]))))
each_id_list = [frame_num, str(track.track_id), int((bbox[0] + bbox[2]) / 2), int((bbox[1] + bbox[3]) / 2)]
frame_list.append(each_id_list)
# calculate frames per second of running detections
fps = 1.0 / (time.time() - start_time)
kpi1_text.write(f"<h1 style='text-align: center; color: red;'>{round(fps, 1)}</h1>", unsafe_allow_html=True)
kpi2_text.write(f"<h1 style='text-align: center; color: red;'>{count}</h1>", unsafe_allow_html=True)
if falls > 0:
cv2.putText(frame, "Fall Detected", (50, 100), cv2.FONT_HERSHEY_PLAIN, 3,
(255, 0, 0), 5)
kpi3_text.write(f"<h1 style='text-align: center; color: red;'>{'Fall Detected'}</h1>", unsafe_allow_html=True)
frame = cv2.resize(frame, (0, 0), fx=0.8, fy=0.8)
frame = image_resize(image=frame, width=640)
stframe.image(frame, channels='RGB', use_column_width=True)
out.write(frame)
vid.release()
out.release()
|
from prisma.models import Profile
# TODO: more tests
async def order() -> None:
# case: valid
await Profile.prisma().group_by(
['country'],
order={
'country': 'desc',
},
)
await Profile.prisma().group_by(
['country', 'city'],
order={
'country': 'desc',
},
)
# case: limitation
# this should error but it is not possible to both resolve the Mapping key type
# from the TypeVar and limit the number of fields allowed to 1. I would rather
# error if a non-grouped field is ordered by instead of if more than 1 field is ordered by
# as I expect the first case to be a more common error
await Profile.prisma().group_by(
['country', 'city'],
order={
'country': 'desc',
'city': 'asc',
},
)
# case: can only order by grouped fields
await Profile.prisma().group_by(
['city'],
order={ # E: Argument of type "dict[str, str]" cannot be assigned to parameter "order" of type "Mapping[ProfileScalarFieldKeysT@group_by, SortOrder] | List[Mapping[ProfileScalarFieldKeysT@group_by, SortOrder]] | None" in function "group_by"
'country': 'desc',
},
)
# case: invalid sort order
await Profile.prisma().group_by(
['country'],
order={ # E: Argument of type "dict[str, str]" cannot be assigned to parameter "order" of type "Mapping[ProfileScalarFieldKeysT@group_by, SortOrder] | List[Mapping[ProfileScalarFieldKeysT@group_by, SortOrder]] | None" in function "group_by"
'country': 'foo',
},
)
|
urls = {
"ETL-1": None,
"ETL-2": None,
"ETL-3": None,
"ETL-4": None,
"ETL-5": None,
"ETL-6": None,
"ETL-7": None,
"ETL-8B": None,
"ETL-8G": None,
"ETL-9B": None,
"ETL-9G": None
}
|
import random
import math
import copy
import itertools
import torch
import numpy as np
from utils import jsonl_to_json, remove_duplicate, flatten_list, remove_nonascii
def make_bert_batch(tokenizer, data, **kwargs):
data = jsonl_to_json(data)
sentences = data['target']
sentences = [tokenizer.encode(t) for t in sentences]
max_limit = kwargs.get('max_sentence_tokens', None)
if max_limit is not None:
sentences = list([t[:max_limit] for t in sentences])
sentences = [torch.Tensor([tokenizer.cls_id,
*t,
tokenizer.sep_id]) for t in sentences]
sentences, lengths = pad(sentences, tokenizer.pad_id)
targets = sentences.clone()
return {'sentences': sentences,
'lengths': lengths,
'targets': targets}
def make_keyword_batch(tokenizer, data, concat=False, keywords=None, lemmatize=None, **kwargs):
data = jsonl_to_json(data)
sentences = data['target']
sentences = [tokenizer.encode(t) for t in sentences]
max_limit = kwargs.get('max_sentence_tokens', None)
if max_limit is not None:
sentences = list([t[:max_limit] for t in sentences])
ordered_keywords = [[tokenizer.convert_ids_to_tokens(token) for token in sentence] for sentence in sentences]
ordered_keywords = [[lemmatize(token) for token in sentence] for sentence in ordered_keywords]
ordered_keywords = [[tokenizer.convert_tokens_to_ids(token) for token in sentence] for sentence in ordered_keywords]
ordered_keywords = [[(i, token) for i, token in enumerate(sentence) if token in keywords] for sentence in ordered_keywords]
ordered_keywords = [remove_duplicate(sentence, key=lambda x: x[1]) for sentence in ordered_keywords]
keyword_ids = [torch.Tensor([i for i, token in sentence]) for sentence in ordered_keywords]
ordered_keywords = [[token for i, token in sentence] for sentence in ordered_keywords]
unordered_keywords = [[(token, keywords[token]) for token in sentence] for sentence in ordered_keywords]
unordered_keywords = [sorted(sentence, key=lambda x: x[1], reverse=False) for sentence in unordered_keywords]
unordered_keywords = [map(lambda x: x[0], sentence) for sentence in unordered_keywords]
unordered_keywords = [torch.Tensor([tokenizer.cls_id, *keyword, tokenizer.sep_id, ]) for keyword in unordered_keywords]
ordered_keywords = [torch.Tensor([tokenizer.cls_id, *keyword, tokenizer.sep_id, ]) for keyword in ordered_keywords]
targets = [torch.Tensor([*t, tokenizer.sep_id]) for t in sentences]
sentences = [torch.Tensor([tokenizer.cls_id, *t]) for t in sentences]
# tensor B*L
sentences, lengths = pad(sentences, tokenizer.pad_id)
targets, _ = pad(targets, tokenizer.pad_id)
ordered_keywords, _ = pad(ordered_keywords, tokenizer.pad_id)
unordered_keywords, _ = pad(unordered_keywords, tokenizer.pad_id)
keyword_ids, _ = pad(keyword_ids, tokenizer.pad_id)
return {'sentences': sentences,
'lengths': lengths,
'targets': targets,
'keywords': unordered_keywords,
'ordered_keywords': ordered_keywords,
'keyword_ids': keyword_ids}
def make_mask_model_batch(tokenizer, data, random_idx=True, complementary=False, **kwargs):
data = jsonl_to_json(data)
sentences = data['target']
    if sentences and isinstance(sentences[0], list):
        # flatten grouped (nested) targets into a single list of sentences
        sentences = [x for r in sentences for x in r]
sentences = [tokenizer.encode(t) for t in sentences]
max_limit = kwargs.get('max_sentence_tokens', None)
if max_limit is not None:
sentences = list([t[:max_limit] for t in sentences])
sentences = [torch.Tensor([tokenizer.cls_id,
*t,
tokenizer.sep_id]) for t in sentences]
targets, _ = pad(sentences, tokenizer.pad_id)
# mask words
sentences, mask_ids = mask_words(sentences, tokenizer.mask_id, random_idx, complementary)
if random_idx:
# tensor B*L
sentences, lengths = pad(sentences, tokenizer.pad_id)
else:
# len B list of tensor L*L
li = []
lengths = []
for sentence in sentences:
sentence, length = pad(sentence, tokenizer.pad_id)
li.append(sentence)
lengths.append(length)
sentences = li
return {'sentences': sentences,
'lengths': lengths,
'targets': targets,
'mask_ids': mask_ids}
def mask_words(tensors, mask_idx, random_idx=True, complementary=False):
# B(CLS+L+SEP)
if random_idx:
# mask random idx
li = []
ids = []
for t in tensors:
device = t.device
idx = random.randint(1, t.shape[0] - 1)
ids.append(idx)
if complementary:
val = t[idx].item()
t.fill_(mask_idx)
t[idx] = val
else:
t[idx] = mask_idx
li.append(t)
return li, torch.Tensor(ids).long().to(device)
else:
# generate mask for every word
li = []
for t in tensors:
t = t.unsqueeze(0).repeat(t.shape[0], 1)
eye = torch.eye(t.shape[0]).bool().to(t.device)
if complementary:
eye = ~eye
full = torch.full(t.shape, mask_idx, dtype=t.dtype).to(t.device)
t.masked_scatter_(mask=eye, source=full)
li.append(t)
# list of L*L
return li, None
def make_autoencoder_batch(tokenizer, data, **kwargs):
data = jsonl_to_json(data)
sentences = data['target']
sentences = [tokenizer.encode(t) for t in sentences]
targets = [torch.Tensor([*t, tokenizer.eos_id]) for t in sentences]
sentences = [torch.Tensor([tokenizer.sos_id, *t]) for t in sentences]
sentences, lengths = pad(sentences, tokenizer.pad_id)
targets, _ = pad(targets, tokenizer.pad_id)
return {'sentences': sentences,
'lengths': lengths,
'targets': targets}
def make_subset_mask_batch(tokenizer, data, random_idx=True,
keyword_min=1, keyword_max_ratio=0.4,
**kwargs):
data = jsonl_to_json(data)
sentences = data['target']
sentences = [tokenizer.encode(t) for t in sentences]
sentences = [torch.Tensor([tokenizer.cls_id, *t, tokenizer.sep_id]) for t in sentences]
targets = copy.deepcopy(sentences)
targets, lengths = pad(targets, tokenizer.pad_id)
if random_idx:
keyword_ids = []
for i, sentence in enumerate(sentences):
length = len(sentence) - 2
max_length = math.ceil(max(keyword_min, keyword_max_ratio * length))
keyword_num = random.randrange(keyword_min, max_length + 1)
ids = list(range(1, length + 1))
random.shuffle(ids)
mask_idx = ids[keyword_num:]
mask_idx = torch.LongTensor(mask_idx).to(sentence.device)
keyword_idx = ids[:keyword_num]
keyword_idx = torch.LongTensor(keyword_idx).to(sentence.device)
sentence[mask_idx] = tokenizer.mask_id
sentences[i] = sentence
keyword_ids.append(keyword_idx)
sentences, lengths = pad(sentences, tokenizer.pad_id)
keyword_ids, _ = pad(keyword_ids, tokenizer.pad_id)
return {'sentences': sentences,
'lengths': lengths,
'targets': targets,
'keyword_ids': keyword_ids}
else:
# for memory reasons, we should not keep source for every combinations
'''
keyword_ids = []
for i, sentence in enumerate(sentences):
length = len(sentence) - 2
max_length = math.ceil(max(keyword_min, keyword_max_ratio * length))
ids = list(range(1, length + 1))
combs = []
for L in range(1, max_length + 1):
comb_L = []
for subset in itertools.combinations(ids, L):
comb_L.append(subset)
comb_L = torch.LongTensor(comb_L).to(sentence.device)
combs.append(comb_L)
keyword_ids.append(combs)
return {'sentences': targets,
'targets': targets,
'keyword_ids': keyword_ids}
'''
# generate masks dynamically
return {'sentences': targets,
'lengths': lengths,
'targets': targets}
class BPECap(object):
def __init__(self):
super(BPECap, self).__init__()
self.small_prefix = b'\xc4\xa1'.decode()
self.big_prefix = b'\xc4\xa0'.decode()
def __call__(self, x):
return x.replace(self.small_prefix, self.big_prefix)
class ConvertToken(object):
def __init__(self):
super(ConvertToken, self).__init__()
self.bpe_capitalize = BPECap()
def __call__(self, tokenizer, token):
return tokenizer.convert_tokens_to_ids([self.bpe_capitalize(token)])[0]
def make_feature_lm_batch_with_keywords(tokenizer, data, keywords=None,
word_counter=None, feature_name_map={},
concat_group=False, use_vist=False,
force_ascii=False,
**kwargs):
# data: list of chunks: list of [item dict]
data = jsonl_to_json(data)
group_length = [len(i) for i in data['vid']]
group_mask = torch.zeros(len(data['vid']), max(group_length)).bool()
for i, length in enumerate(group_length):
group_mask[i, :length] = 1
# keyword_counter = keywords
if keywords is not None:
# restore gpt token prefix
# [:len(keywords)] part is arbitrary, and causes some bugs apparently...
# keywords = torch.Tensor(list(itertools.chain(*[tokenizer.encode(token) for token in keywords]))[:len(keywords)]).long()
convert_token = ConvertToken()
keywords = torch.Tensor([convert_token(tokenizer, token) for token in list(keywords.keys())]).long()
if 'target' in data:
batch_sentences = data['target']
def get_text(sentences):
'''
if use_vist:
sentences = [tokenizer.decode(tokenizer.encode(t)) for t in sentences]
'''
if force_ascii:
sentences = [remove_nonascii(t) for t in sentences]
sentences = [tokenizer.encode(t, add_special_tokens=False) for t in sentences]
max_limit = kwargs.get('max_sentence_tokens', None)
if max_limit is not None:
sentences = list([t[:max_limit] for t in sentences]) # undo ptb tokenization
# tensor B*L
if concat_group:
sentences = [[*t, tokenizer.seq_sep_id] for t in sentences]
lengths = torch.LongTensor([len(s) for s in sentences])
sentences = flatten_list(sentences)
# [:-1] # do not remove last seq_sep token
targets = torch.Tensor([[*sentences, tokenizer.sep_id]])
sentences = torch.Tensor([[tokenizer.cls_id, *sentences]])
else:
targets = [torch.Tensor([*t, tokenizer.sep_id]) for t in sentences]
targets, _ = pad(targets, tokenizer.pad_id)
sentences = [torch.Tensor([tokenizer.cls_id, *t]) for t in sentences]
sentences, lengths = pad(sentences, tokenizer.pad_id)
'''
word_subset = torch.zeros(sentences.shape[0], len(tokenizer)).bool().to(sentences.device)
word_subset = word_subset.scatter(dim=-1, index=sentences, value=1)
word_subset = [i.squeeze() for i in word_subset.split(1, dim=0)]
keyword_mask = None
if keywords is not None:
keyword_mask = sentences.unsqueeze(-1).expand(-1, -1, keywords.shape[0]) == keywords.view(1, 1, -1)
keyword_mask = keyword_mask.long().sum(dim=1) > 0 # VN
keyword_mask = [i.squeeze() for i in keyword_mask.split(1, dim=0)]
'''
return sentences, lengths, targets
sentences, lengths, targets = zip(*[get_text(sentence) for sentence in batch_sentences])
sentences, batch_lengths = pad(sentences, tokenizer.pad_id)
targets, _ = pad(targets, tokenizer.pad_id)
lengths, _ = pad(lengths, 0)
'''
word_subsets = pad_tensor(word_subsets, 0)
word_subsets[:, :, tokenizer.pad_id] = 0
'''
ret_batch = {
'sentences': sentences,
'batch_lengths': batch_lengths,
'lengths': lengths,
'targets': targets,
}
else:
ret_batch = {}
ret_batch = {
**ret_batch,
'vid': data['vid'],
'group_mask': group_mask,
}
if 'album_id' in data:
ret_batch = {
**ret_batch,
'album_id': data['album_id'],
}
if 'image_id' in data:
ret_batch = {
**ret_batch,
'image_id': data['image_id'],
}
if 'frame' in data:
ret_batch = {
**ret_batch,
'frame': pad_tensor(data['frame'], 0).long(),
}
# Process features if applicable
for k, v in feature_name_map.items():
if k in data:
try:
ret_batch[v] = pad_tensor(data[k], 0)
except Exception as e:
print(k)
print(data['vid'])
print(e)
from pdb_multi import set_trace; set_trace()
return ret_batch
def make_blank_filling_batch(tokenizer, data, feature_name_map={}, **kwargs):
data = jsonl_to_json(data)
sentences = data['input']
targets = data['target']
sentences = [tokenizer.encode(t) for t in sentences]
sentences = [torch.Tensor(t) for t in sentences]
sentences, lengths = pad(sentences, tokenizer.pad_id)
targets = [tokenizer.encode(t) for t in targets]
targets = [torch.Tensor(t) for t in targets]
targets, _ = pad(targets, tokenizer.pad_id)
blank_ids = sentences == tokenizer.convert_tokens_to_ids(tokenizer.blank)
ret_batch = {
'sentences': sentences,
'lengths': lengths,
'targets': targets,
'blank_ids': blank_ids,
'blank_num': data['blank_num'],
'vid': data['vid']
}
for k,v in feature_name_map.items():
if k in data:
ret_batch[v] = pad_tensor(data[k], 0)
return ret_batch
def pad(x, pad_id=0):
B = len(x)
max_size, dtype = get_max_size(x)
storage = torch.full(max_size, pad_id, dtype=torch.long).to(x[0].device)
    lengths = [t.shape[0] if hasattr(t, 'shape') else len(t) for t in x]
def add_data(ids, t):
if hasattr(t, 'shape'):
if not torch.is_tensor(t):
t = torch.from_numpy(t)
t_shape = [slice(None, j) for j in t.shape]
storage[tuple([*ids, *t_shape])] = t
else:
for i in range(len(t)):
add_data([*ids, i], t[i])
add_data([], x)
lengths = torch.LongTensor(lengths).to(x[0].device)
return storage, lengths
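def _example_pad_usage():
    """Illustrative sketch only (not part of the original module).

    Shows how pad() right-pads a list of variable-length token tensors into a
    single long tensor batch; the token values here are hypothetical.
    """
    seqs = [torch.tensor([5, 6, 7]), torch.tensor([8, 9])]
    batch, lengths = pad(seqs, pad_id=0)
    # batch == tensor([[5, 6, 7], [8, 9, 0]]); lengths holds the per-sequence lengths
    return batch, lengths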
def remove_pad(x, pad_id=0):
return x[:(x != pad_id).sum(-1)]
def remove_past_idx(x, idx=0):
idx = (x == idx).nonzero()
if idx.nelement() > 0:
idx = idx[0]
else:
idx = x.shape[0]
return x[: idx + 1]
def decode_tensor(tokenizer, x, split_tokens=False, use_vist=False, remove_past_sep=False):
if x.dim() < 1:
x = x.unsqueeze(0)
x = remove_pad(x, tokenizer.pad_id)
if remove_past_sep:
x = remove_past_idx(x, tokenizer.sep_id)
x = list(x.cpu().numpy())
x_cut = []
temp = []
for tok in x:
if tok == tokenizer.seq_sep_id:
x_cut.append(temp)
temp = []
else:
temp.append(tok)
x_cut.append(temp)
x_cut = [[int(i) for i in x] for x in x_cut]
if split_tokens:
return flatten_list([tokenizer.convert_ids_to_tokens(x) for x in x_cut])
elif use_vist:
return ' '.join([decode_vist(x, tokenizer) for x in x_cut])
else:
return ' '.join(tokenizer.decode(x) for x in x_cut)
def decode_vist(x, tokenizer):
# decode vist for gpt2 tokenizer
tokenizer.whitespace = getattr(tokenizer, 'whitespace', b'\xc4\xa0'.decode())
x = tokenizer.convert_ids_to_tokens(x)
x = [f" {v[1:]}" if v.startswith(tokenizer.whitespace) else v for v in x]
return ''.join(x)
def get_max_size(t):
if hasattr(t, 'shape'):
if not torch.is_tensor(t):
t = torch.from_numpy(t)
return list(t.shape), t.dtype
else:
# get max
t = [get_max_size(i) for i in t]
dtype = t[0][1]
t = [i[0] for i in t]
return [len(t), *list(np.array(t).max(axis=0))], dtype
def pad_tensor(x, val=0):
max_size, _ = get_max_size(x)
dtype = torch.float
storage = torch.full(max_size, val, dtype=dtype)
def add_data(ids, t):
if hasattr(t, 'shape'):
if not torch.is_tensor(t):
t = torch.from_numpy(t)
t_shape = [slice(None, j) for j in t.shape]
t_shape = [*ids, *t_shape]
storage[t_shape] = t
else:
for i in range(len(t)):
add_data([*ids, i], t[i])
add_data([], x)
return storage
def make_fib_batch(tokenizer, data, feature_name_map={}, **kwargs):
data = jsonl_to_json(data)
group_length = [len(i) for i in data['vid']]
group_mask = torch.zeros(len(data['vid']), max(group_length)).bool()
for i, length in enumerate(group_length):
group_mask[i, :length] = 1
def get_text(src, tgt):
src = tokenizer.encode(src)
tgt = tokenizer.encode(tgt)
mask_idx = src.index(tokenizer.mask_id)
length_diff = len(tgt) - len(src)
src_extended = src[:mask_idx + 1] + [tokenizer.mask_id] * length_diff + src[mask_idx + 1:]
return src_extended, tgt
def process_texts(srcs, tgts):
srcs, tgts = zip(*[get_text(src, tgt) for src, tgt in zip(srcs, tgts)])
srcs = [torch.Tensor(t) for t in srcs]
tgts = [torch.Tensor(t) for t in tgts]
srcs, _ = pad(srcs, tokenizer.pad_id)
tgts, lengths = pad(tgts, tokenizer.pad_id)
return srcs, tgts, lengths
srcs, tgts, lengths = zip(*[process_texts(src, tgt) for src, tgt in \
zip(data['source'], data['target'])])
sentences, batch_lengths = pad(srcs, tokenizer.pad_id)
targets, _ = pad(tgts, tokenizer.pad_id)
lengths, _ = pad(lengths, 0)
ret_batch = {
'sentences': sentences,
'batch_lengths': batch_lengths,
'lengths': lengths,
'targets': targets
}
ret_batch = {
**ret_batch,
'vid': data['vid'],
'answer': data['answer'],
'group_mask': group_mask,
}
if 'album_id' in data:
ret_batch = {
**ret_batch,
'album_id': data['album_id'],
}
if 'image_id' in data:
ret_batch = {
**ret_batch,
'image_id': data['image_id'],
}
if 'frame' in data:
ret_batch = {
**ret_batch,
'frame': pad_tensor(data['frame'], 0).long(),
}
# Process features if applicable
for k, v in feature_name_map.items():
if k in data:
try:
ret_batch[v] = pad_tensor(data[k], 0)
except Exception as e:
print(k)
print(data['vid'])
print(e)
from pdb_multi import set_trace; set_trace()
return ret_batch
def make_multichoice_batch(tokenizer, data, feature_name_map={}, **kwargs):
data = jsonl_to_json(data)
group_length = [len(i) for i in data['vid']]
group_mask = torch.zeros(len(data['vid']), max(group_length)).bool()
for i, length in enumerate(group_length):
group_mask[i, :length] = 1
def get_text(tgt):
tgt = tokenizer.encode(tgt)
return tgt
def process_texts(tgts):
tgts = [get_text(tgt) for tgt in tgts]
tgts = [torch.Tensor(t) for t in tgts]
tgts, lengths = pad(tgts, tokenizer.pad_id)
return tgts, lengths
tgts, lengths = zip(*[process_texts(tgt) for tgt in data['target']])
targets, batch_lengths = pad(tgts, tokenizer.pad_id)
lengths, _ = pad(lengths, 0)
sentences = targets
answer = data['answer']
try:
        answer = [int(batch_a[0]) for batch_a in answer] # all choices have the same true answer idx
except:
print(answer)
answer = torch.Tensor(answer)
targets = answer
ret_batch = {
'sentences': sentences,
'batch_lengths': batch_lengths,
'lengths': lengths,
'targets': targets
}
ret_batch = {
**ret_batch,
'vid': data['vid'],
'answer': data['answer'],
'group_mask': group_mask,
}
if 'album_id' in data:
ret_batch = {
**ret_batch,
'album_id': data['album_id'],
}
if 'image_id' in data:
ret_batch = {
**ret_batch,
'image_id': data['image_id'],
}
if 'frame' in data:
ret_batch = {
**ret_batch,
'frame': pad_tensor(data['frame'], 0).long(),
}
# Process features if applicable
for k, v in feature_name_map.items():
if k in data:
try:
ret_batch[v] = pad_tensor(data[k], 0)
except Exception as e:
print(k)
print(data['vid'])
print(e)
from pdb_multi import set_trace; set_trace()
return ret_batch
|
# -*- coding: utf-8 -*-
# @Time : 2018/11/27 18:42
# @Author : yag8009
# @File : for_notchongfu.py
# @Software: PyCharm
"""
题目:有1、2、3、4个数字,能组成多少个互不相同且无重复数字的三位数?都是多少?
1程序分析:
可填在百位、十位、个位的数字都是1、2、3、4。
组成所有的排列后再去掉不满足条件的排列
"""
class notchong:
    def __init__(self, data):
        self.data = data

    def notchong1(self, lis=None):
        # avoid a mutable default argument; compare values with != rather than identity
        if lis is None:
            lis = []
        for i in self.data:
            for x in self.data:
                for y in self.data:
                    if i != x and x != y and i != y:
                        lis.append((i, x, y))
        return lis
if __name__ == '__main__':
print(notchong([1, 2, 3, 4]).notchong1())
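# A hedged cross-check (not part of the original script): the same 24 orderings can be
# produced with the standard library, e.g.
#   from itertools import permutations
#   sorted(notchong([1, 2, 3, 4]).notchong1()) == sorted(permutations([1, 2, 3, 4], 3))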
|
"""
Taken from the networkx source code and modified to process IOCall classes.
See original at https://github.com/networkx/networkx/blob/main/networkx/readwrite/gexf.py
"""
"""Read and write graphs in GEXF format.
.. warning::
This parser uses the standard xml library present in Python, which is
insecure - see :doc:`library/xml` for additional information.
Only parse GEXF files you trust.
GEXF (Graph Exchange XML Format) is a language for describing complex
network structures, their associated data and dynamics.
This implementation does not support mixed graphs (directed and
undirected edges together).
Format
------
GEXF is an XML format. See https://gephi.org/gexf/format/schema.html for the
specification and https://gephi.org/gexf/format/basic.html for examples.
"""
import itertools
import time
from typing import Dict
import networkx as nx
from xml.etree.ElementTree import (
Element,
ElementTree,
SubElement,
tostring,
register_namespace,
)
from themis.modules.common.calls import CallsNode, IOConstructType
def write_gexf(G, path, encoding="utf-8", prettyprint=True, version="1.2draft"):
"""Write G in GEXF format to path.
"GEXF (Graph Exchange XML Format) is a language for describing
complex networks structures, their associated data and dynamics" [1]_.
Node attributes are checked according to the version of the GEXF
schemas used for parameters which are not user defined,
e.g. visualization 'viz' [2]_. See example for usage.
Parameters
----------
G : graph
A NetworkX graph
path : file or string
File or file name to write.
File names ending in .gz or .bz2 will be compressed.
encoding : string (optional, default: 'utf-8')
Encoding for text data.
prettyprint : bool (optional, default: True)
If True use line breaks and indenting in output XML.
version: string (optional, default: '1.2draft')
The version of GEXF to be used for nodes attributes checking
Examples
--------
>>> G = nx.path_graph(4)
>>> nx.write_gexf(G, "test.gexf")
# visualization data
>>> G.nodes[0]["viz"] = {"size": 54}
>>> G.nodes[0]["viz"]["position"] = {"x": 0, "y": 1}
>>> G.nodes[0]["viz"]["color"] = {"r": 0, "g": 0, "b": 256}
Notes
-----
This implementation does not support mixed graphs (directed and undirected
edges together).
The node id attribute is set to be the string of the node label.
    If you want to specify an id, set it as node data, e.g.
node['a']['id']=1 to set the id of node 'a' to 1.
References
----------
.. [1] GEXF File Format, https://gephi.org/gexf/format/
.. [2] GEXF schema, https://gephi.org/gexf/format/schema.html
"""
writer = GEXFWriter(encoding=encoding, prettyprint=prettyprint, version=version)
writer.add_graph(G)
writer.write(path)
def generate_gexf(G, encoding="utf-8", prettyprint=True, version="1.2draft"):
"""Generate lines of GEXF format representation of G.
"GEXF (Graph Exchange XML Format) is a language for describing
complex networks structures, their associated data and dynamics" [1]_.
Parameters
----------
G : graph
A NetworkX graph
encoding : string (optional, default: 'utf-8')
Encoding for text data.
prettyprint : bool (optional, default: True)
If True use line breaks and indenting in output XML.
version : string (default: 1.2draft)
        Version of GEXF File Format (see https://gephi.org/gexf/format/schema.html)
Supported values: "1.1draft", "1.2draft"
Examples
--------
>>> G = nx.path_graph(4)
>>> linefeed = chr(10) # linefeed=\n
>>> s = linefeed.join(nx.generate_gexf(G))
>>> for line in nx.generate_gexf(G): # doctest: +SKIP
... print(line)
Notes
-----
This implementation does not support mixed graphs (directed and undirected
edges together).
The node id attribute is set to be the string of the node label.
    If you want to specify an id, set it as node data, e.g.
node['a']['id']=1 to set the id of node 'a' to 1.
References
----------
.. [1] GEXF File Format, https://gephi.org/gexf/format/
"""
writer = GEXFWriter(encoding=encoding, prettyprint=prettyprint, version=version)
writer.add_graph(G)
yield from str(writer).splitlines()
class GEXF:
versions = {}
d = {
"NS_GEXF": "http://www.gexf.net/1.1draft",
"NS_VIZ": "http://www.gexf.net/1.1draft/viz",
"NS_XSI": "http://www.w3.org/2001/XMLSchema-instance",
"SCHEMALOCATION": " ".join(
["http://www.gexf.net/1.1draft", "http://www.gexf.net/1.1draft/gexf.xsd"]
),
"VERSION": "1.1",
}
versions["1.1draft"] = d
d = {
"NS_GEXF": "http://www.gexf.net/1.2draft",
"NS_VIZ": "http://www.gexf.net/1.2draft/viz",
"NS_XSI": "http://www.w3.org/2001/XMLSchema-instance",
"SCHEMALOCATION": " ".join(
["http://www.gexf.net/1.2draft", "http://www.gexf.net/1.2draft/gexf.xsd"]
),
"VERSION": "1.2",
}
versions["1.2draft"] = d
def construct_types(self):
types = [
(int, "integer"),
(float, "float"),
(float, "double"),
(bool, "boolean"),
(list, "string"),
(dict, "string"),
(int, "long"),
(str, "liststring"),
(str, "anyURI"),
(str, "string"),
]
# These additions to types allow writing numpy types
try:
import numpy as np
except ImportError:
pass
else:
# prepend so that python types are created upon read (last entry wins)
types = [
(np.float64, "float"),
(np.float32, "float"),
(np.float16, "float"),
(np.float_, "float"),
(np.int_, "int"),
(np.int8, "int"),
(np.int16, "int"),
(np.int32, "int"),
(np.int64, "int"),
(np.uint8, "int"),
(np.uint16, "int"),
(np.uint32, "int"),
(np.uint64, "int"),
(np.int_, "int"),
(np.intc, "int"),
(np.intp, "int"),
] + types
self.xml_type = dict(types)
self.python_type = dict(reversed(a) for a in types)
# http://www.w3.org/TR/xmlschema-2/#boolean
convert_bool = {
"true": True,
"false": False,
"True": True,
"False": False,
"0": False,
0: False,
"1": True,
1: True,
}
def set_version(self, version):
d = self.versions.get(version)
if d is None:
raise nx.NetworkXError(f"Unknown GEXF version {version}.")
self.NS_GEXF = d["NS_GEXF"]
self.NS_VIZ = d["NS_VIZ"]
self.NS_XSI = d["NS_XSI"]
self.SCHEMALOCATION = d["SCHEMALOCATION"]
self.VERSION = d["VERSION"]
self.version = version
class GEXFWriter(GEXF):
# class for writing GEXF format files
# use write_gexf() function
def __init__(
self, graph=None, encoding="utf-8", prettyprint=True, version="1.2draft"
):
self.construct_types()
self.prettyprint = prettyprint
self.encoding = encoding
self.set_version(version)
self.xml = Element(
"gexf",
{
"xmlns": self.NS_GEXF,
"xmlns:xsi": self.NS_XSI,
"xsi:schemaLocation": self.SCHEMALOCATION,
"version": self.VERSION,
},
)
# Make meta element a non-graph element
# Also add lastmodifieddate as attribute, not tag
meta_element = Element("meta")
subelement_text = f"NetworkX {nx.__version__}"
SubElement(meta_element, "creator").text = subelement_text
meta_element.set("lastmodifieddate", time.strftime("%Y-%m-%d"))
self.xml.append(meta_element)
register_namespace("viz", self.NS_VIZ)
# counters for edge and attribute identifiers
self.edge_id = itertools.count()
self.attr_id = itertools.count()
self.all_edge_ids = set()
# default attributes are stored in dictionaries
self.attr = {}
self.attr["node"] = {}
self.attr["edge"] = {}
self.attr["node"]["dynamic"] = {}
self.attr["node"]["static"] = {}
self.attr["edge"]["dynamic"] = {}
self.attr["edge"]["static"] = {}
if graph is not None:
self.add_graph(graph)
def __str__(self):
if self.prettyprint:
self.indent(self.xml)
s = tostring(self.xml).decode(self.encoding)
return s
def add_graph(self, G):
# first pass through G collecting edge ids
for u, v, dd in G.edges(data=True):
eid = dd.get("id")
if eid is not None:
self.all_edge_ids.add(str(eid))
# set graph attributes
if G.graph.get("mode") == "dynamic":
mode = "dynamic"
else:
mode = "static"
# Add a graph element to the XML
if G.is_directed():
default = "directed"
else:
default = "undirected"
name = G.graph.get("name", "")
graph_element = Element("graph", defaultedgetype=default, mode=mode, name=name)
self.graph_element = graph_element
self.add_nodes(G, graph_element)
self.add_edges(G, graph_element)
self.xml.append(graph_element)
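    # Themis-specific helper (not part of upstream networkx): flattens the CallsNode
    # stored under the "call" key into plain GEXF-serialisable attributes -- the function
    # name, whether an input fd is present, the number of output fds, and the dominant
    # IOConstructType among the fd type hints.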
def extract_node_data(self, data: Dict):
call: CallsNode = data.pop("call", None)
if call is None:
return data
retval = data
func = call.func.funcname
in_fd_present = call.input_fd is not None
out_fds_len = 0 if call.output_fd is None else len(call.output_fd)
type_hints = []
if call.input_fd is not None:
type_hints.append(call.input_fd.typ)
if call.output_fd is not None:
for fd in call.output_fd:
type_hints.append(fd.typ)
io_type = sorted(type_hints, reverse=True)[0] if len(type_hints) > 0 else IOConstructType.UNKNOWN
retval["func"] = func
retval["in_fd_present"] = in_fd_present
retval["out_fds_num"] = out_fds_len
retval["io_type"] = str(io_type)
return retval
def add_nodes(self, G, graph_element):
nodes_element = Element("nodes")
for node, data in G.nodes(data=True):
node_data = self.extract_node_data(data.copy())
node_id = str(node_data.pop("id", node))
kw = {"id": node_id}
label = str(node_data.pop("label", node))
kw["label"] = label
try:
pid = node_data.pop("pid")
kw["pid"] = str(pid)
except KeyError:
pass
try:
start = node_data.pop("start")
kw["start"] = str(start)
self.alter_graph_mode_timeformat(start)
except KeyError:
pass
try:
end = node_data.pop("end")
kw["end"] = str(end)
self.alter_graph_mode_timeformat(end)
except KeyError:
pass
# add node element with attributes
node_element = Element("node", **kw)
# add node element and attr subelements
default = G.graph.get("node_default", {})
node_data = self.add_parents(node_element, node_data)
if self.VERSION == "1.1":
node_data = self.add_slices(node_element, node_data)
else:
node_data = self.add_spells(node_element, node_data)
node_data = self.add_viz(node_element, node_data)
node_data = self.add_attributes("node", node_element, node_data, default)
nodes_element.append(node_element)
graph_element.append(nodes_element)
def add_edges(self, G, graph_element):
def edge_key_data(G):
# helper function to unify multigraph and graph edge iterator
if G.is_multigraph():
for u, v, key, data in G.edges(data=True, keys=True):
edge_data = data.copy()
edge_data.update(key=key)
edge_id = edge_data.pop("id", None)
if edge_id is None:
edge_id = next(self.edge_id)
while str(edge_id) in self.all_edge_ids:
edge_id = next(self.edge_id)
self.all_edge_ids.add(str(edge_id))
yield u, v, edge_id, edge_data
else:
for u, v, data in G.edges(data=True):
edge_data = data.copy()
edge_id = edge_data.pop("id", None)
if edge_id is None:
edge_id = next(self.edge_id)
while str(edge_id) in self.all_edge_ids:
edge_id = next(self.edge_id)
self.all_edge_ids.add(str(edge_id))
yield u, v, edge_id, edge_data
edges_element = Element("edges")
for u, v, key, edge_data in edge_key_data(G):
kw = {"id": str(key)}
try:
edge_label = edge_data.pop("label")
kw["label"] = str(edge_label)
except KeyError:
pass
try:
edge_weight = edge_data.pop("weight")
kw["weight"] = str(edge_weight)
except KeyError:
pass
try:
edge_type = edge_data.pop("type")
kw["type"] = str(edge_type)
except KeyError:
pass
try:
start = edge_data.pop("start")
kw["start"] = str(start)
self.alter_graph_mode_timeformat(start)
except KeyError:
pass
try:
end = edge_data.pop("end")
kw["end"] = str(end)
self.alter_graph_mode_timeformat(end)
except KeyError:
pass
source_id = str(G.nodes[u].get("id", u))
target_id = str(G.nodes[v].get("id", v))
edge_element = Element("edge", source=source_id, target=target_id, **kw)
default = G.graph.get("edge_default", {})
if self.VERSION == "1.1":
edge_data = self.add_slices(edge_element, edge_data)
else:
edge_data = self.add_spells(edge_element, edge_data)
edge_data = self.add_viz(edge_element, edge_data)
edge_data = self.add_attributes("edge", edge_element, edge_data, default)
edges_element.append(edge_element)
graph_element.append(edges_element)
def add_attributes(self, node_or_edge, xml_obj, data, default):
# Add attrvalues to node or edge
attvalues = Element("attvalues")
if len(data) == 0:
return data
mode = "static"
for k, v in data.items():
# rename generic multigraph key to avoid any name conflict
if k == "key":
k = "networkx_key"
val_type = type(v)
if val_type not in self.xml_type:
raise TypeError(f"attribute value type is not allowed: {val_type}")
if isinstance(v, list):
# dynamic data
for val, start, end in v:
val_type = type(val)
if start is not None or end is not None:
mode = "dynamic"
self.alter_graph_mode_timeformat(start)
self.alter_graph_mode_timeformat(end)
break
attr_id = self.get_attr_id(
str(k), self.xml_type[val_type], node_or_edge, default, mode
)
for val, start, end in v:
e = Element("attvalue")
e.attrib["for"] = attr_id
e.attrib["value"] = str(val)
# Handle nan, inf, -inf differently
if val_type == float:
if e.attrib["value"] == "inf":
e.attrib["value"] = "INF"
elif e.attrib["value"] == "nan":
e.attrib["value"] = "NaN"
elif e.attrib["value"] == "-inf":
e.attrib["value"] = "-INF"
if start is not None:
e.attrib["start"] = str(start)
if end is not None:
e.attrib["end"] = str(end)
attvalues.append(e)
else:
# static data
mode = "static"
attr_id = self.get_attr_id(
str(k), self.xml_type[val_type], node_or_edge, default, mode
)
e = Element("attvalue")
e.attrib["for"] = attr_id
if isinstance(v, bool):
e.attrib["value"] = str(v).lower()
else:
e.attrib["value"] = str(v)
# Handle float nan, inf, -inf differently
if val_type == float:
if e.attrib["value"] == "inf":
e.attrib["value"] = "INF"
elif e.attrib["value"] == "nan":
e.attrib["value"] = "NaN"
elif e.attrib["value"] == "-inf":
e.attrib["value"] = "-INF"
attvalues.append(e)
xml_obj.append(attvalues)
return data
def get_attr_id(self, title, attr_type, edge_or_node, default, mode):
# find the id of the attribute or generate a new id
try:
return self.attr[edge_or_node][mode][title]
except KeyError:
# generate new id
new_id = str(next(self.attr_id))
self.attr[edge_or_node][mode][title] = new_id
attr_kwargs = {"id": new_id, "title": title, "type": attr_type}
attribute = Element("attribute", **attr_kwargs)
# add subelement for data default value if present
default_title = default.get(title)
if default_title is not None:
default_element = Element("default")
default_element.text = str(default_title)
attribute.append(default_element)
            # now insert it into the XML
attributes_element = None
for a in self.graph_element.findall("attributes"):
# find existing attributes element by class and mode
a_class = a.get("class")
a_mode = a.get("mode", "static")
if a_class == edge_or_node and a_mode == mode:
attributes_element = a
if attributes_element is None:
# create new attributes element
attr_kwargs = {"mode": mode, "class": edge_or_node}
attributes_element = Element("attributes", **attr_kwargs)
self.graph_element.insert(0, attributes_element)
attributes_element.append(attribute)
return new_id
def add_viz(self, element, node_data):
viz = node_data.pop("viz", False)
if viz:
color = viz.get("color")
if color is not None:
if self.VERSION == "1.1":
e = Element(
f"{{{self.NS_VIZ}}}color",
r=str(color.get("r")),
g=str(color.get("g")),
b=str(color.get("b")),
)
else:
e = Element(
f"{{{self.NS_VIZ}}}color",
r=str(color.get("r")),
g=str(color.get("g")),
b=str(color.get("b")),
a=str(color.get("a")),
)
element.append(e)
size = viz.get("size")
if size is not None:
e = Element(f"{{{self.NS_VIZ}}}size", value=str(size))
element.append(e)
thickness = viz.get("thickness")
if thickness is not None:
e = Element(f"{{{self.NS_VIZ}}}thickness", value=str(thickness))
element.append(e)
shape = viz.get("shape")
if shape is not None:
if shape.startswith("http"):
e = Element(
f"{{{self.NS_VIZ}}}shape", value="image", uri=str(shape)
)
else:
e = Element(f"{{{self.NS_VIZ}}}shape", value=str(shape))
element.append(e)
position = viz.get("position")
if position is not None:
e = Element(
f"{{{self.NS_VIZ}}}position",
x=str(position.get("x")),
y=str(position.get("y")),
z=str(position.get("z")),
)
element.append(e)
return node_data
def add_parents(self, node_element, node_data):
parents = node_data.pop("parents", False)
if parents:
parents_element = Element("parents")
for p in parents:
e = Element("parent")
e.attrib["for"] = str(p)
parents_element.append(e)
node_element.append(parents_element)
return node_data
def add_slices(self, node_or_edge_element, node_or_edge_data):
slices = node_or_edge_data.pop("slices", False)
if slices:
slices_element = Element("slices")
for start, end in slices:
e = Element("slice", start=str(start), end=str(end))
slices_element.append(e)
node_or_edge_element.append(slices_element)
return node_or_edge_data
def add_spells(self, node_or_edge_element, node_or_edge_data):
spells = node_or_edge_data.pop("spells", False)
if spells:
spells_element = Element("spells")
for start, end in spells:
e = Element("spell")
if start is not None:
e.attrib["start"] = str(start)
self.alter_graph_mode_timeformat(start)
if end is not None:
e.attrib["end"] = str(end)
self.alter_graph_mode_timeformat(end)
spells_element.append(e)
node_or_edge_element.append(spells_element)
return node_or_edge_data
def alter_graph_mode_timeformat(self, start_or_end):
# If 'start' or 'end' appears, alter Graph mode to dynamic and
# set timeformat
if self.graph_element.get("mode") == "static":
if start_or_end is not None:
if isinstance(start_or_end, str):
timeformat = "date"
elif isinstance(start_or_end, float):
timeformat = "double"
elif isinstance(start_or_end, int):
timeformat = "long"
else:
raise nx.NetworkXError(
"timeformat should be of the type int, float or str"
)
self.graph_element.set("timeformat", timeformat)
self.graph_element.set("mode", "dynamic")
def write(self, fh):
# Serialize graph G in GEXF to the open fh
if self.prettyprint:
self.indent(self.xml)
document = ElementTree(self.xml)
document.write(fh, encoding=self.encoding, xml_declaration=True)
def indent(self, elem, level=0):
# in-place prettyprint formatter
i = "\n" + " " * level
if len(elem):
if not elem.text or not elem.text.strip():
elem.text = i + " "
if not elem.tail or not elem.tail.strip():
elem.tail = i
for elem in elem:
self.indent(elem, level + 1)
if not elem.tail or not elem.tail.strip():
elem.tail = i
else:
if level and (not elem.tail or not elem.tail.strip()):
elem.tail = i |
import argparse
from anagram_matcher import AnagramMatcher
parser = argparse.ArgumentParser()
parser.add_argument('-A', "--anagram", type=str)
parser.add_argument('-E', '--encoded_messages', nargs='+')
parser.add_argument('-W', '--wordlist', type=str)
args = parser.parse_args()
anagram = args.anagram
encoded_messages = args.encoded_messages
wordlist_filepath = args.wordlist
print(anagram)
"""
if __name__ == "__main__":
self = AnagramMatcher(wordlist_filepath, anagram, encoded_messages) """ |
import sublime, sublime_plugin
import webbrowser
CHECKOUT = "mozilla-central" # maybe make this configurable?
BASE = "https://searchfox.org/" + CHECKOUT
PATH_MARKER = "@"
REGEXP_MARKER = "rrr"
SELECTION = 1
PATH = 2
QUERY = 3
def get_url(text, t):
if t == SELECTION:
return "{}/search?q={}".format(BASE, text)
if t == PATH:
return "{}/source/{}".format(BASE, text)
if t == QUERY:
q, _, path = text.partition(PATH_MARKER)
regexp = q.startswith(REGEXP_MARKER)
if regexp:
q = q.split(REGEXP_MARKER).pop()
url = "{}/search?q={}&path={}".format(BASE, q.strip(), path.strip())
if regexp:
url += "®exp=true"
return url
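# Hedged examples of the mappings implemented above (illustrative values only):
#   SELECTION  "nsIFoo"            -> BASE + "/search?q=nsIFoo"
#   PATH       "dom/base/Foo.cpp"  -> BASE + "/source/dom/base/Foo.cpp"
#   QUERY      "rrr nsIFoo@dom/"   -> BASE + "/search?q=nsIFoo&path=dom/&regexp=true"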
def open_search_tab(text, t):
webbrowser.open(get_url(text, t), new=2, autoraise=True)
class SearchfoxSelectionCommand(sublime_plugin.TextCommand):
def run(self, edit):
for sel in self.view.sel():
if not sel.empty():
open_search_tab(self.view.substr(sel), SELECTION)
class SearchfoxPathCommand(sublime_plugin.TextCommand):
def run(self, edit):
path = self.view.file_name().split(CHECKOUT).pop()
row = 0
for sel in self.view.sel():
row, _ = self.view.rowcol(sel.begin())
break
if row != 0:
path += "#" + str(row)
open_search_tab(path, PATH)
class SearchfoxQueryCommand(sublime_plugin.WindowCommand):
def run(self):
self.window.show_input_panel(
"Search %s:" % CHECKOUT, "", self._on_done, self._on_change, self._on_cancel
)
def _on_done(self, input):
open_search_tab(input, QUERY)
def _on_change(self, input):
pass
def _on_cancel(self, input):
pass
|
#
# SPDX-License-Identifier: Apache-2.0
#
# Copyright 2020 Andrey Pleshakov
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import annotations
from abc import abstractmethod
from typing import TYPE_CHECKING, Protocol
if TYPE_CHECKING:
from typing import Sequence
from thespiae.conf import AppEntry
from .data import DownloadSpec
class Feedback(Protocol):
@abstractmethod
def report_checking_software(self) -> None:
raise NotImplementedError
@abstractmethod
def confirm_operations(self, to_download: Sequence[DownloadSpec], to_uninstall: Sequence[AppEntry],
to_install: Sequence[AppEntry]) -> None:
raise NotImplementedError
@abstractmethod
def report_software_set_no_changes(self) -> None:
raise NotImplementedError
@abstractmethod
def report_download_started(self) -> None:
raise NotImplementedError
@abstractmethod
def report_entry_download_initiated(self, spec: DownloadSpec) -> None:
raise NotImplementedError
@abstractmethod
def report_entry_download_started(self, spec: DownloadSpec, current_size: int, total_size: int = None) -> None:
raise NotImplementedError
@abstractmethod
def report_entry_download_progress(self, spec: DownloadSpec, batch_size: int) -> None:
raise NotImplementedError
@abstractmethod
def report_entry_download_finished(self, spec: DownloadSpec) -> None:
raise NotImplementedError
@abstractmethod
def report_download_finished(self) -> None:
raise NotImplementedError
@abstractmethod
def report_removal_started(self) -> None:
raise NotImplementedError
@abstractmethod
def report_entry_removal_started(self, entry: AppEntry) -> None:
raise NotImplementedError
@abstractmethod
def report_entry_removal_finished(self, entry: AppEntry) -> None:
raise NotImplementedError
@abstractmethod
def report_removal_finished(self) -> None:
raise NotImplementedError
@abstractmethod
def report_installation_started(self) -> None:
raise NotImplementedError
@abstractmethod
def report_entry_installation_started(self, entry: AppEntry) -> None:
raise NotImplementedError
@abstractmethod
def report_entry_installation_finished(self, entry: AppEntry) -> None:
raise NotImplementedError
@abstractmethod
def report_installation_finished(self) -> None:
raise NotImplementedError
|
# -*- coding: utf-8 -*-
# --------------------------------------------------------------------------
# Copyright Commvault Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# --------------------------------------------------------------------------
"""File for operating on a Salesforce Backupset.
SalesforceBackupset is the only class defined in this file.
SalesforceBackupset: Derived class from CloudAppsBackupset Base class, representing a
salesforce backupset, and to perform operations on that backupset
SalesforceBackupset:
__init__() -- Backupset class method overwritten to add salesforce
browse options in default browse options
_get_backupset_properties() -- Backupset class method overwritten to add salesforce
backupset properties as well
_prepare_browse_json() -- Backupset class method overwritten to add salesforce
browse option
download_cache_path() -- Fetches download cache path from backupset
mutual_auth_path() -- Fetches mutual auth path from backupset
salesforce_user_name() -- Fetches salesforce user name from backupset
    is_sync_db_enabled() -- Determines whether sync database is enabled on the backupset
sync_db_type() -- Fetches sync database type from backupset
sync_db_host() -- Fetches sync database host name from backupset
    sync_db_instance() -- Fetches sync database instance name from backupset
sync_db_name() -- Fetches sync database name from backupset
sync_db_port() -- Fetches sync database port number from backupset
sync_db_user_name() -- Fetches sync database user name from backupset
"""
from __future__ import unicode_literals
from ..cabackupset import CloudAppsBackupset
class SalesforceBackupset(CloudAppsBackupset):
"""Derived class from CloudAppsBackupset Base class, representing a
salesforce backupset, and to perform operations on that backupset.
"""
def __init__(self, instance_object, backupset_name, backupset_id=None):
"""Initlializes instance of the Backupset class for the Salesforce instance.
Args:
instance_object (object) -- instance of the Instance class
backupset_name (str) -- name of backupset
backupset_id (int) -- id of backupset
Returns:
object - instance of the SalesforceBackupset class
"""
self._download_cache_path = None
self._mutual_auth_path = None
self._user_name = None
self._api_token = None
self._sync_db_enabled = None
self._sync_db_type = None
self._sync_db_host = None
self._sync_db_instance = None
self._sync_db_name = None
self._sync_db_port = None
self._sync_db_user_name = None
self._sync_db_user_password = None
super(SalesforceBackupset, self).__init__(instance_object, backupset_name, backupset_id)
salesforce_browse_options = {
'_browse_view_name_list': ['TBLVIEW', 'FILEVIEW']
}
self._default_browse_options.update(salesforce_browse_options)
def _get_backupset_properties(self):
"""Gets the properties of this backupset.
Raises:
SDKException:
if response is empty
if response is not success
"""
super(SalesforceBackupset, self)._get_backupset_properties()
if 'cloudAppsBackupset' in self._properties:
cloud_apps_backupset = self._properties['cloudAppsBackupset']
if 'salesforceBackupSet' in cloud_apps_backupset:
sfbackupset = cloud_apps_backupset['salesforceBackupSet']
if 'downloadCachePath' in sfbackupset:
self._download_cache_path = sfbackupset['downloadCachePath']
self._mutual_auth_path = sfbackupset.get('mutualAuthPath', '')
if 'userName' in sfbackupset['userPassword']:
self._user_name = sfbackupset['userPassword']['userName']
if 'syncDatabase' in sfbackupset:
self._sync_db_enabled = sfbackupset['syncDatabase'].get('dbEnabled', False)
if self._sync_db_enabled:
if 'dbType' in sfbackupset['syncDatabase']:
self._sync_db_type = sfbackupset['syncDatabase']['dbType']
if 'dbHost' in sfbackupset['syncDatabase']:
self._sync_db_host = sfbackupset['syncDatabase']['dbHost']
if 'dbInstance' in sfbackupset['syncDatabase']:
self._sync_db_instance = sfbackupset['syncDatabase']['dbInstance']
if 'dbName' in sfbackupset['syncDatabase']:
self._sync_db_name = sfbackupset['syncDatabase']['dbName']
if 'dbPort' in sfbackupset['syncDatabase']:
self._sync_db_port = sfbackupset['syncDatabase']['dbPort']
if 'userName' in sfbackupset['syncDatabase']['dbUserPassword']:
self._sync_db_user_name = sfbackupset[
'syncDatabase']['dbUserPassword']['userName']
if 'password' in sfbackupset['syncDatabase']['dbUserPassword']:
self._sync_db_user_password = sfbackupset[
'syncDatabase']['dbUserPassword']['password']
def _prepare_browse_json(self, options):
"""Prepares the JSON object for the browse request.
Args:
options (dict) -- the browse options dictionary
Returns:
dict - A JSON object for the browse response
"""
request_json = super(SalesforceBackupset, self)._prepare_browse_json(options)
salesforce_browse_view = {
'browseViewNameList': options['_browse_view_name_list']
}
request_json['advOptions'].update(salesforce_browse_view)
return request_json
@property
def download_cache_path(self):
"""getter for download cache path"""
return self._download_cache_path
@property
def mutual_auth_path(self):
"""getter for download cache path"""
return self._mutual_auth_path
@property
def salesforce_user_name(self):
"""getter for salesforce user name"""
return self._user_name
@property
def is_sync_db_enabled(self):
"""lets the user know whether sync db enabled or not"""
return self._sync_db_enabled
@property
def sync_db_type(self):
"""getter for the sync database type"""
return self._sync_db_type
@property
def sync_db_host(self):
"""getter for the sync database hostname"""
return self._sync_db_host
@property
def sync_db_instance(self):
"""getter for the sync database instance name"""
return self._sync_db_instance
@property
def sync_db_name(self):
"""getter for the sync database name"""
return self._sync_db_name
@property
def sync_db_port(self):
"""getter for the sync database port number"""
return self._sync_db_port
@property
def sync_db_user_name(self):
"""getter for the sync database user name"""
return self._sync_db_user_name
@mutual_auth_path.setter
def mutual_auth_path(self, value):
"""Sets mutual auth path for the backupset.
Args:
value (str) -- mutual auth certificate path on access node
"""
if self.mutual_auth_path != value:
if self.is_sync_db_enabled:
del self._properties['cloudAppsBackupset']['salesforceBackupSet']['syncDatabase']['dbUserPassword'][
'password']
self._properties['cloudAppsBackupset']['salesforceBackupSet']['mutualAuthPath'] = value
self.update_properties(self._properties)
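# Hedged usage sketch (not part of this SDK module; names below are assumptions):
#   backupset = SalesforceBackupset(instance, 'defaultbackupset')
#   print(backupset.salesforce_user_name, backupset.download_cache_path)
#   if backupset.is_sync_db_enabled:
#       print(backupset.sync_db_type, backupset.sync_db_host, backupset.sync_db_name)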
|
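# Hedged usage example for the helper below (illustrative): it returns the sorted,
# de-duplicated words of array1 that occur as substrings of some word in array2, e.g.
#   in_array(["arp", "live", "strong"], ["lively", "alive", "harp", "sharp", "armstrong"])
#   -> ['arp', 'live', 'strong']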
def in_array(array1, array2):
result=[]
for word_one in set(array1):
for word_two in array2:
if word_one in word_two:
result.append(word_one)
break
result.sort()
return result |
# Tudor Berariu, 2016
import math
from random import randint
from sys import argv
from zipfile import ZipFile
import matplotlib.markers
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
from scipy.cluster import hierarchy
from scipy.spatial.distance import euclidean
def getArchive():
archive_url = "http://www.uni-marburg.de/fb12/datenbionik/downloads/FCPS"
local_archive = "FCPS.zip"
from os import path
if not path.isfile(local_archive):
        import urllib.request
        print("downloading...")
        urllib.request.urlretrieve(archive_url, filename=local_archive)
assert path.isfile(local_archive)
print("got the archive")
return ZipFile(local_archive)
def getDataSet(archive, dataSetName):
path = "FCPS/01FCPSdata/" + dataSetName
lrnFile = path + ".lrn"
with archive.open(lrnFile, "r") as f: # open .lrn file
N = int(f.readline().decode("UTF-8").split()[1]) # number of examples
D = int(f.readline().decode("UTF-8").split()[1]) - 1 # number of columns
f.readline() # skip the useless line
f.readline() # skip columns' names
Xs = np.zeros([N, D])
for i in range(N):
data = f.readline().decode("UTF-8").strip().split("\t")
assert len(data) == (D + 1) # check line
assert int(data[0]) == (i + 1)
Xs[i] = np.array(list(map(float, data[1:])))
clsFile = path + ".cls"
with archive.open(clsFile, "r") as f: # open.cls file
labels = np.zeros(N).astype("uint")
line = f.readline().decode("UTF-8")
while line.startswith("%"): # skip header
line = f.readline().decode("UTF-8")
i = 0
while line and i < N:
data = line.strip().split("\t")
assert len(data) == 2
assert int(data[0]) == (i + 1)
labels[i] = int(data[1])
line = f.readline().decode("UTF-8")
i = i + 1
assert i == N
return Xs, labels # return data and correct classes
def dist(a, b, ax=1):
return np.linalg.norm(a - b, axis=ax)
def dummy(Xs):
(N, D) = Xs.shape
Z = np.zeros((N - 1, 4))
lastIndex = 0
for i in range(N - 1):
Z[i, 0] = lastIndex
Z[i, 1] = i + 1
Z[i, 2] = 0.1 + i
Z[i, 3] = i + 2
lastIndex = N + i
return Z
def singleLinkage(Xs):
(N, D) = Xs.shape
Z = np.zeros((N - 1, 4))
clusters = []
i = 0
# init
for x in Xs:
clusters.append([i, [x]])
i += 1
for i in range(N - 1):
dmin = math.inf
clust_idx1 = -1
clust_idx2 = -1
for cluster_idx, cluster in enumerate(clusters):
for point in cluster[1]:
for cluster_idx2, cluster2 in enumerate(clusters[(cluster_idx + 1) :]):
for point2 in cluster2[1]:
if euclidean(point, point2) < dmin:
dmin = euclidean(point, point2)
clust_idx1 = cluster_idx
clust_idx2 = cluster_idx2 + cluster_idx + 1
idx1 = clusters[clust_idx1][0]
idx2 = clusters[clust_idx2][0]
clust1_points = clusters[clust_idx1][1]
clust2_points = clusters[clust_idx2][1]
clust1_points.extend(clust2_points)
clusters[clust_idx1][0] = N + i
clusters.pop(clust_idx2)
Z[i, 0] = idx1
Z[i, 1] = idx2
Z[i, 2] = dmin
Z[i, 3] = len(clust1_points)
return Z
def completeLinkage(Xs):
(N, D) = Xs.shape
Z = np.zeros((N - 1, 4))
clusters = []
i = 0
# init
for x in Xs:
clusters.append([i, [x]])
i += 1
for i in range(N - 1):
dmin = math.inf
clust_idx1 = -1
clust_idx2 = -1
for cluster_idx, cluster in enumerate(clusters):
dmax = -math.inf
for point in cluster[1]:
for cluster_idx2, cluster2 in enumerate(clusters[(cluster_idx + 1) :]):
for point2 in cluster2[1]:
dist = euclidean(point, point2)
if dist < dmin:
dmin = dist
if dist > dmax:
dmax = dist
clust_idx1 = cluster_idx
clust_idx2 = cluster_idx2 + cluster_idx + 1
idx1 = clusters[clust_idx1][0]
idx2 = clusters[clust_idx2][0]
clust1_points = clusters[clust_idx1][1]
clust2_points = clusters[clust_idx2][1]
clust1_points.extend(clust2_points)
clusters[clust_idx1][0] = N + i
clusters.pop(clust_idx2)
Z[i, 0] = idx1
Z[i, 1] = idx2
Z[i, 2] = dmin
Z[i, 3] = len(clust1_points)
return Z
def groupAverageLinkage(Xs):
(N, D) = Xs.shape
Z = np.zeros((N - 1, 4))
clusters = []
i = 0
# init
for x in Xs:
clusters.append([i, [x]])
i += 1
for i in range(N - 1):
dmin = math.inf
clust_idx1 = -1
clust_idx2 = -1
dist = {}
for cluster_idx, cluster in enumerate(clusters):
dist[cluster_idx] = {}
for cluster_idx2, cluster2 in enumerate(clusters[(cluster_idx + 1) :]):
dist[cluster_idx][cluster_idx + cluster_idx2 + 1] = 0
for cluster_idx, cluster in enumerate(clusters):
for point in cluster[1]:
for cluster_idx2, cluster2 in enumerate(clusters[(cluster_idx + 1) :]):
for point2 in cluster2[1]:
dist[cluster_idx][cluster_idx + cluster_idx2 + 1] += (
1 / (len(cluster) * len(cluster2)) * euclidean(point, point2)
)
for cluster_idx, cluster in enumerate(clusters):
for cluster_idx2, cluster2 in enumerate(clusters[(cluster_idx + 1) :]):
d = dist[cluster_idx][cluster_idx + cluster_idx2 + 1]
if d < dmin:
dmin = d
clust_idx1 = cluster_idx
clust_idx2 = cluster_idx + cluster_idx2 + 1
idx1 = clusters[clust_idx1][0]
idx2 = clusters[clust_idx2][0]
clust1_points = clusters[clust_idx1][1]
clust2_points = clusters[clust_idx2][1]
clust1_points.extend(clust2_points)
clusters[clust_idx1][0] = N + i
clusters.pop(clust_idx2)
Z[i, 0] = idx1
Z[i, 1] = idx2
Z[i, 2] = dmin
Z[i, 3] = len(clust1_points)
return Z
def extractClusters(Xs, Z):
(N, D) = Xs.shape
assert Z.shape == (N - 1, 4)
# TODO 4
# return 1, np.zeros(N)
return 1, np.zeros(N).astype(int)
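# A hedged sketch of one way to complete the TODO above (kept as comments so the stub
# is left unchanged); K_target is an assumed parameter:
#   from scipy.cluster.hierarchy import fcluster
#   flat = fcluster(Z, t=K_target, criterion="maxclust")  # labels in 1..K_target
#   return flat.max(), (flat - 1).astype(int)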
def randIndex(clusters, labels):
assert labels.size == clusters.size
N = clusters.size
a = 0.0
b = 0.0
for (i, j) in [(i, j) for i in range(N) for j in range(i + 1, N) if i < j]:
if (
(clusters[i] == clusters[j])
and (labels[i] == labels[j])
or (clusters[i] != clusters[j])
and (labels[i] != labels[j])
):
a = a + 1
b = b + 1
return float(a) / float(b)
def plot(Xs, labels, K, clusters):
labelsNo = np.max(labels)
markers = [] # get the different markers
while len(markers) < labelsNo:
markers.extend(list(matplotlib.markers.MarkerStyle.filled_markers))
colors = plt.cm.rainbow(np.linspace(0, 1, K + 1))
if Xs.shape[1] == 2:
x = Xs[:, 0]
y = Xs[:, 1]
for (_x, _y, _c, _l) in zip(x, y, clusters, labels):
plt.scatter(_x, _y, s=200, c=[colors[_c]], marker=markers[_l])
plt.show()
elif Xs.shape[1] == 3:
x = Xs[:, 0]
y = Xs[:, 1]
z = Xs[:, 2]
fig = plt.figure()
ax = fig.add_subplot(111, projection="3d")
for (_x, _y, _z, _c, _l) in zip(x, y, z, clusters, labels):
ax.scatter(_x, _y, _z, s=200, c=[colors[_c]], marker=markers[_l])
plt.show()
if __name__ == "__main__":
if len(argv) < 2:
print("Usage: " + argv[0] + " dataset_name")
exit()
Xs, labels = getDataSet(getArchive(), argv[1]) # Xs is NxD, labels is Nx1
# Z = singleLinkage(Xs)
Z = completeLinkage(Xs)
# Z = groupAverageLinkage(Xs)
# plt.figure()
dn = hierarchy.dendrogram(Z)
# plt.show()
K, clusters = extractClusters(Xs, Z)
print("randIndex: ", randIndex(clusters, labels))
plot(Xs, labels, K, clusters)
|
# Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
"""asyncio library query support"""
import socket
import asyncio
import dns._asyncbackend
import dns.exception
def _get_running_loop():
try:
return asyncio.get_running_loop()
except AttributeError: # pragma: no cover
return asyncio.get_event_loop()
class _DatagramProtocol:
def __init__(self):
self.transport = None
self.recvfrom = None
def connection_made(self, transport):
self.transport = transport
def datagram_received(self, data, addr):
if self.recvfrom:
self.recvfrom.set_result((data, addr))
self.recvfrom = None
def error_received(self, exc): # pragma: no cover
if self.recvfrom and not self.recvfrom.done():
self.recvfrom.set_exception(exc)
def connection_lost(self, exc):
if self.recvfrom and not self.recvfrom.done():
self.recvfrom.set_exception(exc)
def close(self):
self.transport.close()
async def _maybe_wait_for(awaitable, timeout):
if timeout:
try:
return await asyncio.wait_for(awaitable, timeout)
except asyncio.TimeoutError:
raise dns.exception.Timeout(timeout=timeout)
else:
return await awaitable
class DatagramSocket(dns._asyncbackend.DatagramSocket):
def __init__(self, family, transport, protocol):
self.family = family
self.transport = transport
self.protocol = protocol
async def sendto(self, what, destination, timeout): # pragma: no cover
# no timeout for asyncio sendto
self.transport.sendto(what, destination)
async def recvfrom(self, size, timeout):
# ignore size as there's no way I know to tell protocol about it
done = _get_running_loop().create_future()
assert self.protocol.recvfrom is None
self.protocol.recvfrom = done
await _maybe_wait_for(done, timeout)
return done.result()
async def close(self):
self.protocol.close()
async def getpeername(self):
return self.transport.get_extra_info('peername')
async def getsockname(self):
return self.transport.get_extra_info('sockname')
class StreamSocket(dns._asyncbackend.StreamSocket):
def __init__(self, af, reader, writer):
self.family = af
self.reader = reader
self.writer = writer
async def sendall(self, what, timeout):
self.writer.write(what)
return await _maybe_wait_for(self.writer.drain(), timeout)
async def recv(self, count, timeout):
return await _maybe_wait_for(self.reader.read(count),
timeout)
async def close(self):
self.writer.close()
try:
await self.writer.wait_closed()
except AttributeError: # pragma: no cover
pass
async def getpeername(self):
return self.writer.get_extra_info('peername')
async def getsockname(self):
return self.writer.get_extra_info('sockname')
class Backend(dns._asyncbackend.Backend):
def name(self):
return 'asyncio'
async def make_socket(self, af, socktype, proto=0,
source=None, destination=None, timeout=None,
ssl_context=None, server_hostname=None):
loop = _get_running_loop()
if socktype == socket.SOCK_DGRAM:
transport, protocol = await loop.create_datagram_endpoint(
_DatagramProtocol, source, family=af,
proto=proto)
return DatagramSocket(af, transport, protocol)
elif socktype == socket.SOCK_STREAM:
(r, w) = await _maybe_wait_for(
asyncio.open_connection(destination[0],
destination[1],
ssl=ssl_context,
family=af,
proto=proto,
local_addr=source,
server_hostname=server_hostname),
timeout)
return StreamSocket(af, r, w)
raise NotImplementedError('unsupported socket ' +
f'type {socktype}') # pragma: no cover
async def sleep(self, interval):
await asyncio.sleep(interval)
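# Hedged usage sketch (not part of dnspython itself): this backend is what the async
# query helpers use when running under asyncio, e.g.
#   import asyncio, dns.asyncquery, dns.message
#   async def lookup():
#       q = dns.message.make_query("example.com", "A")
#       return await dns.asyncquery.udp(q, "8.8.8.8")
#   asyncio.run(lookup())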
|
#class SVNRepo:
# @classmethod
# def isBadVersion(cls, id)
# # Run unit tests to check whether verison `id` is a bad version
# # return true if unit tests passed else false.
# You can use SVNRepo.isBadVersion(10) to check whether version 10 is a
# bad version.
class Solution:
"""
@param n: An integers.
@return: An integer which is the first bad version.
"""
def findFirstBadVersion(self, n):
# write your code here
start, end = 1, n
if (n == 1):
return 1
while (start <= end):
            i = (start + end) // 2
if (not SVNRepo.isBadVersion(i)):
start = i + 1
else:
end = i - 1
return start
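# Worked example (hypothetical versions): if versions 1..3 pass the unit tests and
# versions 4..n fail them, isBadVersion first returns True at 4 and
# findFirstBadVersion(n) narrows [1, n] down to return 4.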
|
from pytest import mark, raises
import quirinius as qu
import numpy as np
import warnings
pctl50 = np.array([.5])
class Test_ValAtQtl:
def test_bounds(self):
nvals = 11
vals = np.linspace(0., 1., nvals)
cumul_qtl = (np.linspace(1., nvals, nvals) - 0.5) / nvals
qtl = np.array([0., 1.])
with warnings.catch_warnings():
warnings.filterwarnings(
'ignore', message='numpy.ufunc size changed',
category=RuntimeWarning)
vaqs = qu.val_at_qtl_(vals, cumul_qtl, qtl)
assert np.isnan(vaqs).sum() == 2
def test_exact(self):
nvals = 11
vals = np.linspace(0., 1., nvals)
cumul_qtl = (np.linspace(1., nvals, nvals) - 0.5) / nvals
qtl = np.array([0.5])
with warnings.catch_warnings():
warnings.filterwarnings(
'ignore', message='numpy.ufunc size changed',
category=RuntimeWarning)
q = qu.val_at_qtl_(vals, cumul_qtl, qtl).squeeze()
        assert np.isclose(q, np.median(vals))
class Test_wq_:
pass
class Test_wq:
pass
|
import subprocess
import sys
import pkg_resources
try:
pkg_resources.get_distribution("httpx")
except pkg_resources.DistributionNotFound:
hyper_dist = "hyper@https://github.com/Lukasa/hyper/archive/development.tar.gz"
subprocess.check_call([sys.executable, "-m", "pip", "install", "--upgrade", hyper_dist, 'httpx'])
if pkg_resources.get_distribution("h2").version < '4':
subprocess.check_call([sys.executable, "-m", "pip", "install", "--upgrade", 'h2'])
import unittest
from http.client import OK, NOT_FOUND
from typing import List
import jsondiff
from httpx import Client, Response
from tests.test_settings import TEST_TUNNEL_HOST
from tests.test_utils import get_test_adapter_options
from tests.validation_data import HOWSMYSSL_VALIDATION_RESPONSE
from tcp_tls_tunnel.httpx_adapter import TunnelHTTPTransport
class TestTunnelHttpxHttp20Request(unittest.TestCase):
def setUp(self) -> None:
self.transport = TunnelHTTPTransport(
adapter_opts=get_test_adapter_options()
)
def test_tunnel_ip_request(self):
with Client(transport=self.transport) as client:
response: Response = client.get("https://api.myip.com/")
self.assertEqual(response.status_code, OK)
response_json: dict = response.json()
self.assertEqual(response_json.get("ip"), TEST_TUNNEL_HOST)
def test_tunnel_ssl_request(self):
with Client(transport=self.transport) as client:
response: Response = client.get('https://www.howsmyssl.com/a/check')
response_json: dict = response.json()
diff: dict = jsondiff.diff(HOWSMYSSL_VALIDATION_RESPONSE, response_json)
given_cipher_suites: List[str] = diff["given_cipher_suites"]
self.assertEqual(len(given_cipher_suites[jsondiff.symbols.insert]), 1,
msg="[given_cipher_suites] TLS_GREASE_IS INSERT parameter check failed.")
self.assertEqual(len(given_cipher_suites[jsondiff.symbols.delete]), 1,
msg="[given_cipher_suites] TLS_GREASE_IS DELETE parameter check failed.")
def test_several_tunnel_requests(self):
with Client(transport=self.transport) as client:
for url in ["https://www.howsmyssl.com/",
"https://www.howsmyssl.com/s/api.html",
"https://www.howsmyssl.com/s/about.html"]:
response: Response = client.get(url)
self.assertEqual(response.status_code, OK)
failed_response: Response = client.get("https://www.howsmyssl.com/s/api")
self.assertEqual(failed_response.status_code, NOT_FOUND)
def test_http2_tunnel_request(self):
with Client(transport=self.transport) as client:
response: Response = client.get("https://http2.pro/api/v1")
response_json: dict = response.json()
self.assertEqual(response_json.get("http2"), 1)
self.assertEqual(response_json.get("protocol"), 'HTTP/2.0')
def test_http_without_tls_tunnel_request(self):
with Client(transport=self.transport) as client:
response: Response = client.get("http://httpbin.org/get")
response_json: dict = response.json()
self.assertEqual(response.status_code, OK)
self.assertEqual(response_json.get("origin"), TEST_TUNNEL_HOST) |
# License: license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.utils import flt, getdate
def execute(filters=None):
if not filters: filters = {}
columns = get_columns(filters)
item_map = get_item_details(filters)
iwb_map = get_item_warehouse_map(filters)
data = []
bal_kg, bal_packets, bal_bags = "","",""
for (company, item, warehouse) in sorted(iwb_map):
qty_dict = iwb_map[(company, item, warehouse)]
# Calculate UOM Table for NPPL
bal_kg = qty_dict.bal_qty if item_map[item]["stock_uom"] == "Kg" \
else convert_to_uom(item, qty_dict.bal_qty, item_map[item]["stock_uom"], "Kg")
bal_packets = qty_dict.bal_qty if item_map[item]["stock_uom"] == "Packets" \
else convert_to_uom(item, qty_dict.bal_qty, item_map[item]["stock_uom"], "Packets")
bal_bags = qty_dict.bal_qty if item_map[item]["stock_uom"] == "Bags" \
else convert_to_uom(item, qty_dict.bal_qty, item_map[item]["stock_uom"], "Bags")
data.append([item, item_map[item]["item_name"],
item_map[item]["item_group"],
item_map[item]["brand"],
item_map[item]["description"], warehouse,
item_map[item]["stock_uom"], qty_dict.opening_qty,
qty_dict.opening_val, qty_dict.in_qty,
qty_dict.in_val, qty_dict.out_qty,
qty_dict.out_val, qty_dict.bal_qty,
bal_kg, bal_packets, bal_bags,
qty_dict.bal_val, qty_dict.val_rate,
company
])
return columns, data
def get_columns(filters):
"""return columns based on filters"""
columns = [
_("Item")+":Link/Item:100",
_("Item Name")+"::150",
_("Item Group")+"::100",
_("Brand")+"::90",
_("Description")+"::140",
_("Warehouse")+":Link/Warehouse:100",
_("Stock UOM")+":Link/UOM:90",
_("Opening Qty")+":Float:100",
_("Opening Value")+":Float:110",
_("In Qty")+":Float:80",
_("In Value")+":Float:80",
_("Out Qty")+":Float:80",
_("Out Value")+":Float:80",
_("Balance Qty")+":Float:100",
_("Kg")+"::100",
_("Packets")+"::100",
_("Bags")+"::100",
_("Balance Value")+":Float:100",
_("Valuation Rate")+":Float:90",
_("Company")+":Link/Company:100"
]
return columns
def get_conditions(filters):
conditions = ""
if not filters.get("from_date"):
frappe.throw(_("'From Date' is required"))
if filters.get("to_date"):
conditions += " and posting_date <= '%s'" % frappe.db.escape(filters["to_date"])
else:
frappe.throw(_("'To Date' is required"))
if filters.get("item_code"):
conditions += " and item_code = '%s'" % frappe.db.escape(filters.get("item_code"), percent=False)
return conditions
#get all details
def get_stock_ledger_entries(filters):
conditions = get_conditions(filters)
return frappe.db.sql("""select item_code, warehouse, posting_date, actual_qty, valuation_rate,
company, voucher_type, qty_after_transaction, stock_value_difference
from `tabStock Ledger Entry` force index (posting_sort_index)
where docstatus < 2 %s order by posting_date, posting_time, name""" %
conditions, as_dict=1)
def get_item_warehouse_map(filters):
iwb_map = {}
from_date = getdate(filters["from_date"])
to_date = getdate(filters["to_date"])
sle = get_stock_ledger_entries(filters)
for d in sle:
key = (d.company, d.item_code, d.warehouse)
if key not in iwb_map:
iwb_map[key] = frappe._dict({
"opening_qty": 0.0, "opening_val": 0.0,
"in_qty": 0.0, "in_val": 0.0,
"out_qty": 0.0, "out_val": 0.0,
"bal_qty": 0.0, "bal_val": 0.0,
"val_rate": 0.0, "uom": None
})
qty_dict = iwb_map[(d.company, d.item_code, d.warehouse)]
if d.voucher_type == "Stock Reconciliation":
qty_diff = flt(d.qty_after_transaction) - qty_dict.bal_qty
else:
qty_diff = flt(d.actual_qty)
value_diff = flt(d.stock_value_difference)
if d.posting_date < from_date:
qty_dict.opening_qty += qty_diff
qty_dict.opening_val += value_diff
elif d.posting_date >= from_date and d.posting_date <= to_date:
if qty_diff > 0:
qty_dict.in_qty += qty_diff
qty_dict.in_val += value_diff
else:
qty_dict.out_qty += abs(qty_diff)
qty_dict.out_val += abs(value_diff)
qty_dict.val_rate = d.valuation_rate
qty_dict.bal_qty += qty_diff
qty_dict.bal_val += value_diff
return iwb_map
def get_item_details(filters):
item_map = {}
for d in frappe.db.sql("select name, item_name, stock_uom, item_group, brand, \
description from tabItem", as_dict=1):
item_map.setdefault(d.name, d)
return item_map
def convert_to_uom(item, qty, from_uom, to_uom):
out = " "
con_rate = get_conversion_rate(item)
if from_uom == "Kg":
if to_uom == "Packets":
out = qty * con_rate.get("to_packets")
elif to_uom == "Bags":
out = qty * con_rate.get("to_bags")
if from_uom == "Packets":
if to_uom == "Kg":
out = qty * con_rate.get("to_kg")
elif to_uom == "Bags":
out = qty * con_rate.get("to_bags")
if from_uom == "Bags":
if to_uom == "Kg":
out = qty * con_rate.get("to_kg")
elif to_uom == "Packets":
out = qty * con_rate.get("to_packets")
return out
def get_conversion_rate(item):
to_kg, to_packets, to_bags = 0,0,0
bom_name = frappe.db.get_value("BOM", {"item":item, "is_default":1}, "name")
quantity = flt(frappe.db.get_value("BOM", {"item":item, "is_default":1}, "quantity"))
qty = flt(frappe.db.get_value("BOM Item", {"parent":bom_name,"idx":1}, "qty"))
if frappe.get_value("Item", {"name":item}, "stock_uom") == "Kg":
to_kg = 1
if quantity and qty:
to_packets = qty / quantity
to_bags = qty * quantity # if any error use that
elif frappe.get_value("Item", {"name":item}, "stock_uom") == "Packets":
to_packets = 1
if quantity and qty:
to_kg = qty / quantity
to_bags = flt(1 / (quantity * qty),4)
elif frappe.get_value("Item", {"name":item}, "stock_uom") == "Bags":
to_bags = 1
if quantity and qty:
to_packets = qty / quantity
to_kg = quantity / qty # use this
out = {
"to_kg": to_kg,
"to_packets": to_packets,
"to_bags": to_bags
}
return out
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
Module that contains triangles validation implementation
"""
from __future__ import print_function, division, absolute_import
__author__ = "Tomas Poveda"
__license__ = "MIT"
__maintainer__ = "Tomas Poveda"
__email__ = "[email protected]"
import tpDcc as tp
import pyblish.api
class SelectTriangles(pyblish.api.Action):
label = 'Select Triangles'
on = 'failed'
def process(self, context, plugin):
if not tp.is_maya():
self.log.warning('Select Triangles Action is only available in Maya!')
return False
for instance in context:
if not instance.data['publish'] or not instance.data['_has_failed']:
continue
node = instance.data.get('node', None)
assert node and tp.Dcc.object_exists(node), 'No valid node found in current instance: {}'.format(instance)
triangles = instance.data.get('triangles', None)
assert triangles, 'No triangles geometry found in instance: {}'.format(instance)
tp.Dcc.select_object(triangles, replace_selection=False)
class ValidateTriangles(pyblish.api.InstancePlugin):
"""
    If one of the geometries is triangulated, we must ensure that the rest of the geometry is also triangulated
"""
label = 'Topology - Triangles'
order = pyblish.api.ValidatorOrder
hosts = ['maya']
families = ['geometry']
optional = False
actions = [SelectTriangles]
def process(self, instance):
import maya.api.OpenMaya as OpenMaya
node = instance.data.get('node', None)
assert tp.Dcc.object_exists(node), 'No valid node found in current instance: {}'.format(instance)
nodes_to_check = self._nodes_to_check(node)
assert nodes_to_check, 'No Nodes to check found!'
meshes_selection_list = OpenMaya.MSelectionList()
for node in nodes_to_check:
meshes_selection_list.add(node)
triangles_found = list()
total_nodes = len(nodes_to_check)
tringulated_meshes = 0
sel_it = OpenMaya.MItSelectionList(meshes_selection_list)
while not sel_it.isDone():
mesh_triangles = list()
face_it = OpenMaya.MItMeshPolygon(sel_it.getDagPath())
object_name = sel_it.getDagPath().getPath()
while not face_it.isDone():
num_of_edges = face_it.getEdges()
if len(num_of_edges) == 3:
face_index = face_it.index()
component_name = '{}.f[{}]'.format(object_name, face_index)
mesh_triangles.append(component_name)
triangles_found.append(component_name)
tringulated_meshes += 1
face_it.next(None)
if mesh_triangles:
self.log.info('Geometry {} has triangles!'.format(object_name))
# assert mesh_triangles, 'Mesh with no triangles found: {}'.format(object_name)
sel_it.next()
if triangles_found:
instance.data['triangles'] = triangles_found
assert tringulated_meshes == total_nodes, 'Not all meshes of {} are triangulated!'.format(instance)
def _nodes_to_check(self, node):
valid_nodes = list()
nodes = tp.Dcc.list_children(node=node, all_hierarchy=True, full_path=True, children_type='transform')
if not nodes:
nodes = [node]
else:
nodes.append(node)
for node in nodes:
shapes = tp.Dcc.list_shapes(node=node, full_path=True)
if not shapes:
continue
valid_nodes.append(node)
return valid_nodes
|
import numpy as np
from tqdm import tqdm

# NOTE: TD_learning and env are assumed to be defined elsewhere (e.g. earlier in the
# surrounding notebook/module); only the clearly missing imports are added here.


def show_figure(prob_Q_A_left, prob_E_A_left, prob_AD_A_left, prob_Q2_A_left):
import matplotlib.pyplot as plt
plt.ylabel('% left actions from A')
plt.xlabel('Episodes')
x_ticks = np.arange(0, 301, 20)
y_ticks = np.arange(0, 1.1, 0.1)
plt.xticks(x_ticks)
plt.yticks(y_ticks, ['0%', '10%', '20%', '30%', '40%', '50%', '60%', '70%', '80%', '90%', '100%'])
plt.plot(range(300), prob_Q_A_left, '-', label='Q Learning')
plt.plot(range(300), prob_E_A_left, '-', label='Double Q-Learning')
plt.plot(range(300), prob_AD_A_left, '-', label='Action Distribution')
plt.plot(range(300), prob_Q2_A_left, '-', label='Expected Sarsa')
plt.plot(np.ones(300) * 0.05, label='Optimal')
plt.title('Comparison of the effect of 4 algorithms on Ex 6.7')
plt.legend()
plt.grid()
plt.show()
plt.close()
total_num = 1000
A_Q_lst, B_Q_lst = np.zeros((total_num, 300)), np.zeros((total_num, 300))
A_Q2_lst, B_Q2_lst = np.zeros((total_num, 300)), np.zeros((total_num, 300))
A_AD_lst, B_AD_lst = np.zeros((total_num, 300)), np.zeros((total_num, 300))
A_E_lst, B_E_lst = np.zeros((total_num, 300)), np.zeros((total_num, 300))
prob_Q_A_left = np.zeros((total_num, 300))
prob_Q2_A_left = np.zeros((total_num, 300))
prob_AD_A_left = np.zeros((total_num, 300))
prob_E_A_left = np.zeros((total_num, 300))
# compute the probability of choosing action 'left' in STATE_A
alpha = 0.1
start_epsilon = 0.1
gamma = 0.9
num_of_episode = 300
for num in tqdm(range(total_num)):
_, A_left1, A_Q1, B_Q1 = TD_learning(env, 'Q-Learning', alpha, epsilon_scope=[start_epsilon, 0.05, 1],
num_of_episode=num_of_episode, gamma=gamma)
_, A_left2, A_Q2, B_Q2 = TD_learning(env, 'Double-Q', alpha, epsilon_scope=[start_epsilon, 0.05, 1],
num_of_episode=num_of_episode, gamma=gamma)
_, A_left3, A_Q3, B_Q3 = TD_learning(env, 'Action_Distribution', alpha, epsilon_scope=[start_epsilon, 0.05, 1],
num_of_episode=num_of_episode, gamma=gamma)
_, A_left4, A_Q4, B_Q4 = TD_learning(env, 'Expected_Sarsa', alpha, epsilon_scope=[start_epsilon, 0.05, 1],
num_of_episode=num_of_episode, gamma=gamma)
prob_Q_A_left[int(num)] = A_left1
prob_Q2_A_left[int(num)] = A_left2
prob_AD_A_left[int(num)] = A_left3
prob_E_A_left[int(num)] = A_left4
A_Q_lst[int(num)], B_Q_lst[int(num)] = A_Q1, B_Q1
A_Q2_lst[int(num)], B_Q2_lst[int(num)] = A_Q2, B_Q2
A_AD_lst[int(num)], B_AD_lst[int(num)] = A_Q3, B_Q3
A_E_lst[int(num)], B_E_lst[int(num)] = A_Q4, B_Q4
a = prob_Q_A_left.mean(axis=0)
b = prob_Q2_A_left.mean(axis=0)
c = prob_AD_A_left.mean(axis=0)
d = prob_E_A_left.mean(axis=0)
show_figure(a, b, c, d) |
# -*- coding: utf-8 -*-
"""
This module provides an abstract base class for invocation plugins.
"""
# Future
from __future__ import absolute_import, division, print_function, \
unicode_literals, with_statement
# Standard Library
from abc import ABCMeta, abstractmethod
class BasePlugin(object):
"""
Abstract base class for invocation plugins.
Plugin developers can either derive their objects directly from this class
or from :class:`metaopt.plugin.plugin.DummyPlugin` to only override
methods selectively.
"""
__metaclass__ = ABCMeta
@abstractmethod
def __init__(self):
super(BasePlugin, self).__init__()
@abstractmethod
def setup(self, f, param_spec, return_spec):
"""
Called before the invoker calls the objective function the first time
:param f: Objective function
:param param_spec: Parameter specification
:param return_spec: Return value specification
"""
pass
@abstractmethod
def before_invoke(self, invocation):
"""
Called right before the invoker calls the objective function
:param invocation: Information about the current (and past) invocations
:type invocation: :class:`metaopt.invoker.pluggable.Invocation`
"""
pass
@abstractmethod
def on_invoke(self, invocation):
"""
Called after the invoker called the objective function
Since objective functions are usually called asynchronously `invocation`
will not contain any results yet.
:param invocation: Information about the current (and past) invocations
:type invocation: :class:`metaopt.invoker.pluggable.Invocation`
"""
pass
@abstractmethod
def on_result(self, invocation):
"""
Called when the invocation of the objective function was successful
:param invocation: Information about the current (and past) invocations
:type invocation: :class:`metaopt.invoker.pluggable.Invocation`
"""
pass
@abstractmethod
def on_error(self, invocation):
"""
Called when the invocation of the objective function was not successful
Since the invocation was not successful `invocation` will not contain
any result.
:param invocation: Information about the current (and past) invocations
:type invocation: :class:`metaopt.invoker.pluggable.Invocation`
"""
pass
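# Illustrative only (not part of the original module): a minimal concrete
# plugin sketch showing how the hooks above can be overridden. The bodies just
# print what happens at each stage; real plugins would do their own bookkeeping.
class ExamplePrintingPlugin(BasePlugin):
    """Example plugin that prints a line for each lifecycle hook."""
    def __init__(self):
        super(ExamplePrintingPlugin, self).__init__()
    def setup(self, f, param_spec, return_spec):
        print("setup for objective function: %r" % (f,))
    def before_invoke(self, invocation):
        print("about to invoke: %r" % (invocation,))
    def on_invoke(self, invocation):
        print("invoked (no result yet): %r" % (invocation,))
    def on_result(self, invocation):
        print("result available: %r" % (invocation,))
    def on_error(self, invocation):
        print("invocation failed: %r" % (invocation,))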
|
import groups
from base import OcgFunction, OcgCvArgFunction, OcgArgFunction
import numpy as np
from ocgis.util.helpers import iter_array
class SampleSize(OcgFunction):
'''
.. note:: Automatically added by OpenClimateGIS. This should generally not be invoked manually.
n: Statistical sample size.
'''
name = 'n'
description = 'Statistical sample size.'
Group = groups.BasicStatistics
dtype = int
@staticmethod
def _calculate_(values):
ret = np.empty(values.shape[-2:],dtype=int)
ret[:] = values.shape[0]
ret = np.ma.array(ret,mask=values.mask[0,0,:])
return(ret)
@staticmethod
def _aggregate_spatial_(values,weights):
return(np.ma.sum(values))
class Median(OcgFunction):
description = 'Median value for the series.'
Group = groups.BasicStatistics
dtype = float
@staticmethod
def _calculate_(values):
return(np.median(values,axis=0))
class Mean(OcgFunction):
description = 'Mean value for the series.'
Group = groups.BasicStatistics
dtype = float
@staticmethod
def _calculate_(values):
return(np.mean(values,axis=0))
class Max(OcgFunction):
description = 'Max value for the series.'
Group = groups.BasicStatistics
dtype = float
@staticmethod
def _calculate_(values):
return(np.max(values,axis=0))
class Min(OcgFunction):
description = 'Min value for the series.'
Group = groups.BasicStatistics
dtype = float
@staticmethod
def _calculate_(values):
return(np.min(values,axis=0))
class StandardDeviation(OcgFunction):
description = 'Standard deviation for the series.'
Group = groups.BasicStatistics
dtype = float
name = 'std'
@staticmethod
def _calculate_(values):
return(np.std(values,axis=0))
class MaxConsecutive(OcgArgFunction):
name = 'max_cons'
nargs = 2
Group = groups.Thresholds
dtype = int
description = ('Maximum number of consecutive occurrences in the sequence'
' where the logical operation returns TRUE.')
@staticmethod
def _calculate_(values,threshold=None,operation=None):
## time index reference
ref = np.arange(0,values.shape[0])
## storage array for counts
store = np.empty(list(values.shape)[1:])
## perform requested logical operation
if operation == 'gt':
arr = values > threshold
elif operation == 'lt':
arr = values < threshold
elif operation == 'gte':
arr = values >= threshold
        elif operation == 'lte':
            arr = values <= threshold
        else:
            raise(ValueError('unknown operation: {0}'.format(operation)))
## find longest sequence for each geometry across time dimension
for xidx,yidx in iter_array(values[0,:]):
vec = arr[:,xidx,yidx]
# ## collapse data if no axis provided
# if axis is None:
# vec = vec.reshape(-1)
## check first if there is a longer series than 1
if np.any(np.diff(ref[vec]) == 1):
split_idx = ref[np.diff(vec)] + 1
splits = np.array_split(vec,split_idx)
sums = [a.sum() for a in splits if np.all(a)]
fill = np.max(sums)
## case of only a singular occurrence
elif np.any(vec):
fill = 1
## case for no occurrence
else:
fill = 0
store[xidx,yidx] = fill
# ## summarize across geometries if axis is collapsed
# if axis is None:
# store = np.max(store)
return(store)
class Between(OcgArgFunction):
nargs = 2
description = 'Count of values falling within the limits lower and upper (inclusive).'
Group = groups.Thresholds
dtype = int
@staticmethod
def _calculate_(values,lower=None,upper=None):
idx = (values >= lower)*(values <= upper)
return(np.sum(idx,axis=0))
#class FooMulti(OcgCvArgFunction):
# description = 'Meaningless test statistic.'
# Group = groups.MultivariateStatistics
# dtype = float
# nargs = 2
# keys = ['foo','foo2']
#
# @staticmethod
# def _calculate_(foo=None,foo2=None):
# ret = foo + foo2
# ret = 2*ret
# ret = np.mean(ret,axis=0)
# return(ret)
class HeatIndex(OcgCvArgFunction):
description = 'Heat Index following: http://en.wikipedia.org/wiki/Heat_index. If temperature is < 80F or relative humidity is < 40%, the value is masked during calculation. Output units are Fahrenheit.'
Group = groups.MultivariateStatistics
dtype = float
nargs = 2
keys = ['tas','rhs']
name = 'heat_index'
@staticmethod
def _calculate_(tas=None,rhs=None,units=None):
if units == 'k':
tas = 1.8*(tas - 273.15) + 32
else:
raise(NotImplementedError)
c1 = -42.379
c2 = 2.04901523
c3 = 10.14333127
c4 = -0.22475541
c5 = -6.83783e-3
c6 = -5.481717e-2
c7 = 1.22874e-3
c8 = 8.5282e-4
c9 = -1.99e-6
idx = tas < 80
tas.mask = np.logical_or(idx,tas.mask)
idx = rhs < 40
rhs.mask = np.logical_or(idx,rhs.mask)
tas_sq = np.square(tas)
rhs_sq = np.square(rhs)
hi = c1 + c2*tas + c3*rhs + c4*tas*rhs + c5*tas_sq + c6*rhs_sq + \
c7*tas_sq*rhs + c8*tas*rhs_sq + c9*tas_sq*rhs_sq
return(hi)
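    # Illustrative usage sketch (assumed inputs, not from this module): tas and
    # rhs are numpy masked arrays on a (time, y, x) grid, with tas in Kelvin.
    # hi = HeatIndex._calculate_(tas=tas_kelvin, rhs=rhs_percent, units='k')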
|
count = 0
for i in range(6):
k = float(input())
if k > 0:
count += 1
print("{0} valores positivos".format(count)) |
#from distutils.core import setup
import setuptools
setuptools.setup(name='target_chembl',
version='0.0.7',
scripts=['patho_chembl/chembldb_pfam_mech.py', 'patho_chembl/chembldb_pfam_assay.py', 'patho_chembl/mol_trg.py', 'patho_chembl/pfam_df_update.py', 'patho_chembl/pfam_mol_assay.py',
'patho_chembl/pfam_mol_mech.py', 'patho_chembl/pfam_trg_sql_assay.py', 'patho_chembl/pfam_trg_sql_mecanism.py', 'patho_chembl/tanimoto.py', 'patho_chembl/trg_mol.py', 'patho_chembl/trg_mol_funcion.py'],
                 install_requires=['requests', 'argparse', 'chembl_webresource_client', 'pandas'],
author='Florencia A. Castello',
license='MIT license',
author_email='[email protected]',
description='Simple interface for ChEMBL DB',
url='https://github.com/florenciacastello/target_chembl',
packages=setuptools.find_packages(),
long_description='',
python_requires='>=3.6'
)
|
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import datetime
import unittest
import webapp2
import webtest
from google.appengine.ext import ndb
from dashboard import benchmark_health_report
from dashboard import update_test_suites
from dashboard.common import stored_object
from dashboard.common import testing_common
from dashboard.common import utils
from dashboard.models import anomaly
from dashboard.models import graph_data
class BenchmarkHealthReportTest(testing_common.TestCase):
def setUp(self):
super(BenchmarkHealthReportTest, self).setUp()
app = webapp2.WSGIApplication(
[('/benchmark_health_report',
benchmark_health_report.BenchmarkHealthReportHandler)])
self.testapp = webtest.TestApp(app)
def _AddAnomalyEntities(
self, revision_ranges, test_key, bug_id=None):
"""Adds a group of Anomaly entities to the datastore."""
urlsafe_keys = []
for start_rev, end_rev in revision_ranges:
anomaly_key = anomaly.Anomaly(
start_revision=start_rev, end_revision=end_rev,
test=test_key, bug_id=bug_id,
median_before_anomaly=100, median_after_anomaly=200).put()
urlsafe_keys.append(anomaly_key.urlsafe())
return urlsafe_keys
def _AddTests(self):
"""Adds sample TestMetadata entities and returns their keys."""
testing_common.AddTests(['ChromiumPerf'], ['linux'], {
'sunspider': {
'Total': {},
'ref': {},
},
'page_cycler': {
'load_time': {
'cnn.com': {},
'google.com': {},
}
}
})
tests = graph_data.TestMetadata.query()
for test in tests:
test.improvement_direction = anomaly.DOWN
ndb.put_multi(tests)
def _AddCachedSuites(self):
test_suites = {
'sunspider': {
'mas': {'ChromiumPerf': {'mac': False, 'linux': False}},
'mon': [],
},
'page_cycler': {
'mas': {'ChromiumPerf': {'linux': False}, 'CrOS': {'foo': False}},
'mon': ['load_time'],
},
'speedometer': {
'mas': {'CrOS': {'foo': False, 'bar': False}},
'mon': [],
}
}
key = update_test_suites._NamespaceKey(
update_test_suites._LIST_SUITES_CACHE_KEY)
stored_object.Set(key, test_suites)
def testGet(self):
response = self.testapp.get('/benchmark_health_report')
self.assertEqual('text/html', response.content_type)
self.assertIn('Chrome Performance Dashboard', response.body)
def testPost_MasterArgument_ListsTestsForMaster(self):
self._AddCachedSuites()
response = self.testapp.post(
'/benchmark_health_report', {'master': 'CrOS'})
benchmark_list = self.GetJsonValue(response, 'benchmarks')
self.assertItemsEqual(benchmark_list, [{
'name': 'page_cycler',
'monitored': True,
'bots': ['foo'],
}, {
'name': 'speedometer',
'monitored': False,
'bots': ['bar', 'foo'],
}])
def testPost_BenchmarkArgument_ListsAlertsAndBots(self):
self._AddCachedSuites()
self._AddTests()
self._AddAnomalyEntities(
[(200, 400), (600, 800)],
utils.TestKey('ChromiumPerf/linux/page_cycler/load_time'))
self._AddAnomalyEntities(
[(500, 700)],
utils.TestKey('ChromiumPerf/linux/page_cycler/load_time/cnn.com'))
response = self.testapp.post(
'/benchmark_health_report', {
'benchmark': 'page_cycler',
'num_days': '30',
'master': 'ChromiumPerf',
})
bots = self.GetJsonValue(response, 'bots')
self.assertItemsEqual(bots, ['linux'])
self.assertTrue(self.GetJsonValue(response, 'monitored'))
alerts = self.GetJsonValue(response, 'alerts')
self.assertEqual(3, len(alerts))
def testPost_Benchmark_NotMonitored(self):
self._AddCachedSuites()
self._AddTests()
response = self.testapp.post(
'/benchmark_health_report', {
'benchmark': 'sunspider',
'num_days': '30',
'master': 'ChromiumPerf',
})
self.assertFalse(self.GetJsonValue(response, 'monitored'))
def testPost_BenchmarkArgumentNumDaysArgument_ListsCorrectAlerts(self):
self._AddCachedSuites()
self._AddTests()
self._AddAnomalyEntities(
[(200, 400), (600, 800)],
utils.TestKey('ChromiumPerf/linux/page_cycler/load_time'))
self._AddAnomalyEntities(
[(500, 700)],
utils.TestKey('ChromiumPerf/linux/page_cycler/load_time/cnn.com'))
anomalies = anomaly.Anomaly.query().fetch()
anomalies[0].timestamp = datetime.datetime.now() - datetime.timedelta(
days=20)
anomalies[0].put()
response = self.testapp.post(
'/benchmark_health_report',
{'benchmark': 'page_cycler', 'num_days': '5', 'master': 'ChromiumPerf'})
bots = self.GetJsonValue(response, 'bots')
self.assertItemsEqual(bots, ['linux'])
self.assertTrue(self.GetJsonValue(response, 'monitored'))
alerts = self.GetJsonValue(response, 'alerts')
self.assertEqual(2, len(alerts))
if __name__ == '__main__':
unittest.main()
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Access the Chandra archive via the arc5gl tool.
"""
import six
import pexpect
import os
# Should put in a watchdog timer to exit from arc5gl after a period of inactivity
import ska_helpers
__version__ = ska_helpers.get_version(__name__)
class Arc5gl(object):
def __init__(self, echo=False, timeout=100000):
"""Create an object for sending commands to arc5gl and waiting for the
prompt indicating command completion. Example::
arc5gl = Ska.arc5gl.Arc5gl()
arc5gl.sendline('obsid=21151')
arc5gl.sendline('get acis2{evt2}')
del arc5gl # explicitly shut things down, good idea
If the file ``${HOME}/.arc5gl_user`` exists then the content will be taken
as the user name to pass to the ``arc5gl`` Perl application for authentication.
Otherwise the linux username will be used.
:param echo: echo arc5gl output (default=False)
:param timeout: wait for up to timeout seconds for response (default=100000)
"""
args = ['--stdin']
arc5gl_user_file = os.path.join(os.environ['HOME'], '.arc5gl_user')
if os.path.exists(arc5gl_user_file):
user = open(arc5gl_user_file).read().strip()
args = args + ['--user={}'.format(user)]
self.prompt = 'ARC5GL> '
spawn = pexpect.spawn if six.PY2 else pexpect.spawnu
self.arc5gl = spawn('/proj/sot/ska/bin/arc5gl', args=args, timeout=timeout)
self.arc5gl.expect(self.prompt)
self.echo = echo
self.arc5gl.setecho(echo)
def sendline(self, line):
"""Send a single line to arc5gl and wait for the return prompt. There is no return value.
:param line: line of input
"""
self.arc5gl.sendline(line)
self.arc5gl.expect(self.prompt)
if self.echo:
print(self.prompt + self.arc5gl.before)
def __del__(self):
self.arc5gl.sendline('exit')
self.arc5gl.expect(pexpect.EOF)
self.arc5gl.close()
if self.echo:
print('Closed arc5gl')
|
import argparse
import cPickle as pickle
import numpy as np
import os
import matplotlib.pyplot as plt
import chainer
from chainer import optimizers
from chainer import serializers
import net
import trainer
import time
class CifarDataset(chainer.datasets.TupleDataset):
def __init__(self, x, y, augment=None):
super(CifarDataset, self).__init__(x, y)
self._augment = augment
def __getitem__(self, index):
items = super(CifarDataset, self).__getitem__(index)
if self._augment is None:
return items
if isinstance(index, slice):
return [(self._transform(x), y) for (x, y) in items]
else:
x, y = items
return self._transform(x), y
def _transform(self, x):
image = np.zeros_like(x)
size = x.shape[2]
if self._augment.get('crop', False):
offset = np.random.randint(-4, 5, size=(2,))
else:
offset = (0, 0)
if self._augment.get('mirror', False):
mirror = np.random.randint(2)
else:
mirror = 0
if self._augment.get('erase', False):
erase = np.random.randint(2)
else:
erase = 0
top, left = offset
left = max(0, left)
top = max(0, top)
right = min(size, left + size)
bottom = min(size, top + size)
if mirror > 0:
x = x[:,:,::-1]
image[:,size-bottom:size-top,size-right:size-left] = x[:,top:bottom,left:right]
if erase > 0:
while True:
s = np.random.uniform(0.02, 0.4) * size * size
r = np.random.uniform(-np.log(3.0), np.log(3.0))
r = np.exp(r)
w = int(np.sqrt(s / r))
h = int(np.sqrt(s * r))
left = np.random.randint(0, size)
top = np.random.randint(0, size)
if left + w < size and top + h < size:
break
c = np.random.randint(-128, 128)
image[:, top:top + h, left:left + w] = c
return image
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='CIFAR-10 dataset trainer')
parser.add_argument('--gpu', '-g', type=int, default=-1,
help='GPU device ID (negative value indicates CPU)')
parser.add_argument('--model', '-m', type=str, default='vgg', choices=['cnn', 'cnnbn', 'cnnwn', 'vgg', 'residual', 'identity_mapping', 'vgg_no_fc', 'vgg_wide', 'vgg_crelu', 'inception', 'pyramid', 'shake_residual'],
help='Model name')
parser.add_argument('--batch_size', '-b', type=int, default=100,
help='Mini batch size')
parser.add_argument('--dataset', '-d', type=str, default='dataset/image.pkl',
help='Dataset image pkl file path')
parser.add_argument('--label', '-l', type=str, default='dataset/label.pkl',
help='Dataset label pkl file path')
parser.add_argument('--prefix', '-p', type=str, default=None,
help='Prefix of model parameter files')
parser.add_argument('--iter', type=int, default=300,
help='Training iteration')
parser.add_argument('--save_iter', type=int, default=0,
help='Iteration interval to save model parameter file.')
parser.add_argument('--lr_decay_iter', type=str, default='100',
help='Iteration interval to decay learning rate')
parser.add_argument('--lr_shape', type=str, default='multistep', choices=['multistep', 'cosine'],
help='Learning rate annealing function, multistep or cosine')
parser.add_argument('--weight_decay', type=float, default=0.0001,
help='Weight decay')
parser.add_argument('--optimizer', type=str, default='sgd', choices=['sgd', 'adam'],
help='Optimizer name')
parser.add_argument('--lr', type=float, default=0.01,
help='Initial learning rate for SGD')
parser.add_argument('--alpha', type=float, default=0.001,
help='Initial alpha for Adam')
parser.add_argument('--augment', type=str, default='crop,mirror',
help='Augmentation methods e.g. \'crop,mirror,erase\'')
parser.add_argument('--no_valid_data', action='store_true',
help='Do not use validation data')
parser.add_argument('--res_depth', type=int, default=18,
help='Depth of Residual Network')
parser.add_argument('--res_width', type=int, default=2,
help='Width of Residual Network')
parser.add_argument('--skip_depth', action='store_true',
help='Use stochastic depth in Residual Network')
parser.add_argument('--swapout', action='store_true',
help='Use swapout')
parser.add_argument('--seed', type=int, default=1,
help='Random seed')
args = parser.parse_args()
np.random.seed(args.seed)
log_file_path = '{}_log.csv'.format(args.prefix)
lr_decay_iter = map(int, args.lr_decay_iter.split(','))
augment_methods = args.augment.split(',')
augmentation = {x: True for x in augment_methods}
print('loading dataset...')
with open(args.dataset, 'rb') as f:
images = pickle.load(f)
with open(args.label, 'rb') as f:
labels = pickle.load(f)
index = np.random.permutation(len(images['train']))
if args.no_valid_data:
valid_data = None
train_index = index
else:
train_index = index[:-5000]
valid_index = index[-5000:]
valid_x = images['train'][valid_index].reshape((-1, 3, 32, 32))
valid_y = labels['train'][valid_index]
valid_data = CifarDataset(valid_x, valid_y, augment=None)
train_x = images['train'][train_index].reshape((-1, 3, 32, 32))
train_y = labels['train'][train_index]
train_data = CifarDataset(train_x, train_y, augment=augmentation)
test_x = images['test'].reshape((-1, 3, 32, 32))
test_y = labels['test']
test_data = CifarDataset(test_x, test_y, augment=None)
print('start training')
if args.model == 'cnn':
cifar_net = net.CNN()
elif args.model == 'cnnbn':
cifar_net = net.CNNBN()
elif args.model == 'cnnwn':
cifar_net = net.CNNWN()
elif args.model == 'residual':
cifar_net = net.ResidualNet(args.res_depth, swapout=args.swapout, skip=args.skip_depth)
elif args.model == 'identity_mapping':
cifar_net = net.IdentityMapping(args.res_depth, swapout=args.swapout, skip=args.skip_depth)
elif args.model == 'vgg_no_fc':
cifar_net = net.VGGNoFC()
elif args.model == 'vgg_wide':
cifar_net = net.VGGWide()
elif args.model == 'vgg_crelu':
cifar_net = net.VGGCReLU()
elif args.model == 'inception':
cifar_net = net.Inception()
elif args.model == 'pyramid':
cifar_net = net.PyramidNet(args.res_depth, skip=args.skip_depth)
elif args.model == 'shake_residual':
cifar_net = net.ShakeShakeResidualNet(args.res_depth, args.res_width)
else:
cifar_net = net.VGG()
if args.optimizer == 'sgd':
optimizer = optimizers.MomentumSGD(lr=args.lr)
else:
optimizer = optimizers.Adam(alpha=args.alpha)
optimizer.setup(cifar_net)
if args.weight_decay > 0:
optimizer.add_hook(chainer.optimizer.WeightDecay(args.weight_decay))
cifar_trainer = trainer.CifarTrainer(cifar_net, optimizer, args.iter, args.batch_size, args.gpu, lr_shape=args.lr_shape, lr_decay=lr_decay_iter)
if args.prefix is None:
model_prefix = '{}_{}'.format(args.model, args.optimizer)
else:
model_prefix = args.prefix
state = {'best_valid_error': 100, 'best_test_error': 100, 'clock': time.clock()}
def on_epoch_done(epoch, n, o, loss, acc, valid_loss, valid_acc, test_loss, test_acc, test_time):
error = 100 * (1 - acc)
print('epoch {} done'.format(epoch))
print('train loss: {} error: {}'.format(loss, error))
if valid_loss is not None:
valid_error = 100 * (1 - valid_acc)
print('valid loss: {} error: {}'.format(valid_loss, valid_error))
else:
valid_error = None
if test_loss is not None:
test_error = 100 * (1 - test_acc)
print('test loss: {} error: {}'.format(test_loss, test_error))
print('test time: {}s'.format(test_time))
else:
test_error = None
if valid_loss is not None and valid_error < state['best_valid_error']:
serializers.save_npz('{}.model'.format(model_prefix), n)
serializers.save_npz('{}.state'.format(model_prefix), o)
state['best_valid_error'] = valid_error
state['best_test_error'] = test_error
elif valid_loss is None:
serializers.save_npz('{}.model'.format(model_prefix), n)
serializers.save_npz('{}.state'.format(model_prefix), o)
state['best_test_error'] = test_error
if args.save_iter > 0 and (epoch + 1) % args.save_iter == 0:
serializers.save_npz('{}_{}.model'.format(model_prefix, epoch + 1), n)
serializers.save_npz('{}_{}.state'.format(model_prefix, epoch + 1), o)
# prevent divergence when using identity mapping model
if args.model == 'identity_mapping' and epoch < 9:
o.lr = 0.01 + 0.01 * (epoch + 1)
clock = time.clock()
print('elapsed time: {}'.format(clock - state['clock']))
state['clock'] = clock
with open(log_file_path, 'a') as f:
f.write('{},{},{},{},{},{},{}\n'.format(epoch + 1, loss, error, valid_loss, valid_error, test_loss, test_error))
with open(log_file_path, 'w') as f:
        f.write('epoch,train loss,train error,valid loss,valid error,test loss,test error\n')
cifar_trainer.fit(train_data, valid_data, test_data, on_epoch_done)
print('best test error: {}'.format(state['best_test_error']))
train_loss, train_acc, test_loss, test_acc = np.loadtxt(log_file_path, delimiter=',', skiprows=1, usecols=[1, 2, 5, 6], unpack=True)
epoch = len(train_loss)
xs = np.arange(epoch, dtype=np.int32) + 1
plt.clf()
fig, ax = plt.subplots()
ax.plot(xs, train_loss, label='train loss', c='blue')
ax.plot(xs, test_loss, label='test loss', c='red')
ax.set_xlim((1, epoch))
ax.set_xlabel('epoch')
ax.set_ylabel('loss')
ax.legend(loc='upper right')
plt.savefig('{}_loss.png'.format(args.prefix), bbox_inches='tight')
plt.clf()
fig, ax = plt.subplots()
ax.plot(xs, train_acc, label='train error', c='blue')
ax.plot(xs, test_acc, label='test error', c='red')
ax.set_xlim([1, epoch])
ax.set_xlabel('epoch')
ax.set_ylabel('error')
ax.legend(loc='upper right')
    plt.savefig('{}_error.png'.format(args.prefix), bbox_inches='tight')
|
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
plt.style.use('seaborn-notebook')
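# The animation callback below re-reads 'data.csv' on every tick; the file is
# assumed to be written by another process and to contain 'x', 'y1' and 'y2'
# columns. A minimal sketch of a compatible writer, kept commented out so this
# script does not overwrite an existing file:
# import csv, random
# with open('data.csv', 'w', newline='') as f:
#     writer = csv.writer(f)
#     writer.writerow(['x', 'y1', 'y2'])
#     for i in range(100):
#         writer.writerow([i, random.random(), random.random()])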
def animation(_):
data = pd.read_csv('data.csv')
x = data['x']
y1 = data['y1']
y2 = data['y2']
plt.cla()
plt.plot(x, y1, label='channel 1', color='green')
plt.plot(x, y2, label='channel 2', color='orange')
plt.legend(loc='upper left')
plt.tight_layout()
plt.grid(alpha=.3)
ani = FuncAnimation(plt.gcf(), animation, interval=500)
plt.tight_layout()
plt.show()
|
from tensorflow.keras.layers import Conv2D, Conv2DTranspose, UpSampling2D
from tensorflow.keras.layers import BatchNormalization, Activation, Input, ZeroPadding2D
from tensorflow.keras.layers import Add, Concatenate
from tensorflow.keras.models import Model
from ...layers import ReflectPadding2D, InstanceNormalization2D
import sys
padding = ZeroPadding2D
def normalize():
return InstanceNormalization2D()
def scaleup(input, ngf, kss, strides, padding):
x = UpSampling2D(strides)(input)
x = Conv2D(ngf, kss, padding=padding)(x)
return x
def res_block(input, filters, kernel_size=(3, 3), strides=(1, 1)):
x = padding()(input)
x = Conv2D(filters=filters,
kernel_size=kernel_size,
strides=strides,)(x)
x = normalize()(x)
x = Activation('relu')(x)
x = padding()(x)
x = Conv2D(filters=filters,
kernel_size=kernel_size,
strides=strides,)(x)
x = normalize()(x)
merged = Add()([input, x])
return merged
def resnet_6blocks(input_shape, output_nc, ngf, **kwargs):
ks = 3
f = 7
p = (f-1)/2
input = Input(input_shape)
x = padding((int(p), int(p)))(input)
x = Conv2D(ngf, (f, f),)(x)
x = normalize()(x)
x = Activation('relu')(x)
x = Conv2D(ngf*2, (ks, ks), strides=(2, 2), padding='same')(x)
x = normalize()(x)
x = Activation('relu')(x)
x = Conv2D(ngf*4, (ks, ks), strides=(2, 2), padding='same')(x)
x = normalize()(x)
x = Activation('relu')(x)
x = res_block(x, ngf*4)
x = res_block(x, ngf*4)
x = res_block(x, ngf*4)
x = res_block(x, ngf*4)
x = res_block(x, ngf*4)
x = res_block(x, ngf*4)
x = scaleup(x, ngf*2, (ks, ks), strides=(2, 2), padding='same')
x = normalize()(x)
x = Activation('relu')(x)
x = scaleup(x, ngf, (ks, ks), strides=(2, 2), padding='same')
x = normalize()(x)
x = Activation('relu')(x)
x = padding((int(p), int(p)))(x)
x = Conv2D(output_nc, (f, f))(x)
x = Activation('tanh')(x)
model = Model(input, x, name=kwargs.get('name', None))
return model
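# Illustrative usage (the input shape, output channel count and base filter
# width below are assumptions, not values taken from this repository):
# generator = resnet_6blocks((256, 256, 3), output_nc=3, ngf=64, name='resnet_6blocks')
# generator.summary()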
|
import io
import solve
def test_count_increases_empty_list():
depth_list = []
increases = solve.count_increases(depth_list)
assert 0 == increases
def test_count_increases_single_increase():
depth_list = [0, 100]
increases = solve.count_increases(depth_list)
assert 1 == increases
def test_count_increases_single_decrease():
depth_list = [100, 0]
increases = solve.count_increases(depth_list)
assert 0 == increases
def test_count_increases_single_value_zero_increases():
depth_list = [100]
increases = solve.count_increases(depth_list)
assert 0 == increases
def test_count_increases_all_increases():
depth_list = [100, 101, 102, 103, 104, 105]
increases = solve.count_increases(depth_list)
assert len(depth_list) - 1 == increases
def test_count_increases_all_decreases():
depth_list = reversed([100, 101, 102, 103, 104, 105])
increases = solve.count_increases(depth_list)
assert 0 == increases
def test_part_1_sample_input():
depth_list = [
199,
200,
208,
210,
200,
207,
240,
269,
260,
263,
]
increases = solve.count_increases(depth_list)
assert 7 == increases
def test_load_depth_list_empty_file():
file_obj = io.StringIO("")
depth_list = solve.load_depth_list(file_obj)
assert depth_list == []
def test_load_depth_list_single_line():
file_obj = io.StringIO("100")
depth_list = solve.load_depth_list(file_obj)
assert depth_list == [100]
def test_load_depth_list_multi_line():
file_obj = io.StringIO("""100
200
300
400
500""")
depth_list = solve.load_depth_list(file_obj)
assert depth_list == [100, 200, 300, 400, 500]
def test_load_depth_list_ignore_empty_lines():
    file_obj = io.StringIO("""100

    200
    300

    400
    500""")
depth_list = solve.load_depth_list(file_obj)
assert depth_list == [100, 200, 300, 400, 500]
def test_sliding_windows_empty_list():
measurements = []
sums = solve.sliding_windows(measurements)
assert sums == []
def test_sliding_windows_not_enough_measurements():
measurements = [100, 200]
sums = solve.sliding_windows(measurements, window=3)
assert sums == []
def test_sliding_windows_sum_of_measurements():
measurements = [100, 200, 300]
sums = solve.sliding_windows(measurements, window=3)
assert sums == [600]
def test_sliding_windows_sum_per_window():
measurements = [100, 200, 300, 10]
sums = solve.sliding_windows(measurements, window=3)
assert sums == [600, 510]
def test_sliding_windows_extra_measurements_ignored():
measurements = [100, 200, 300, 10, 20]
sums = solve.sliding_windows(measurements, window=3)
assert sums == [600, 510, 330]
def test_part_2_sample_input():
depth_list = [
199,
200,
208,
210,
200,
207,
240,
269,
260,
263,
]
sums = solve.sliding_windows(depth_list, window=3)
print(sums)
increases = solve.count_increases(sums)
assert 5 == increases
|
from django.apps import AppConfig
class RepairandbuyerConfig(AppConfig):
name = 'repairANDbuyer'
verbose_name = '蓝快维修与采购'
|
import os
import sys
from multiprocessing import Process, Queue
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from couchdbkit import ResourceConflict, ResourceNotFound
from six.moves.urllib.parse import urlparse
from dimagi.utils.couch.database import iter_docs
# doctypes we want to be careful not to copy, which must be explicitly
# specified with --include
from dimagi.utils.parsing import json_format_date
from corehq.apps.domain.models import Domain
from corehq.apps.domainsync.config import DocumentTransform, save
from corehq.apps.domainsync.management.commands.copy_utils import (
copy_postgres_data_for_docs,
)
from corehq.util.couchdb_management import CouchConfig
from corehq.util.dates import iso_string_to_date
DEFAULT_EXCLUDE_TYPES = [
'ReportNotification',
'WeeklyNotification',
'DailyNotification'
]
NUM_PROCESSES = 8
class Command(BaseCommand):
"""
DEPRECATED/NEEDS WORK - `copy_domain` is basically broken because of
- attachments in blobdb
- SQL data.
Scale trello card to update this: https://trello.com/c/OGGrmoGI/16-copydomain
"""
help = "Copies the contents of a domain to another database. " \
"If targetdb is not specified, the target is the database " \
"specified by COUCH_DATABASE in your settings."
def add_arguments(self, parser):
parser.add_argument(
'sourcedb',
)
parser.add_argument(
'domain',
)
parser.add_argument(
'targetdb',
nargs='?',
)
parser.add_argument(
'--include',
action='store',
dest='doc_types',
default='',
help='Comma-separated list of Document Types to copy',
)
parser.add_argument(
'--exclude',
action='store',
dest='doc_types_exclude',
default='',
help='Comma-separated list of Document Types to NOT copy.',
)
parser.add_argument(
'--exclude-attachments',
action='store_true',
dest='exclude_attachments',
default=False,
help="Don't copy document attachments, just the docs themselves.",
)
parser.add_argument(
'--since',
action='store',
dest='since',
default='',
            help='Only copy documents newer than this date. Format: yyyy-MM-dd.',
)
parser.add_argument(
'--list-types',
action='store_true',
dest='list_types',
default=False,
help='Don\'t copy anything, just list all the available document types.',
)
parser.add_argument(
'--simulate',
action='store_true',
dest='simulate',
default=False,
help='Don\'t copy anything, print what would be copied.',
)
parser.add_argument(
'--id-file',
action='store',
dest='id_file',
default='',
help="File containing one document ID per line. Only docs with these ID's will be copied",
)
parser.add_argument(
'--postgres-db',
action='store',
dest='postgres_db',
default='',
help="Name of postgres database to pull additional data from. This should map to a "
"key in settings.DATABASES. If not specified no additional postgres data will be "
"copied. This is currently used to pull CommCare Supply models.",
)
parser.add_argument(
'--postgres-password',
action='store',
dest='postgres_password',
default='',
help="Password for postgres database to pull additional data from. If not specified will "
"default to the value in settings.DATABASES",
)
parser.add_argument(
'--dont-run-multi-process',
action='store_false',
dest='run_multi_process',
default=True,
help="If set to true this spawn multiple processes which should speed up the time taken to "
"copy. This must be false if running in a supervised process",
)
def iter_source_dbs(self):
for sourcedb_name, sourcedb in self.source_couch.all_dbs_by_slug.items():
if sourcedb_name not in self.exclude_dbs:
print("In {} db".format(sourcedb_name or "the main"))
yield sourcedb_name, sourcedb
def _get_couch_database_configs_from_string(self, db_string):
sourcedb_parse_result = urlparse(db_string)
return CouchConfig({
'default': {
'COUCH_HTTPS': sourcedb_parse_result.scheme == 'https',
'COUCH_SERVER_ROOT': sourcedb_parse_result.hostname,
'COUCH_USERNAME': sourcedb_parse_result.username,
'COUCH_PASSWORD': sourcedb_parse_result.password,
'COUCH_DATABASE_NAME': sourcedb_parse_result.path.lstrip('/')
}
})
def handle(self, sourcedb, domain, targetdb, **options):
self.exclude_dbs = (
# these have data we don't want to copy
'receiverwrapper', 'auditcare', 'fluff-bihar',
'fluff-mc', 'fluff-cvsu', 'mvp-indicators', 'm4change',
# todo: missing domain/docs, but probably want to add back
'meta',
)
self.source_couch = source_couch = self._get_couch_database_configs_from_string(sourcedb)
simulate = options['simulate']
exclude_attachments = options['exclude_attachments']
self.run_multi_process = options['run_multi_process']
since = json_format_date(iso_string_to_date(options['since'])) if options['since'] else None
if options['list_types']:
for sourcedb_name, sourcedb in self.iter_source_dbs():
self.list_types(sourcedb, domain, since)
sys.exit(0)
if simulate:
print("\nSimulated run, no data will be copied.\n")
if options['postgres_db'] and options['postgres_password']:
settings.DATABASES[options['postgres_db']]['PASSWORD'] = options['postgres_password']
self.target_couch = self._get_couch_database_configs_from_string(targetdb)
try:
domain_doc = Domain.get_by_name(domain)
except ResourceNotFound:
domain_doc = None
if domain_doc is None:
self.copy_domain(source_couch, domain)
if options['doc_types']:
doc_types = options['doc_types'].split(',')
for doc_type in doc_types:
sourcedb = source_couch.get_db_for_doc_type(doc_type)
startkey = [x for x in [domain, doc_type, since] if x is not None]
endkey = [x for x in [domain, doc_type, {}] if x is not None]
self.copy_docs(sourcedb, domain, simulate, startkey, endkey, doc_type=doc_type, since=since,
postgres_db=options['postgres_db'], exclude_attachments=exclude_attachments)
elif options['id_file']:
path = options['id_file']
if not os.path.isfile(path):
print("Path '%s' does not exist or is not a file" % path)
sys.exit(1)
with open(path) as input:
doc_ids = [line.rstrip('\n') for line in input]
if not doc_ids:
print("Path '%s' does not contain any document ID's" % path)
sys.exit(1)
for sourcedb_name, sourcedb in self.iter_source_dbs():
self.copy_docs(sourcedb, domain, simulate, doc_ids=doc_ids, postgres_db=options['postgres_db'],
exclude_attachments=exclude_attachments)
else:
startkey = [domain]
endkey = [domain, {}]
exclude_types = DEFAULT_EXCLUDE_TYPES + options['doc_types_exclude'].split(',')
for sourcedb_name, sourcedb in self.iter_source_dbs():
self.copy_docs(sourcedb, domain, simulate, startkey, endkey, exclude_types=exclude_types,
postgres_db=options['postgres_db'], exclude_attachments=exclude_attachments)
def list_types(self, sourcedb, domain, since):
doc_types = sourcedb.view("by_domain_doc_type_date/view", startkey=[domain],
endkey=[domain, {}], reduce=True, group=True, group_level=2)
doc_count = dict([(row['key'][1], row['value']) for row in doc_types])
if since:
for doc_type in sorted(doc_count.keys()):
num_since = sourcedb.view("by_domain_doc_type_date/view", startkey=[domain, doc_type, since],
endkey=[domain, doc_type, {}], reduce=True).all()
num = num_since[0]['value'] if num_since else 0
print("{0:<30}- {1:<6} total {2}".format(doc_type, num, doc_count[doc_type]))
else:
for doc_type in sorted(doc_count.keys()):
print("{0:<30}- {1}".format(doc_type, doc_count[doc_type]))
def copy_docs(self, sourcedb, domain, simulate, startkey=None, endkey=None, doc_ids=None,
doc_type=None, since=None, exclude_types=None, postgres_db=None, exclude_attachments=False):
if not doc_ids:
doc_ids = [result["id"] for result in sourcedb.view("by_domain_doc_type_date/view", startkey=startkey,
endkey=endkey, reduce=False)]
total = len(doc_ids)
count = 0
msg = "Found %s matching documents in domain: %s" % (total, domain)
msg += " of type: %s" % (doc_type) if doc_type else ""
msg += " since: %s" % (since) if since else ""
print(msg)
err_log = self._get_err_log()
if self.run_multi_process:
queue = Queue(150)
for i in range(NUM_PROCESSES):
Worker(queue, sourcedb, self.target_couch, exclude_types, total, simulate, err_log, exclude_attachments).start()
for doc in iter_docs(sourcedb, doc_ids, chunksize=100):
count += 1
queue.put((doc, count))
# shutdown workers
for i in range(NUM_PROCESSES):
queue.put(None)
else:
for doc in iter_docs(sourcedb, doc_ids, chunksize=100):
target = self.target_couch.get_db_for_doc_type(doc['doc_type'])
count += 1
copy_doc(doc, count, sourcedb, target, exclude_types, total, simulate, exclude_attachments)
err_log.close()
if os.stat(err_log.name)[6] == 0:
os.remove(err_log.name)
else:
print('Failed document IDs written to %s' % err_log.name)
if postgres_db:
copy_postgres_data_for_docs(postgres_db, doc_ids=doc_ids, simulate=simulate)
def copy_domain(self, source_couch, domain):
print("Copying domain doc")
sourcedb = source_couch.get_db_for_class(Domain)
result = sourcedb.view(
"domain/domains",
key=domain,
reduce=False,
include_docs=True
).first()
if result and 'doc' in result:
domain_doc = Domain.wrap(result['doc'])
dt = DocumentTransform(domain_doc._obj, sourcedb)
save(dt, self.target_couch.get_db_for_doc_type(domain_doc['doc_type']))
else:
print("Domain doc not found for domain %s." % domain)
def _get_err_log(self):
name = 'copy_domain.err.%s'
for i in range(1000): # arbitrarily large number
candidate = name % i
if not os.path.isfile(candidate):
return open(candidate, 'a', buffering=1, encoding='utf-8')
class Worker(Process):
def __init__(self, queue, sourcedb, target_couch, exclude_types, total, simulate, err_log, exclude_attachments):
super(Worker, self).__init__()
self.queue = queue
self.sourcedb = sourcedb
self.target_couch = target_couch
self.exclude_types = exclude_types
self.exclude_attachments = exclude_attachments
self.total = total
self.simulate = simulate
self.err_log = err_log
def run(self):
for doc, count in iter(self.queue.get, None):
try:
target = self.target_couch.get_db_for_doc_type(doc['doc_type'])
copy_doc(doc, count, self.sourcedb, target, self.exclude_types, self.total, self.simulate,
self.exclude_attachments)
except Exception as e:
self.err_log.write('%s\n' % doc["_id"])
print(" Document %s failed! Error is: %s %s" % (doc["_id"], e.__class__.__name__, e))
def copy_doc(doc, count, sourcedb, target_couch, exclude_types, total, simulate, exclude_attachments):
if exclude_types and doc["doc_type"] in exclude_types:
print(" SKIPPED (excluded type: %s). Synced %s/%s docs (%s: %s)" % \
(doc["doc_type"], count, total, doc["doc_type"], doc["_id"]))
else:
if not simulate:
dt = DocumentTransform(doc, sourcedb, exclude_attachments)
for i in reversed(list(range(5))):
try:
save(dt, target_couch)
break
except (ResourceConflict, TypeError):
if i == 0:
raise
print(" Synced %s/%s docs (%s: %s)" % (count, total, doc["doc_type"], doc["_id"]))
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
@Time : 2019-09-21 19:54
@Author : Wang Xin
@Email : [email protected]
@File : __init__.py.py
""" |
import torch
from torchaudio_unittest.common_utils import PytorchTestCase
from .torchscript_consistency_impl import Functional, FunctionalComplex
class TestFunctionalFloat32(Functional, PytorchTestCase):
dtype = torch.float32
device = torch.device('cpu')
class TestFunctionalFloat64(Functional, PytorchTestCase):
dtype = torch.float64
device = torch.device('cpu')
class TestFunctionalComplex64(FunctionalComplex, PytorchTestCase):
complex_dtype = torch.complex64
real_dtype = torch.float32
device = torch.device('cpu')
class TestFunctionalComplex128(FunctionalComplex, PytorchTestCase):
complex_dtype = torch.complex128
real_dtype = torch.float64
device = torch.device('cpu')
|
import os
root_dir = os.path.expanduser("~")
root_dir = os.path.join(root_dir, "Desktop")
print_interval = 100
save_model_iter = 1000
train_data_path = os.path.join(root_dir, "Reinforce-Paraphrase-Generation/data/twitter_url/chunked/train_*")
eval_data_path = os.path.join(root_dir, "Reinforce-Paraphrase-Generation/data/twitter_url/chunked/val_*")
decode_data_path = os.path.join(root_dir, "Reinforce-Paraphrase-Generation/data/twitter_url/chunked/test_*")
vocab_path = os.path.join(root_dir, "Reinforce-Paraphrase-Generation/data/twitter_url/vocab")
log_root = os.path.join(root_dir, "Reinforce-Paraphrase-Generation/log_twitter")
# Hyperparameters
mode = "MLE" # other options: RL/GTI/SO/SIO/DAGGER/DAGGER*
alpha = 1.0
beta = 1.0
k1 = 0.9999
k2 = 3000.
hidden_dim = 256
emb_dim = 128
batch_size = 8
sample_size = 4
max_enc_steps = 20
max_dec_steps = 20
beam_size = 8
min_dec_steps = 5
vocab_size = 5000
max_iterations = 5000000
lr = 1e-5
pointer_gen = True
is_coverage = False
lr_coverage = 0.15
cov_loss_wt = 1.0
max_grad_norm = 2.0
rand_unif_init_mag = 0.02
trunc_norm_init_std = 1e-4
eps = 1e-12
use_gpu = True |
import math
from linked_list import LinkedList, Node
# Time complexity: O(N)
# Space complexity: O(N)
def palindrome(s_list: LinkedList):
current_node = s_list.head
new_list = LinkedList()
while current_node:
new_list.unshift(current_node.value)
current_node = current_node.next
return new_list == s_list
# Time complexity: O(N)
# Space complexity: O(N)
def palindrome_2(s_list: LinkedList):
chars = []
current_node = s_list.head
while current_node:
chars.append(current_node.value)
current_node = current_node.next
left = 0
right = len(chars) - 1
while right > left:
if chars[right] != chars[left]:
return False
right -= 1
left += 1
return True
if __name__ == "__main__":
s1 = LinkedList().push('l').push('e').push('v').push('e').push('l')
s2 = LinkedList().push('l').push('e').push('v').push('f').push('l')
print(palindrome(s1))
print(palindrome(s2))
|
import os
import json
import logging
from datetime import date, datetime
import boto3
import botocore
# logger
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
def lambda_handler(event, context):
class CrawlerThrottlingException(Exception): pass
class CrawlerRunningException(Exception): pass
try:
# Get Glue crawler name
crawler_name = event['CrawlerName']
logger.info('CrawlerName: %s', crawler_name)
glue_client = boto3.client('glue')
response = glue_client.start_crawler(Name=crawler_name)
logger.info('Response: %s', json.dumps(response))
return {
"StatusCode": response['ResponseMetadata']['HTTPStatusCode']
}
except botocore.exceptions.ClientError as e:
logger.exception(e, exc_info=False)
if e.response.get('Error', {}).get('Code') == 'ThrottlingException':
raise CrawlerThrottlingException(e)
elif e.response.get('Error', {}).get('Code') == 'CrawlerRunningException':
raise CrawlerRunningException(e)
else:
raise e
except Exception as e:
logger.exception(e, exc_info=False)
raise e |
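# Local smoke test sketch (illustrative; requires AWS credentials and an
# existing Glue crawler, and the crawler name below is an assumption):
# if __name__ == '__main__':
#     print(lambda_handler({'CrawlerName': 'example-crawler'}, None))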