Dataset schema (each record below lists its fields in this column order):

column         dtype    value range / classes
-------------  -------  ----------------------------------------------
repo_name      string   lengths 5 to 92
path           string   lengths 4 to 232
copies         string   22 distinct values
size           string   lengths 4 to 7
content        string   lengths 626 to 1.05M
license        string   15 distinct values
hash           int64    -9,223,277,421,539,062,000 to 9,223,102,107B
line_mean      float64  5.21 to 99.9
line_max       int64    12 to 999
alpha_frac     float64  0.25 to 0.96
autogenerated  bool     1 class
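
Because every record follows the column layout above, the dump can be filtered programmatically, for example to keep only non-autogenerated files with moderate line lengths. The sketch below is a minimal illustration assuming the rows are available as a Parquet file; the filename code_files.parquet and the filter thresholds are illustrative assumptions, not part of this dump.

import pandas as pd

# Hypothetical path -- the actual artifact behind this dump is not named above.
df = pd.read_parquet("code_files.parquet")

# 'size' is a string column in the schema above, so cast it before comparing.
df["size"] = df["size"].astype(int)

# Keep non-generated files whose longest line and alphabetic fraction look sane.
kept = df[
    (~df["autogenerated"])
    & (df["line_max"] <= 200)
    & (df["alpha_frac"].between(0.25, 0.96))
]

print(kept[["repo_name", "path", "license", "size"]].head())
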
repo_name:     gregorianzhang/bbcnews
path:          lab06_11.py
copies:        1
size:          1365
content:
#-*- coding:utf-8 -*-
#!/usr/bin/python

dict1 = {
    "London": ["British Museum", "TATE Britain", "TATE Modern", "National Gallery","Natural History Museum", "Victoria and Albert Museum"],
    "Lisbon" : ["Ancient Art Museum", "Design Museum", "Tile Museum", "Berado Museum","Belem Cultural Centre"],
    "Madrid" : ["Prado", "Reina Sofia", "Thyssen", "CaixaForum"],
    "Paris" : ["Musée Rodin", "Louvre", "Musée d'Orsay", "Centre Georges-Pompidou" ],
    "Rome" : ["Galleria Borghese", "Palazzo Barberini", "Vatican Museum", "Galleriad'arte contemporanea"],
    "Berlin" : ["Deutsches Historisches Musem", "DDR Museum", "Altes Museum", "AlteNationalgalerie"],
    "Vienna": ["Albertina", "Leopold Museum", "Kunst historisches museum", "SigmundFreud Museum"],
}

with open('the european tour.html','w') as html:
    html.write('<html>' + '\n')
    html.write('<head><title>European Grand Tour</title></head>' + '\n')
    html.write('<body>' + '\n')
    html.write('<h1>European Grand Tour</h1>' + '\n')
    for k,v in dict1.iteritems():
        html.write('<h2>' + k + '</h2>' + '\n')
        html.write('<ul>' + '\n')
        for a in v:
            html.write('<li>' + a + '</li>' + '\n')
        html.write('</ul>' + '\n')
    html.write('</body>'+ '\n')
    html.write('</html>'+ '\n')

#print dict1
license:       gpl-3.0
hash:          5,223,513,341,668,158,000
line_mean:     41.59375
line_max:      143
alpha_frac:    0.560528
autogenerated: false

repo_name:     tboch/mocpy
path:          mocpy/tests/test_moc.py
copies:        1
size:          12065
content:
import pytest import copy import sys import numpy as np from astropy.coordinates import SkyCoord, ICRS, Angle from astropy.io.votable import parse_single_table import astropy.units as u from astropy.io import fits import cdshealpix from ..moc import MOC, World2ScreenMPL #### TESTING MOC creation #### def get_random_skycoords(size): return SkyCoord(ra=np.random.uniform(0, 360, size), dec=np.random.uniform(-90, 90, size), unit="deg") skycoords1 = get_random_skycoords(size=1000) skycoords2 = get_random_skycoords(size=2000) skycoords3 = get_random_skycoords(size=50000) @pytest.fixture() def skycoords_gen_f(): def gen_f(size): return SkyCoord(np.random.uniform(0, 360, size), np.random.uniform(-90, 90, size), unit='deg') return gen_f @pytest.fixture() def lonlat_gen_f(): def gen_f(size): return np.random.uniform(0, 360, size) * u.deg, np.random.uniform(-90, 90, size) * u.deg return gen_f @pytest.mark.parametrize("size", [ 1000, 10000, 50000 ]) def test_moc_from_skycoords(skycoords_gen_f, size): skycoords = skycoords_gen_f(size) moc = MOC.from_skycoords(skycoords, max_norder=7) @pytest.mark.parametrize("size", [ 1000, 10000, 50000 ]) def test_moc_from_lonlat(lonlat_gen_f, size): lon, lat = lonlat_gen_f(size) moc = MOC.from_lonlat(lon=lon, lat=lat, max_norder=6) def test_from_healpix_cells(): ipix = np.array([40, 87, 65]) depth = np.array([3, 3, 3]) fully_covered = np.array([True, True, True]) moc = MOC.from_healpix_cells(ipix, depth, fully_covered) def test_moc_from_fits(): fits_path = 'resources/P-GALEXGR6-AIS-FUV.fits' moc = MOC.from_fits(fits_path) def test_moc_consistent_with_aladin(): truth = MOC.from_fits('resources/CDS-I-125A-catalog_MOC.fits') table = parse_single_table("resources/I_125A_catalog.vot").to_table() moc = MOC.from_lonlat( table['_RAJ2000'].T * u.deg, table['_DEJ2000'].T * u.deg, max_norder=8 ) assert moc == truth def test_moc_from_fits_images(): image_path = 'resources/image_with_mask.fits.gz' moc = MOC.from_fits_images([image_path], max_norder=15) def test_from_fits_images_2(): MOC.from_fits_images(['resources/u_gal.fits'], max_norder=10) @pytest.fixture() def moc_from_fits_image(): image_path = 'resources/image_with_mask.fits.gz' with fits.open(image_path) as hdulist: moc = MOC.from_fits_image(hdu=hdulist[0], max_norder=7, mask=hdulist[0].data) return moc @pytest.fixture() def moc_from_json(): return MOC.from_json({'8': [45, 78], '4': [42, 57]}) def test_moc_from_fits_image(moc_from_fits_image): assert isinstance(moc_from_fits_image, MOC) def test_moc_serialize_and_from_json(moc_from_json): ipix_d = moc_from_json.serialize(format="json") moc2 = MOC.from_json(ipix_d) assert moc_from_json == moc2 @pytest.mark.parametrize("expected, moc_str", [ (MOC.from_json({'5': [8, 9, 10, 42, 43, 44, 45, 54, 46], '6':[4500], '7':[], '8':[45]}), '5/8-10 42-46 54\n\r8 6/4500 8/45'), (MOC.from_json({}), '0/'), (MOC.from_json({'29': [101]}), '29/101'), (MOC.from_json({'0': [1, 0, 9]}), '0/0-1 9'), (MOC.from_json({'0': [2, 9], '1': [9]}), '0/2 9'), (MOC.from_json({'0': [2], '8': [8, 9, 10], '11': []}), '0/2\r \n 8/8-10\n 11/'), ]) def test_from_str(expected, moc_str): assert MOC.from_str(moc_str) == expected def test_moc_full_skyfraction(): moc = MOC.from_json({ '0': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11] }) assert moc.sky_fraction == 1.0 def test_moc_skyfraction(): moc = MOC.from_json({ '0': [0, 1, 2, 3, 4, 5] }) assert moc.sky_fraction == 0.5 def test_sky_fraction_on_empty_coverage(): moc = MOC() assert moc.sky_fraction == 0 #### TESTING MOC serialization #### def 
test_moc_serialize_to_fits(moc_from_fits_image): hdulist = moc_from_fits_image.serialize(format='fits') assert isinstance(hdulist, fits.hdu.hdulist.HDUList) def test_moc_serialize_to_json(moc_from_fits_image): moc_json = moc_from_fits_image.serialize(format='json') assert isinstance(moc_json, dict) @pytest.mark.parametrize("moc, expected", [ (MOC.from_json({'5': [8, 9, 10, 42, 43, 44, 45, 54, 46], '6':[4500], '7':[], '8':[45]}), '5/8-10 42-46 54 6/4500 8/45'), (MOC.from_json({}), ''), (MOC.from_json({'29': [101]}), '29/101'), (MOC.from_json({'0': [1, 0, 9]}), '0/0-1 9'), (MOC.from_json({'0': [2, 9], '1': [9]}), '0/2 9'), ]) def test_serialize_to_str(moc, expected): assert moc.serialize(format="str") == expected @pytest.mark.parametrize("filename, overwrite, format, os_error", [ ('moc', True, 'fits', False), ('moc', False, 'fits', True), ('moc', True, 'json', False), ('moc', True, 'str', False), ('moc', False, 'str', True), ]) def test_write(moc_from_json, filename, overwrite, format, os_error): if os_error: with pytest.raises(OSError): moc_from_json.write(filename, format=format, overwrite=overwrite) else: moc_from_json.write(filename, format=format, overwrite=overwrite) #### TESTING MOC plot functions #### def test_mpl_fill(): fits_path = 'resources/P-GALEXGR6-AIS-FUV.fits' moc = MOC.from_fits(fits_path) import matplotlib.pyplot as plt fig = plt.figure(111, figsize=(10, 10)) with World2ScreenMPL(fig, fov=50 * u.deg, center=SkyCoord(0, 20, unit='deg', frame='icrs'), coordsys="icrs", rotation=Angle(0, u.degree), projection="AIT") as wcs: ax = fig.add_subplot(1, 1, 1, projection=wcs) moc.fill(ax=ax, wcs=wcs, alpha=0.5, color='r') def test_mpl_border(): fits_path = 'resources/P-GALEXGR6-AIS-FUV.fits' moc = MOC.from_fits(fits_path) import matplotlib.pyplot as plt fig = plt.figure(111, figsize=(10, 10)) with World2ScreenMPL(fig, fov=50 * u.deg, center=SkyCoord(0, 20, unit='deg', frame='icrs'), coordsys="icrs", rotation=Angle(0, u.degree), projection="AIT") as wcs: ax = fig.add_subplot(1, 1, 1, projection=wcs) moc.border(ax=ax, wcs=wcs, color='g') #### TESTING MOC features #### def test_moc_contains(): order = 4 size = 20 healpix_arr = np.random.randint(0, 12*4**order, size) all_healpix_arr = np.arange(12*4**order) healpix_outside_arr = np.setdiff1d(all_healpix_arr, healpix_arr) moc = MOC.from_json(json_moc={str(order): healpix_arr.tolist()}) lon, lat = cdshealpix.healpix_to_lonlat(healpix_arr, order) lon_out, lat_out = cdshealpix.healpix_to_lonlat(healpix_outside_arr, order) should_be_inside_arr = moc.contains(ra=lon, dec=lat) assert should_be_inside_arr.all() should_be_outside_arr = moc.contains(ra=lon_out, dec=lat_out) assert not should_be_outside_arr.any() # test keep_inside field should_be_outside_arr = moc.contains(ra=lon, dec=lat, keep_inside=False) assert not should_be_outside_arr.any() should_be_inside_arr = moc.contains(ra=lon_out, dec=lat_out, keep_inside=False) assert should_be_inside_arr.all() def test_degrade_to_order(): hst_fits_path = 'resources/hst.fits' hst_moc = MOC.from_fits(hst_fits_path) max_depth = hst_moc.max_order for order in reversed(range(0, max_depth)): hst_moc = hst_moc.degrade_to_order(order) assert(hst_moc.sky_fraction <= 1.0) # TODO: IMPROVE THE ALGO ''' def test_boundaries(): fits_path = 'resources/P-GALEXGR6-AIS-FUV.fits' moc = MOC.from_fits(fits_path) moc = moc.degrade_to_order(6) boundaries_l = moc.get_boundaries() ''' def test_from_elliptical_cone(): moc = MOC.from_elliptical_cone( lon=0 * u.deg, lat=0 * u.deg, a=Angle(10, u.deg), b=Angle(5, u.deg), 
pa=Angle(0, u.deg), max_depth=10) @pytest.fixture() def mocs(): moc1 = {'1': [0]} moc1_increased = {'0': [0], '1': [17, 19, 22, 23, 35]} moc2 = {'1': [30]} moc2_increased = {'0': [7], '1': [8, 9, 25, 43, 41]} return dict(moc1=MOC.from_json(moc1), moc1_increased=MOC.from_json(moc1_increased), moc2=MOC.from_json(moc2), moc2_increased=MOC.from_json(moc2_increased)) def test_add_neighbours(mocs): mocs['moc1'].add_neighbours() assert mocs['moc1'] == mocs['moc1_increased'] mocs['moc2'].add_neighbours() assert mocs['moc2'] == mocs['moc2_increased'] def test_remove_neighbours(mocs): mocs['moc1_increased'].remove_neighbours() mocs['moc2_increased'].remove_neighbours() assert mocs['moc1_increased'] == mocs['moc1'] assert mocs['moc2_increased'] == mocs['moc2'] def test_neighbours(mocs): moc1 = copy.deepcopy(mocs['moc1']) moc2 = copy.deepcopy(mocs['moc2']) moc1.add_neighbours().remove_neighbours() moc2.add_neighbours().remove_neighbours() assert moc1 == mocs['moc1'] assert moc2 == mocs['moc2'] #### TESTING MOC operations #### @pytest.fixture() def mocs_op(): moc1 = MOC.from_json({ '0': [0, 2, 3, 4, 5] }) moc2 = MOC.from_json({ '0': [0, 1, 7, 4, 3] }) return dict(first=moc1, second=moc2) def test_moc_union(mocs_op): assert mocs_op['first'].union(mocs_op['second']) == MOC.from_json({ '0': [0, 1, 2, 3, 4, 5, 7] }) def test_moc_intersection(mocs_op): assert mocs_op['first'].intersection(mocs_op['second']) == MOC.from_json({ '0': [0, 3, 4] }) def test_moc_difference(mocs_op): assert mocs_op['first'].difference(mocs_op['second']) == MOC.from_json({ '0': [2, 5] }) def test_moc_complement_consistency(): moc = MOC.from_fits('resources/P-GALEXGR6-AIS-FUV.fits') assert moc.complement().complement() == moc def test_from_fits_old(): moc = MOC.from_fits('resources/V_147_sdss12.moc.fits') assert moc.complement().complement() == moc @pytest.mark.parametrize("input, expected", [ (MOC.from_json({'0': [1, 3]}), MOC.from_json({'0': [0, 2, 4, 5, 6, 7, 8, 9, 10, 11]})) ]) def test_moc_complement(input, expected): assert input.complement() == expected def test_spatial_res_to_order(): order = np.arange(14) res = MOC.order_to_spatial_resolution(order) output = MOC.spatial_resolution_to_order(res) assert (order == output).all() def test_from_valued_healpix_cells_empty(): uniq = np.array([]) values = np.array([]) MOC.from_valued_healpix_cells(uniq, values) def test_from_valued_healpix_cells_different_sizes(): uniq = np.array([500]) values = np.array([]) with pytest.raises(ValueError): MOC.from_valued_healpix_cells(uniq, values) def test_from_valued_healpix_cells_cumul_from_sup_cumul_to(): uniq = np.array([500]) values = np.array([1.0]) with pytest.raises(ValueError): MOC.from_valued_healpix_cells(uniq, values, cumul_from=0.8, cumul_to=-5.0) @pytest.mark.parametrize("cumul_from, cumul_to", [ (-5.0, 1.0), (np.nan, np.inf), (np.nan, np.nan), (np.inf, np.nan), (-10.0, -5.0) ]) def test_from_valued_healpix_cells_weird_values(cumul_from, cumul_to): uniq = np.array([500]) values = np.array([-1.0]) MOC.from_valued_healpix_cells(uniq, values, cumul_from=cumul_from, cumul_to=cumul_to) def test_from_valued_healpix_cells_bayestar(): from astropy.io import fits fits_image_filename = './resources/bayestar.multiorder.fits' with fits.open(fits_image_filename) as hdul: hdul.info() hdul[1].columns data = hdul[1].data uniq = data['UNIQ'] probdensity = data['PROBDENSITY'] import astropy_healpix as ah import astropy.units as u level, ipix = ah.uniq_to_level_ipix(uniq) area = ah.nside_to_pixel_area(ah.level_to_nside(level)).to_value(u.steradian) 
prob = probdensity * area cumul_to = np.linspace(0.01, 2.0, num=10) for b in cumul_to: MOC.from_valued_healpix_cells(uniq, prob, 12, cumul_from=0.0, cumul_to=b)
license:       gpl-3.0
hash:          -1,678,596,017,511,105,300
line_mean:     26.799539
line_max:      102
alpha_frac:    0.616494
autogenerated: false

repo_name:     mozilla/elmo
path:          apps/shipping/views/prod_signoffs.py
copies:        1
size:          5019
content:
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.

"""Views and helpers for PM sign-off views.
"""
from __future__ import absolute_import, print_function
from __future__ import unicode_literals

from collections import OrderedDict
import json
from django.http import HttpResponse
from django.contrib.auth.mixins import PermissionRequiredMixin
from django.views.generic import TemplateView, View
from django.shortcuts import get_object_or_404
from django.db.models import Max
from shipping.models import (
    AppVersion,
    Action,
)
from life.models import (
    Changeset,
    Locale,
    Repository,
    Push,
)
from l10nstats.models import (
    Run,
)
from shipping.api import (
    flags4appversions,
)


class SignoffTableView(TemplateView):
    template_name = 'shipping/prod-signoffs.html'

    def get(self, request, appver_code):
        appver = get_object_or_404(AppVersion, code=appver_code)
        context = self.get_context_data(appver)
        return self.render_to_response(context)

    def get_context_data(self, appver):
        # av -> loc code -> [av_code, {flag -> action}]
        # Resolve the av key right away
        flags = flags4appversions([appver])[appver]
        flags = {
            loc: flag_actions.values()
            for loc, (_, flag_actions) in flags.items()
        }
        all_actions = []
        for actions in flags.values():
            all_actions += actions
        locales = (
            Locale.objects
            .filter(
                code__in=flags.keys(),
            )
        )
        l2so = dict(
            locales
            .filter(
                repository__push__signoff__action__in=all_actions
            )
            .annotate(last_signoff=Max('repository__push'))
            .values_list('code', 'last_signoff')
        )
        repos = Repository.objects.filter(push__in=l2so.values())
        l2p = dict(
            locales
            .filter(
                repository__in=repos
            )
            .annotate(last_push=Max('repository__push'))
            .values_list('code', 'last_push')
        )
        locale_count = len(flags)
        push_ids = set()
        revs_for_runs = set()
        rows = OrderedDict()
        for loc in sorted(l2p):
            if l2p[loc] == l2so[loc]:
                # latest push has sign-off data
                continue
            rows[loc] = {
                'push': l2p[loc],
                'signoff': l2so[loc],
                'tip': l2p[loc],
            }
            push_ids.update((l2so[loc], l2p[loc]))
        p2tip = {
            id_: {
                'name': name,
                'cs': last,
            }
            for id_, name, last in Push.objects
            .filter(id__in=push_ids)
            .annotate(last=Max('changesets'))
            .values_list('id', 'repository__name', 'last')
        }
        cs2rev = dict(
            Changeset.objects
            .filter(id__in=(t['cs'] for t in p2tip.values()))
            .values_list('id', 'revision')
        )
        for row in rows.values():
            row['repo'] = p2tip[row['tip']]['name']
            revs_for_runs.add(p2tip[row['tip']]['cs'])
            for p in ('signoff', 'tip'):
                row[p] = cs2rev[p2tip[row[p]]['cs']]
        trees = self.get_compare(appver, revs_for_runs, rows)
        return {
            'appver': appver,
            'trees': trees,
            'total_count': locale_count,
            'rows': rows,
        }

    def get_compare(self, appver, revs, rows):
        tree = appver.trees_over_time.current()[0].tree
        runs = {
            run.locale.code: run
            for run in Run.objects.filter(
                revisions__in=revs,
                tree=tree,
            ).select_related('locale')
        }
        for loc, row in rows.items():
            row['runs'] = [runs.get(loc)]
        return [tree]


class SignOffView(PermissionRequiredMixin, View):
    permission_required = ('shipping.add_signoff', 'shipping.review_signoff')

    def post(self, request, av_code, loc_code, push_id, **kwargs):
        av = get_object_or_404(AppVersion, code=av_code)
        loc = get_object_or_404(Locale, code=loc_code)
        push = get_object_or_404(Push, id=int(push_id))
        action = (
            Action.ACCEPTED
            if request.GET.get('action') == 'accept'
            else Action.REJECTED
        )
        so = av.signoffs.create(push=push, author=request.user, locale=loc)
        so.action_set.create(flag=Action.PENDING,
                             author=request.user)
        action = so.action_set.create(flag=action, author=request.user)
        result = {
            "signoff_id": so.id,
            "latest_action_id": action.id,
        }
        return HttpResponse(
            json.dumps(result),
            content_type="application/json; charset=UTF-8"
        )
license:       mpl-2.0
hash:          976,281,957,916,444,800
line_mean:     30.36875
line_max:      77
alpha_frac:    0.541941
autogenerated: false

repo_name:     theovasi/browsewiki
path:          toolset/make_topicspace.py
copies:        1
size:          9101
content:
import os import sys import logging import scipy import joblib import math import argparse from toolset.corpus import Corpus from gensim import corpora, models, matutils from sklearn.cluster import KMeans as kmeans from toolset import mogreltk from sklearn.neighbors import NearestNeighbors as nn from toolset.cluster_metrics import cluster_metrics from toolset.visualize import get_cluster_reps def make_topicspace(data_file_path, stopwords_file_path=None, n_topics=100, method='lda', n_clusters=12): # Allow gensim to print additional info while executing. logging.basicConfig( format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO) corpus_frame = joblib.load('{}/corpus_frame.txt'.format(data_file_path)) filepaths = list(corpus_frame['filepath']) collection = Corpus(filepaths) print('-- Loaded corpus') # First pass of the collection to create the dictionary. if not os.path.exists(data_file_path + '/dictionary.txt'): print('-- Generating dictionary') dictionary = corpora.Dictionary() batch_size = 0 max_batch_size = 2000 batch = [] if stopwords_file_path is not None: with open(stopwords_file_path) as stopwords_file: stopwords = stopwords_file.read().splitlines() for i, text in enumerate(collection.document_generator()): if 'stopwords' in locals() and stopwords is not None: batch.append(mogreltk.stem(text, stopwords)) else: batch.append(mogreltk.stem(text)) batch_size += 1 if batch_size >= max_batch_size: dictionary.add_documents(batch, prune_at=20000) batch_size = 0 batch = [] dictionary.add_documents(batch, prune_at=20000) dictionary.filter_extremes() joblib.dump(dictionary, data_file_path + '/dictionary.txt') # Second pass of the collection to generate the bag of words representation. if not os.path.exists(data_file_path + '/corpus.txt'): if 'dictionary' not in locals(): dictionary = joblib.load(data_file_path + '/dictionary.txt') if stopwords_file_path is not None: with open(stopwords_file_path) as stopwords_file: stopwords = stopwords_file.read().splitlines() print('-- Loaded dictionary') print('-- Generating corpus') if 'stopwords' in locals() and stopwords is not None: corpus = [dictionary.doc2bow(mogreltk.stem(text, stopwords)) for text in collection.document_generator()] else: corpus = [dictionary.doc2bow(mogreltk.stem(text)) for text in collection.document_generator()] joblib.dump(corpus, data_file_path + '/corpus.txt') # Transform from BoW representation to tf-idf. if not os.path.exists(data_file_path + '/tfidf_model.txt'): if not 'corpus' in locals(): corpus = joblib.load(data_file_path + '/corpus.txt') print('-- Loaded corpus') print('-- Generating tf-idf matrix') tfidf = models.TfidfModel(corpus) joblib.dump(tfidf, data_file_path + '/tfidf_model.txt') corpus_tfidf = tfidf[corpus] tfidf_sparse = matutils.corpus2csc(corpus_tfidf) tfidf_sparse = scipy.sparse.csc_matrix.transpose(tfidf_sparse).tocsr() joblib.dump(tfidf_sparse, data_file_path + '/tfidf_sparse.txt') # Apply Latent Dirichlet Allocation. 
if not os.path.exists(data_file_path + '/topic_model.txt'): if not 'dictionary' in locals(): dictionary = joblib.load(data_file_path + '/dictionary.txt') print('-- Loaded dictionary') if not 'corpus' in locals(): corpus = joblib.load(data_file_path + '/corpus.txt') print('-- Loaded corpus') if not 'tfidf' in locals(): tfidf = joblib.load(data_file_path + '/tfidf_model.txt') print('-- Loaded tfidf model') corpus_tfidf = tfidf[corpus] if method == 'lsa': print('-- Applying Latent Semantic Analysis for {} topics'.format(n_topics)) lsa = models.lsimodel.LsiModel(corpus=corpus_tfidf, id2word=dictionary, num_topics=n_topics) joblib.dump(lsa, data_file_path + '/topic_model.txt') transformed_corpus = lsa[corpus] else: print( '-- Applying Latent Dirichlet Allocation for {} topics'.format(n_topics)) lda = models.ldamodel.LdaModel(corpus=corpus_tfidf, id2word=dictionary, num_topics=n_topics, passes=2) joblib.dump(lda, data_file_path + '/topic_model.txt') transformed_corpus = lda[corpus] # Convert topic space matrix to sparse in the Compressed Sparse Row format. topic_space = matutils.corpus2csc(transformed_corpus) # Transpose the topic space matrix because it will be used with sklearn and # it needs the documents in the rows. topic_space = scipy.sparse.csc_matrix.transpose(topic_space).tocsr() joblib.dump(topic_space, data_file_path + '/topic_space.txt') # Apply clustering using KMeans if not os.path.exists(data_file_path + '/kmodel.txt'): if not 'topic_space' in locals(): topic_space = joblib.load(data_file_path + '/topic_space.txt') print('-- Loaded topic space matrix') best_silhouette_score = -1 best_kmodel = None for index in range(10): kmodel = kmeans(n_clusters=n_clusters, n_init=10) kmodel.fit(topic_space) silhouette_score = cluster_metrics(kmodel, topic_space) print( 'Calculated K-Means model {} with score {}.'.format(index, silhouette_score)) if best_silhouette_score < silhouette_score[0]: best_silhouette_score = silhouette_score[0] best_kmodel = kmodel dist_space = kmodel.transform(topic_space) print('Picked K-means model with silhouette score: {}'.format( best_silhouette_score)) joblib.dump(best_kmodel, data_file_path + '/kmodel.txt') joblib.dump(dist_space, data_file_path + '/dist_space.txt') if not os.path.exists('{}/lemmatizer.txt'.format(data_file_path)): lemmatizer = mogreltk.Lemmatizer() lemmatizer.fit(collection.document_generator(), stopwords_file_path, True) joblib.dump(lemmatizer, '{}/lemmatizer.txt'.format(data_file_path)) # Generate cluster labels. 
if not os.path.exists('{}/cluster_reps.txt'.format(data_file_path)): if not 'tfidf_sparse' in locals(): tfidf_sparse = joblib.load(data_file_path + '/tfidf_sparse.txt') print('-- Loaded tfidf matrix.') if not 'best_kmodel' in locals(): best_kmodel = joblib.load(data_file_path + '/kmodel.txt') print('-- Loaded K-means model.') if not 'dictionary' in locals(): dictionary = joblib.load(data_file_path + '/dictionary.txt') print('-- Loaded dictionary.') if not 'topic_space' in locals(): topic_space = joblib.load(data_file_path + '/topic_space.txt') print('-- Loaded topic space.') if not 'lemmatizer' in locals(): lemmatizer = joblib.load(data_file_path + '/lemmatizer.txt') print('-- Loaded lemmatizer.') cluster_reps, removed_terms = get_cluster_reps( tfidf_sparse, best_kmodel, topic_space, dictionary, lemmatizer) joblib.dump(cluster_reps, '{}/cluster_reps.txt'.format(data_file_path)) if not os.path.exists('{}/nn_model.txt'.format(data_file_path)): if 'tfidf_sparse' not in locals(): tfidf_sparse = joblib.load( '{}/tfidf_sparse.txt'.format(data_file_path)) nn_model = nn(n_neighbors=1000, radius=10) nn_model.fit(tfidf_sparse) joblib.dump(nn_model, '{}/nn_model.txt'.format(data_file_path)) if __name__ == '__main__': parser = argparse.ArgumentParser(description='Process input filepath.') parser.add_argument('data_file_path', type=str, help='The path to the data directory.') parser.add_argument('-s', '--stop', type=str, help='The path to the stopwords file.') parser.add_argument('-t', '--n_topics', type=int, help='The number of topics that will be extracted.') parser.add_argument('-m', '--method', type=str, help='The topic modeling method to be used.') parser.add_argument('-k', '--n_clusters', type=int, help='The number of clusters to be created.') args = parser.parse_args() make_topicspace(data_file_path=args.data_file_path, stopwords_file_path=args.stop, n_topics=args.n_topics, method=args.method, n_clusters=args.n_clusters)
license:       mit
hash:          1,782,624,964,027,688,400
line_mean:     47.153439
line_max:      93
alpha_frac:    0.608175
autogenerated: false

repo_name:     d10genes/pollen
path:          util/pollen_utils.py
copies:        1
size:          1412
content:
import datetime as dt
import re

from bs4 import BeautifulSoup
from pandas import DataFrame, pandas as pd
from pandas.compat import lmap
import requests

udate_re = re.compile(r'.+?/(\d+)/(\d+)/(\d+)')

with open('../pfile.txt', 'r') as f:
    pollen_url = f.read().strip()


def count_url_2date(u):
    # print(u)
    m_ = udate_re.match(u)
    dates = m_.groups()
    return dt.date(*map(int, dates))


def parse_pollen_href(a) -> ('date', 'pollen_ct'):
    u = a.attrs['href']
    date = count_url_2date(u)
    if a.string and a.string.isdigit():
        ct = int(a.string)
    else:  # '', 'N/A'
        ct = -1
    return date, ct


def parse_pollen_page(html_txt) -> ('date', 'pollen_ct'):
    soup = BeautifulSoup(html_txt, "lxml")
    # sel = 'div.calendar-row.calendar-row-4 > div > div > span.count'
    sel = 'span.count > a'
    return DataFrame(lmap(parse_pollen_href, soup.select(sel)), columns=['Date', 'Count'])


def pollen_date2df(yr, m):
    u = pollen_url.format(year=yr, month=m)
    r = requests.get(u)
    return parse_pollen_page(r.content)


def pollen_data(yrmths):
    return pd.concat(
        [pollen_date2df(yr, m) for yr, m in yrmths], ignore_index=True
    ).sort_values('Date', ascending=True)


pscale = DataFrame([
    ['0', 'Absent'],
    ['1-14', 'Low'],
    ['15-89', 'Moderate'],
    ['90-1499', 'High'],
    ['1500+', 'Very High'],
], columns=['Count', 'Level'])
license:       mit
hash:          -7,393,965,330,896,391,000
line_mean:     22.932203
line_max:      90
alpha_frac:    0.59136
autogenerated: false

repo_name:     gadsbyfly/PyBioMed
path:          PyBioMed/test/test_PyInteration.py
copies:        1
size:          2818
content:
# -*- coding: utf-8 -*-
# Copyright (c) 2016-2017, Zhijiang Yao, Jie Dong and Dongsheng Cao
# All rights reserved.
# This file is part of the PyBioMed.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the PyBioMed source tree.
"""
The script is used for testing.

Authors: Zhijiang Yao and Dongsheng Cao.

Date: 2016.06.14

Email: [email protected]
"""
# Core Library modules
import os


def test_pyinteration():
    from PyBioMed.PyInteraction.PyInteraction import CalculateInteraction1
    from PyBioMed.PyInteraction.PyInteraction import CalculateInteraction2
    from PyBioMed.PyInteraction.PyInteraction import CalculateInteraction3
    from PyBioMed.PyDNA import PyDNAac

    print("...............................................................")
    print("testing the DNA descriptors")
    DNA_des = PyDNAac.GetTCC(
        "GACTGAACTGCACTTTGGTTTCATATTATTTGCTC",
        phyche_index=["Dnase I", "Nucleosome", "MW-kg"],
    )
    print(DNA_des)

    print("...............................................................")
    print("testing the protein descriptors")
    from PyBioMed.PyProtein import CTD

    protein = "ADGCGVGEGTGQGPMCNCMCMKWVYADEDAADLESDSFADEDASLESDSFPWSNQRVFCSFADEDAS"
    protein_des = CTD.CalculateCTD(protein)

    print("...............................................................")
    print("testing the molecular descriptors")
    from PyBioMed.PyMolecule import moe
    from rdkit import Chem

    smis = ["CCCC", "CCCCC", "CCCCCC", "CC(N)C(=O)O", "CC(N)C(=O)[O-].[Na+]"]
    m = Chem.MolFromSmiles(smis[3])
    mol_des = moe.GetMOE(m)

    print("...............................................................")
    print("testing the Interaction type 1 module")
    mol_mol_interaction1 = CalculateInteraction1(mol_des, mol_des)
    print(mol_mol_interaction1)

    pro_mol_interaction1 = CalculateInteraction1(mol_des, protein_des)
    print(pro_mol_interaction1)

    DNA_mol_interaction1 = CalculateInteraction1(DNA_des, mol_des)
    print(DNA_mol_interaction1)

    print("...............................................................")
    print("testing the Interaction type 2 module")
    mol_mol_interaction2 = CalculateInteraction2(mol_des, mol_des)
    print(mol_mol_interaction2)

    pro_mol_interaction2 = CalculateInteraction2(mol_des, protein_des)
    print(pro_mol_interaction2)

    DNA_mol_interaction2 = CalculateInteraction2(DNA_des, mol_des)
    print(DNA_mol_interaction2)

    print("...............................................................")
    print("testing the Interaction type 3 module")
    mol_mol_interaction3 = CalculateInteraction3(mol_des, mol_des)
    print(mol_mol_interaction3)


if __name__ == "__main__":
    test_pyinteration()
license:       bsd-3-clause
hash:          -5,770,712,260,648,081,000
line_mean:     29.967033
line_max:      83
alpha_frac:    0.611427
autogenerated: false

repo_name:     tipsybear/zerocycle
path:          zerocycle/ingest/__init__.py
copies:        1
size:          3468
content:
# zerocycle.ingest
# Handles a variety of data ingestion and loading tasks
#
# Author:   Benjamin Bengfort <[email protected]>
# Created:  Mon Jul 14 23:20:57 2014 -0400
#
# Copyright (C) 2014 Bengfort.com
# For license information, see LICENSE.txt
#
# ID: __init__.py [] [email protected] $

"""
Handles a variety of data ingestion and loading tasks
"""

##########################################################################
## Imports
##########################################################################

from zerocycle.db.models import *
from zerocycle.exceptions import *
from zerocycle.db import create_session

from monthly import MonthlyReportReader
from accounts import AccountsReportReader
from base import ReportReader, CSVReportReader, ExcelReportReader

##########################################################################
## Module Constants
##########################################################################

READERS = {
    "MONTHLY": MonthlyReportReader,
    "ACCOUNTS": AccountsReportReader,
    "EXCEL": ExcelReportReader,
    "CSV": CSVReportReader
}

##########################################################################
## Database access functions
##########################################################################

def insert_or_update(session, obj):
    """
    Temporary insert or update functionality; should go to the Manager.
    """
    if isinstance(obj, Route):
        # Do Route Lookup
        instance = session.query(Route).filter_by(name=obj.name).first()
    elif isinstance(obj, Pickup):
        # Do Pickup Lookup
        obj.route_id = obj.route.id
        instance = session.query(Pickup).filter_by(date=obj.date, route_id=obj.route.id, vehicle=obj.vehicle).first()
    else:
        instance = None

    if not instance:
        print "add"
        session.add(obj)
        return obj, True
    else:
        print "update"
        obj.id = instance.id
        session.merge(obj)
        return obj, False

##########################################################################
## Ingestion functions
##########################################################################

def ingest_report(report_type, path, **kwargs):
    """
    Accepts a report and a report_type, then creates a session and for
    every item that the report spits out, it saves the item to the
    database and then returns the item.

    If commit is passed into kwargs as False, this will not commit to the
    database, but instead just return the objects as they come.
    """
    commit = kwargs.pop("commit", True)
    report_type = report_type.upper()
    if report_type not in READERS:
        raise IngestionException("No Report type called '%s'" % report_type)

    reader = READERS[report_type](path, **kwargs)
    session = create_session()

    for item in reader:
        if isinstance(item, Base):
            yield insert_or_update(session, item)
        else:
            for obj in item:
                yield insert_or_update(session, obj)

    if commit:
        session.commit()
    session.close()

def ingest_monthly_report(path, **kwargs):
    """
    Alias for monthly reports ingestion.
    """
    for item in ingest_report('MONTHLY', path, **kwargs):
        yield item

def ingest_accounts_report(path, **kwargs):
    """
    Alias for accounts reports ingestion.
    """
    for item in ingest_report('ACCOUNTS', path, **kwargs):
        yield item
license:       gpl-2.0
hash:          -2,676,293,151,082,858,000
line_mean:     29.964286
line_max:      117
alpha_frac:    0.544694
autogenerated: false

repo_name:     cezarfx/zorba
path:          swig/python/tests/test07.2.py
copies:        1
size:          2529
content:
# Copyright 2006-2011 The FLWOR Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import sys
sys.path.insert(0, '@pythonPath@')

import zorba_api

def test(zorba):
    xquery = zorba.compileQuery("<a>text-a1<b at1='att1' at2='{1+2}'>text-b1</b>text-a2</a>")
    saxHandler = zorba_api.SAX2ContentHandlerProxy()
    saxHandler.setStartDocumentHandler(startDocumentHandler);
    saxHandler.setEndDocumentHandler(endDocumentHandler);
    saxHandler.setStartElementHandler(startElementHandler);
    saxHandler.setEndElementHandler(endElementHandler);
    saxHandler.setCharactersHandler(charactersHandler);
    saxHandler.setProcessingInstructionHandler(processingInstructionHandler);
    saxHandler.setIgnorableWhitespaceHandler(ignorableWhitespaceHandler);
    saxHandler.setStartPrefixMappingHandler(startPrefixMappingHandler);
    saxHandler.setEndPrefixMappingHandler(endPrefixMappingHandler);
    saxHandler.setSkippedEntityHandler(skippedEntityHandler);
    xquery.executeSAX(saxHandler)
    return

def startDocumentHandler():
    print "Start Document"
    return

def endDocumentHandler():
    print "End Document"
    return

def startElementHandler(URI, localName, QName, SAXAttributes):
    print "Start Element - ", QName
    return

def endElementHandler(URI, localName, QName):
    print "End Element - ", QName
    return

def charactersHandler(text):
    print "Characters - ", text
    return

def processingInstructionHandler(target, data):
    print "Processing Instruction"
    return

def ignorableWhitespaceHandler(text):
    print "Ignorable Whitespace - ", text
    return

def startPrefixMappingHandler(prefix, URI):
    print "Start Prefix Mapping - ", prefix
    return

def endPrefixMappingHandler(prefix):
    print "End Prefix Mapping - ", prefix
    return

def skippedEntityHandler(name):
    print "Skipped Entity - ", name
    return

store = zorba_api.InMemoryStore_getInstance()
zorba = zorba_api.Zorba_getInstance(store)

print "Running: XQuery execute - executeSAX"
test(zorba)
print "Success"

zorba.shutdown()
zorba_api.InMemoryStore_shutdown(store)
license:       apache-2.0
hash:          3,346,817,728,684,510,700
line_mean:     30.222222
line_max:      92
alpha_frac:    0.7845
autogenerated: false

repo_name:     gregcaporaso/scikit-bio
path:          skbio/io/format/embl.py
copies:        4
size:          53962
content:
""" EMBL format (:mod:`skbio.io.format.embl`) ========================================= .. currentmodule:: skbio.io.format.embl EMBL format stores sequence and its annotation together. The start of the annotation section is marked by a line beginning with the word "ID". The start of sequence section is marked by a line beginning with the word "SQ". The "//" (terminator) line also contains no data or comments and designates the end of an entry. More information on EMBL file format can be found here [1]_. The EMBL file may end with .embl or .txt extension. An example of EMBL file can be seen here [2]_. Feature Level Products ^^^^^^^^^^^^^^^^^^^^^^ As described in [3]_ *"Feature-level products contain nucleotide sequence and related annotations derived from submitted ENA assembled and annotated sequences. Data are distributed in flatfile format, similar to that of parent ENA records, with each flatfile representing a single feature"*. While only the sequence of the feature is included in such entries, features are derived from the parent entry, and can't be applied as interval metadata. For such reason, interval metatdata are ignored from Feature-level products, as they will be ignored by subsetting a generic Sequence object. Format Support -------------- **Has Sniffer: Yes** **NOTE: No protein support at the moment** Current protein support development is tracked in issue-1499 [4]_ +------+------+---------------------------------------------------------------+ |Reader|Writer| Object Class | +======+======+===============================================================+ |Yes |Yes |:mod:`skbio.sequence.Sequence` | +------+------+---------------------------------------------------------------+ |Yes |Yes |:mod:`skbio.sequence.DNA` | +------+------+---------------------------------------------------------------+ |Yes |Yes |:mod:`skbio.sequence.RNA` | +------+------+---------------------------------------------------------------+ |No |No |:mod:`skbio.sequence.Protein` | +------+------+---------------------------------------------------------------+ |Yes |Yes | generator of :mod:`skbio.sequence.Sequence` objects | +------+------+---------------------------------------------------------------+ Format Specification -------------------- **State: Experimental as of 0.5.1-dev.** Sections before ``FH (Feature Header)`` ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ All the sections before ``FH (Feature Header)`` will be read into the attribute of ``metadata``. The header and its content of a section are stored as key-value pairs in ``metadata``. For the ``RN (Reference Number)`` section, its value is stored as a list, as there are often multiple reference sections in one EMBL record. ``FT`` section ^^^^^^^^^^^^^^ See :ref:`Genbank FEATURES section<genbank_feature_section>` ``SQ`` section ^^^^^^^^^^^^^^ The sequence in the ``SQ`` section is always in lowercase for the EMBL files downloaded from ENA. For the RNA molecules, ``t`` (thymine), instead of ``u`` (uracil) is used in the sequence. All EMBL writers follow these conventions while writing EMBL files. Examples -------- Reading EMBL Files ^^^^^^^^^^^^^^^^^^ Suppose we have the following EMBL file example: >>> embl_str = ''' ... ID X56734; SV 1; linear; mRNA; STD; PLN; 1859 BP. ... XX ... AC X56734; S46826; ... XX ... DT 12-SEP-1991 (Rel. 29, Created) ... DT 25-NOV-2005 (Rel. 85, Last updated, Version 11) ... XX ... DE Trifolium repens mRNA for non-cyanogenic beta-glucosidase ... XX ... KW beta-glucosidase. ... XX ... OS Trifolium repens (white clover) ... 
OC Eukaryota; Viridiplantae; Streptophyta; Embryophyta; Tracheophyta; ... OC Spermatophyta; Magnoliophyta; eudicotyledons; Gunneridae; ... OC Pentapetalae; rosids; fabids; Fabales; Fabaceae; Papilionoideae; ... OC Trifolieae; Trifolium. ... XX ... RN [5] ... RP 1-1859 ... RX DOI; 10.1007/BF00039495. ... RX PUBMED; 1907511. ... RA Oxtoby E., Dunn M.A., Pancoro A., Hughes M.A.; ... RT "Nucleotide and derived amino acid sequence of the cyanogenic ... RT beta-glucosidase (linamarase) from white clover ... RT (Trifolium repens L.)"; ... RL Plant Mol. Biol. 17(2):209-219(1991). ... XX ... RN [6] ... RP 1-1859 ... RA Hughes M.A.; ... RT ; ... RL Submitted (19-NOV-1990) to the INSDC. ... RL Hughes M.A., University of Newcastle Upon Tyne, Medical School, ... RL Newcastle ... RL Upon Tyne, NE2 4HH, UK ... XX ... DR MD5; 1e51ca3a5450c43524b9185c236cc5cc. ... XX ... FH Key Location/Qualifiers ... FH ... FT source 1..1859 ... FT /organism="Trifolium repens" ... FT /mol_type="mRNA" ... FT /clone_lib="lambda gt10" ... FT /clone="TRE361" ... FT /tissue_type="leaves" ... FT /db_xref="taxon:3899" ... FT mRNA 1..1859 ... FT /experiment="experimental evidence, no additional ... FT details recorded" ... FT CDS 14..1495 ... FT /product="beta-glucosidase" ... FT /EC_number="3.2.1.21" ... FT /note="non-cyanogenic" ... FT /db_xref="GOA:P26204" ... FT /db_xref="InterPro:IPR001360" ... FT /db_xref="InterPro:IPR013781" ... FT /db_xref="InterPro:IPR017853" ... FT /db_xref="InterPro:IPR033132" ... FT /db_xref="UniProtKB/Swiss-Prot:P26204" ... FT /protein_id="CAA40058.1" ... FT /translation="MDFIVAIFALFVISSFTITSTNAVEASTLLDIGNLSRS ... FT SFPRGFIFGAGSSAYQFEGAVNEGGRGPSIWDTFTHKYPEKIRDGSNADITV ... FT DQYHRYKEDVGIMKDQNMDSYRFSISWPRILPKGKLSGGINHEGIKYYNNLI ... FT NELLANGIQPFVTLFHWDLPQVLEDEYGGFLNSGVINDFRDYTDLCFKEFGD ... FT RVRYWSTLNEPWVFSNSGYALGTNAPGRCSASNVAKPGDSGTGPYIVTHNQI ... FT LAHAEAVHVYKTKYQAYQKGKIGITLVSNWLMPLDDNSIPDIKAAERSLDFQ ... FT FGLFMEQLTTGDYSKSMRRIVKNRLPKFSKFESSLVNGSFDFIGINYYSSSY ... FT ISNAPSHGNAKPSYSTNPMTNISFEKHGIPLGPRAASIWIYVYPYMFIQEDF ... FT EIFCYILKINITILQFSITENGMNEFNDATLPVEEALLNTYRIDYYYRHLYY ... FT IRSAIRAGSNVKGFYAWSFLDCNEWFAGFTVRFGLNFVD" ... XX ... SQ Sequence 1859 BP; 609 A; 314 C; 355 G; 581 T; 0 other; ... aaacaaacca aatatggatt ttattgtagc catatttgct ctgtttgtta ttagctcatt ... cacaattact tccacaaatg cagttgaagc ttctactctt cttgacatag gtaacctgag ... tcggagcagt tttcctcgtg gcttcatctt tggtgctgga tcttcagcat accaatttga ... aggtgcagta aacgaaggcg gtagaggacc aagtatttgg gataccttca cccataaata ... tccagaaaaa ataagggatg gaagcaatgc agacatcacg gttgaccaat atcaccgcta ... caaggaagat gttgggatta tgaaggatca aaatatggat tcgtatagat tctcaatctc ... ttggccaaga atactcccaa agggaaagtt gagcggaggc ataaatcacg aaggaatcaa ... atattacaac aaccttatca acgaactatt ggctaacggt atacaaccat ttgtaactct ... ttttcattgg gatcttcccc aagtcttaga agatgagtat ggtggtttct taaactccgg ... tgtaataaat gattttcgag actatacgga tctttgcttc aaggaatttg gagatagagt ... gaggtattgg agtactctaa atgagccatg ggtgtttagc aattctggat atgcactagg ... aacaaatgca ccaggtcgat gttcggcctc caacgtggcc aagcctggtg attctggaac ... aggaccttat atagttacac acaatcaaat tcttgctcat gcagaagctg tacatgtgta ... taagactaaa taccaggcat atcaaaaggg aaagataggc ataacgttgg tatctaactg ... gttaatgcca cttgatgata atagcatacc agatataaag gctgccgaga gatcacttga ... cttccaattt ggattgttta tggaacaatt aacaacagga gattattcta agagcatgcg ... gcgtatagtt aaaaaccgat tacctaagtt ctcaaaattc gaatcaagcc tagtgaatgg ... ttcatttgat tttattggta taaactatta ctcttctagt tatattagca atgccccttc ... 
acatggcaat gccaaaccca gttactcaac aaatcctatg accaatattt catttgaaaa ... acatgggata cccttaggtc caagggctgc ttcaatttgg atatatgttt atccatatat ... gtttatccaa gaggacttcg agatcttttg ttacatatta aaaataaata taacaatcct ... gcaattttca atcactgaaa atggtatgaa tgaattcaac gatgcaacac ttccagtaga ... agaagctctt ttgaatactt acagaattga ttactattac cgtcacttat actacattcg ... ttctgcaatc agggctggct caaatgtgaa gggtttttac gcatggtcat ttttggactg ... taatgaatgg tttgcaggct ttactgttcg ttttggatta aactttgtag attagaaaga ... tggattaaaa aggtacccta agctttctgc ccaatggtac aagaactttc tcaaaagaaa ... ctagctagta ttattaaaag aactttgtag tagattacag tacatcgttt gaagttgagt ... tggtgcacct aattaaataa aagaggttac tcttaacata tttttaggcc attcgttgtg ... aagttgttag gctgttattt ctattatact atgttgtagt aataagtgca ttgttgtacc ... agaagctatg atcataacta taggttgatc cttcatgtat cagtttgatg ttgagaatac ... tttgaattaa aagtcttttt ttattttttt aaaaaaaaaa aaaaaaaaaa aaaaaaaaa ... // ... ''' Now we can read it as ``DNA`` object: >>> import io >>> from skbio import DNA, RNA, Sequence >>> embl = io.StringIO(embl_str) >>> dna_seq = DNA.read(embl) >>> dna_seq DNA ---------------------------------------------------------------------- Metadata: 'ACCESSION': 'X56734; S46826;' 'CROSS_REFERENCE': <class 'list'> 'DATE': <class 'list'> 'DBSOURCE': 'MD5; 1e51ca3a5450c43524b9185c236cc5cc.' 'DEFINITION': 'Trifolium repens mRNA for non-cyanogenic beta- glucosidase' 'KEYWORDS': 'beta-glucosidase.' 'LOCUS': <class 'dict'> 'REFERENCE': <class 'list'> 'SOURCE': <class 'dict'> 'VERSION': 'X56734.1' Interval metadata: 3 interval features Stats: length: 1859 has gaps: False has degenerates: False has definites: True GC-content: 35.99% ---------------------------------------------------------------------- 0 AAACAAACCA AATATGGATT TTATTGTAGC CATATTTGCT CTGTTTGTTA TTAGCTCATT 60 CACAATTACT TCCACAAATG CAGTTGAAGC TTCTACTCTT CTTGACATAG GTAACCTGAG ... 1740 AGAAGCTATG ATCATAACTA TAGGTTGATC CTTCATGTAT CAGTTTGATG TTGAGAATAC 1800 TTTGAATTAA AAGTCTTTTT TTATTTTTTT AAAAAAAAAA AAAAAAAAAA AAAAAAAAA Since this is a mRNA molecule, we may want to read it as ``RNA``. As the EMBL file usually have ``t`` instead of ``u`` in the sequence, we can read it as ``RNA`` by converting ``t`` to ``u``: >>> embl = io.StringIO(embl_str) >>> rna_seq = RNA.read(embl) >>> rna_seq RNA ---------------------------------------------------------------------- Metadata: 'ACCESSION': 'X56734; S46826;' 'CROSS_REFERENCE': <class 'list'> 'DATE': <class 'list'> 'DBSOURCE': 'MD5; 1e51ca3a5450c43524b9185c236cc5cc.' 'DEFINITION': 'Trifolium repens mRNA for non-cyanogenic beta- glucosidase' 'KEYWORDS': 'beta-glucosidase.' 'LOCUS': <class 'dict'> 'REFERENCE': <class 'list'> 'SOURCE': <class 'dict'> 'VERSION': 'X56734.1' Interval metadata: 3 interval features Stats: length: 1859 has gaps: False has degenerates: False has definites: True GC-content: 35.99% ---------------------------------------------------------------------- 0 AAACAAACCA AAUAUGGAUU UUAUUGUAGC CAUAUUUGCU CUGUUUGUUA UUAGCUCAUU 60 CACAAUUACU UCCACAAAUG CAGUUGAAGC UUCUACUCUU CUUGACAUAG GUAACCUGAG ... 
1740 AGAAGCUAUG AUCAUAACUA UAGGUUGAUC CUUCAUGUAU CAGUUUGAUG UUGAGAAUAC 1800 UUUGAAUUAA AAGUCUUUUU UUAUUUUUUU AAAAAAAAAA AAAAAAAAAA AAAAAAAAA We can also ``trascribe`` a sequence and verify that it will be a ``RNA`` sequence >>> rna_seq == dna_seq.transcribe() True Reading EMBL Files using generators ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Soppose we have an EMBL file with multiple records: we can instantiate a generator object to deal with multiple records >>> import skbio >>> embl = io.StringIO(embl_str) >>> embl_gen = skbio.io.read(embl, format="embl") >>> dna_seq = next(embl_gen) For more informations, see :mod:`skbio.io` References ---------- .. [1] ftp://ftp.ebi.ac.uk/pub/databases/embl/release/doc/usrman.txt .. [2] http://www.ebi.ac.uk/ena/data/view/X56734&display=text .. [3] http://www.ebi.ac.uk/ena/browse/feature-level-products .. [4] https://github.com/biocore/scikit-bio/issues/1499 """ # ---------------------------------------------------------------------------- # Copyright (c) 2013--, scikit-bio development team. # # Distributed under the terms of the Modified BSD License. # # The full license is in the file COPYING.txt, distributed with this software. # ---------------------------------------------------------------------------- # std modules import re import copy import textwrap from functools import partial # skbio modules from skbio.io import create_format, EMBLFormatError from skbio.io.format._base import (_line_generator, _get_nth_sequence) from skbio.io.format._sequence_feature_vocabulary import ( _yield_section, _parse_single_feature, _serialize_section_default, _serialize_single_feature) from skbio.metadata import IntervalMetadata from skbio.sequence import Sequence, DNA, RNA, Protein from skbio.util._misc import chunk_str # look at skbio.io.registry to have an idea on how to define this class embl = create_format('embl') # This list is ordered used to read and write embl file. By processing those # values one by one, I will write embl sections with the same order _HEADERS = [ 'LOCUS', 'ACCESSION', 'PARENT_ACCESSION', 'PROJECT_IDENTIFIER', 'DATE', 'DEFINITION', 'GENE_NAME', 'KEYWORDS', 'SOURCE', 'REFERENCE', 'DBSOURCE', 'COMMENT', 'FEATURES' ] # embl has a series of keys different from genbank; moreover keys are not so # easy to understand (eg. RA for AUTHORS). I want to use the same keys used by # genbank both to convert between formats and to use the same methods to get # info from Sequence and its derived objects Here is a dictionary of keys # conversion (EMBL->GB). All the unspecified keys will remain in embl format KEYS_TRANSLATOR = { # identification 'ID': 'LOCUS', 'AC': 'ACCESSION', # PA means PARENT ACCESSION (?) and applies to # feature-level-products entries 'PA': 'PARENT_ACCESSION', 'PR': 'PROJECT_IDENTIFIER', 'DT': 'DATE', 'DE': 'DEFINITION', 'GN': 'GENE_NAME', # uniprot specific 'KW': 'KEYWORDS', # Source (taxonomy and classification) 'OS': 'ORGANISM', 'OC': 'taxonomy', 'OG': 'organelle', # reference keys 'RA': 'AUTHORS', 'RP': 'REFERENCE', 'RC': 'REFERENCE_COMMENT', 'RX': 'CROSS_REFERENCE', 'RG': 'GROUP', 'RT': 'TITLE', 'RL': 'JOURNAL', # Cross references 'DR': 'DBSOURCE', 'CC': 'COMMENT', # features 'FH': 'FEATURES', 'FT': 'FEATURES', 'SQ': 'ORIGIN', } # the inverse of KEYS_TRANSLATOR, for semplicity REV_KEYS_TRANSLATOR = {v: k for k, v in KEYS_TRANSLATOR.items()} # the original genbank _yield_section divides entries in sections relying on # spaces (the same section has the same level of indentation). 
EMBL entries # have a key for each line, so to divide record in sections I need to define a # correspondance for each key to section, then I will divide a record in # sections using these section name. KEYS_2_SECTIONS = { # identification 'ID': 'LOCUS', 'AC': 'ACCESSION', # PA means PARENT ACCESSION (?) and applies to # feature-level-products entries 'PA': 'PARENT_ACCESSION', 'PR': 'PROJECT_IDENTIFIER', 'DT': 'DATE', 'DE': 'DEFINITION', 'GN': 'GENE_NAME', # uniprot specific 'KW': 'KEYWORDS', # Source (taxonomy and classification) 'OS': 'SOURCE', 'OC': 'SOURCE', 'OG': 'SOURCE', # reference keys 'RA': 'REFERENCE', 'RP': 'REFERENCE', 'RC': 'REFERENCE', 'RX': 'REFERENCE', 'RG': 'REFERENCE', 'RT': 'REFERENCE', 'RL': 'REFERENCE', # This shuold be Reference Number. However, to split # between references with _embl_yield_section I need to # change section after reading one reference. So a single # reference is completed when I found a new RN. The # reference number information will be the reference # position in the final REFERENCE list metadata 'RN': 'SPACER', # Cross references 'DR': 'DBSOURCE', 'CC': 'COMMENT', 'AH': 'ASSEMBLY', 'AS': 'ASSEMBLY', 'FH': 'FEATURES', 'FT': 'FEATURES', # sequence 'SQ': 'ORIGIN', ' ': 'ORIGIN', 'CO': 'CONSTRUCTED', # spacer (discarded) 'XX': 'SPACER' } # for convenience: I think such functions are more readadble while accessing # values in lambda functions def _get_embl_key(line): """Return first part of a string as a embl key (ie 'AC M14399;' -> 'AC')""" # embl keys have a fixed size of 2 chars return line[:2] def _get_embl_section(line): """Return the embl section from uniprot key(ie 'RA' -> 'REFERENCE')""" # get embl key key = _get_embl_key(line) # get embl section from key section = KEYS_2_SECTIONS[key] return section def _translate_key(key): """A method to translate a single key from EMBL to genbank. Returns key itself if no traslation is defined""" return KEYS_TRANSLATOR.get(key, key) # a method to translate keys from embl to genbank for a dict object. All keys # not defined in the original dict will remain the same def _translate_keys(data): """Translate a dictionary of uniprot key->value in a genbank like dictionary of key values. Keep old keys if no translation is defined""" # traslate keys and get a new_data object new_data = {_translate_key(k): v for k, v in data.items()} return new_data # define a default textwrap.Wrapper for embl def _get_embl_wrapper(embl_key, indent=5, subsequent_indent=None, width=80): """Returns a textwrap.TextWrapper for embl records (eg, write <key> <string> by providing embl key and a string. Wrap text to 80 column""" # define the string to prepen (eg "OC ") prepend = '{key:<{indent}}'.format(key=embl_key, indent=indent) # deal with 2° strings and more if subsequent_indent is None: subsequent_prepend = prepend else: subsequent_prepend = '{key:<{indent}}'.format( key=embl_key, indent=subsequent_indent) # define a text wrapper object wrapper = textwrap.TextWrapper( initial_indent=prepend, subsequent_indent=subsequent_prepend, width=width ) return wrapper def _serialize_list(embl_wrapper, data, sep="\n"): """Serialize a list of obj using a textwrap.TextWrapper instance. Returns one string of wrapped embl objects""" # the output array output = [] for line in data: output += embl_wrapper.wrap(line) # merge dates in one string. Add final newline output = sep.join(output) + "\n" # return comupted string return output # Method to determine if file is in EMBL format or not. 
A uniprot embl format # can't be parsed by this module (at the moment) @embl.sniffer() def _embl_sniffer(fh): try: line = next(_line_generator(fh, skip_blanks=True, strip=False)) except StopIteration: return False, {} try: _parse_id([line]) except EMBLFormatError: return False, {} return True, {} @embl.reader(None) def _embl_to_generator(fh, constructor=None, **kwargs): for record in _parse_embls(fh): yield _construct(record, constructor, **kwargs) # Method to read EMBL data as skbio.sequence.DNA @embl.reader(Sequence) def _embl_to_sequence(fh, seq_num=1, **kwargs): record = _get_nth_sequence(_parse_embls(fh), seq_num) return _construct(record, Sequence, **kwargs) # Method to read EMBL data as skbio.sequence.DNA @embl.reader(DNA) def _embl_to_dna(fh, seq_num=1, **kwargs): record = _get_nth_sequence(_parse_embls(fh), seq_num) return _construct(record, DNA, **kwargs) # Method to read EMBL data as skbio.sequence.DNA @embl.reader(RNA) def _embl_to_rna(fh, seq_num=1, **kwargs): record = _get_nth_sequence(_parse_embls(fh), seq_num) return _construct(record, RNA, **kwargs) # No protein support at the moment @embl.reader(Protein) def _embl_to_protein(fh, seq_num=1, **kwargs): # no protein support, at the moment raise EMBLFormatError("There's no protein support for EMBL record. " "Current status of EMBL protein support is " "described in issue-1499 (https://github.com/" "biocore/scikit-bio/issues/1499)") # Writer methods @embl.writer(None) def _generator_to_embl(obj, fh): for obj_i in obj: _serialize_single_embl(obj_i, fh) @embl.writer(Sequence) def _sequence_to_embl(obj, fh): _serialize_single_embl(obj, fh) @embl.writer(DNA) def _dna_to_embl(obj, fh): _serialize_single_embl(obj, fh) @embl.writer(RNA) def _rna_to_embl(obj, fh): _serialize_single_embl(obj, fh) @embl.writer(Protein) def _protein_to_embl(obj, fh): # no protein support, at the moment raise EMBLFormatError("There's no protein support for EMBL record. " "Current status of EMBL protein support is " "described in issue-1499 (https://github.com/" "biocore/scikit-bio/issues/1499)") def _construct(record, constructor=None, **kwargs): '''Construct the object of Sequence, DNA, RNA, or Protein.''' # sequence, metadata and interval metadata seq, md, imd = record if 'lowercase' not in kwargs: kwargs['lowercase'] = True if constructor is None: unit = md['LOCUS']['unit'] if unit == 'bp': # RNA mol type has T instead of U for genbank from from NCBI constructor = DNA elif unit == 'aa': # no protein support, at the moment # constructor = Protein raise EMBLFormatError("There's no protein support for EMBL record") if constructor == RNA: return DNA( seq, metadata=md, interval_metadata=imd, **kwargs).transcribe() else: return constructor( seq, metadata=md, interval_metadata=imd, **kwargs) # looks like the genbank _parse_genbank def _parse_embls(fh): """Chunck multiple EMBL records by '//', and returns a generator""" data_chunks = [] for line in _line_generator(fh, skip_blanks=True, strip=False): if line.startswith('//'): yield _parse_single_embl(data_chunks) data_chunks = [] else: data_chunks.append(line) def _parse_single_embl(chunks): metadata = {} interval_metadata = None sequence = '' # define a section splitter with _embl_yield_section function defined in # this module (return the embl section by embl key). returns generator for # each block with different line type section_splitter = _embl_yield_section( lambda line: _get_embl_section(line), skip_blanks=True, strip=False) # process each section, like genbank does. 
for section, section_name in section_splitter(chunks): # section is a list of records with the same session (eg RA, RP for # for a single reference). section_name is the name of the section # (eg REFERENCE for the section of the previous example) # search for a specific method in PARSER_TABLE using section_name or # set _embl_parse_section_default parser = _PARSER_TABLE.get( section_name, _embl_parse_section_default) if section_name == 'FEATURES': # This requires 'ID' line parsed before 'FEATURES', which should # be true and is implicitly checked by the sniffer. This is true # since the first section is parsed by the last else condition if "PARENT_ACCESSION" in metadata: # this is a feature-level-products entry and features are # relative to parent accession; in the same way a subset of a # Sequence object has no interval metadata, I will refuse to # process interval metadata here continue # partials add arguments to previous defined functions, in this # case length of Sequence object parser = partial( parser, length=metadata["LOCUS"]["size"]) elif section_name == "COMMENT": # mantain newlines in comments # partials add arguments to previous defined functions parser = partial( parser, join_delimiter="\n") # call function on section parsed = parser(section) # reference can appear multiple times if section_name == 'REFERENCE': # genbank data hasn't CROSS_REFERENCE section, To have a similar # metatadata object, I chose to remove CROSS_REFERENCE from # each single reference and put them in metadata. Since I could # have more references, I need a list of CROSS_REFERENCE, with # None values when CROSS_REFERENCE are not defined: there are cases # in which some references have a CROSS_REFERENCE and others not. # So each reference will have it's cross reference in the same # index position, defined or not cross_reference = parsed.pop("CROSS_REFERENCE", None) # fix REFERENCE metadata. Ask if is the first reference or not # I need a reference number as genbank, this could be reference # size if section_name in metadata: RN = len(metadata[section_name]) + 1 else: RN = 1 # fix reference fields. Get RN->REFERENCE value from dict positions = parsed.pop("REFERENCE", None) parsed["REFERENCE"] = str(RN) # append position to RN (eg "1 (bases 1 to 63)") if positions: parsed["REFERENCE"] += " %s" % (positions) # cross_reference will be a list of cross reference; Also # metadata[REFERENCE] is a list of references if section_name in metadata: # I've already seen a reference, append new one metadata[section_name].append(parsed) metadata["CROSS_REFERENCE"].append(cross_reference) else: # define a list for this first reference and its RX metadata[section_name] = [parsed] metadata["CROSS_REFERENCE"] = [cross_reference] elif section_name == 'ORIGIN': sequence = parsed elif section_name == 'FEATURES': interval_metadata = parsed elif section_name == 'DATE': # read data (list) metadata[section_name] = parsed # fix locus metadata using last date. Take only last date date = metadata[section_name][-1].split()[0] metadata["LOCUS"]["date"] = date # parse all the others sections (SOURCE, ...) 
else: metadata[section_name] = parsed # after metadata were read, add a VERSION section like genbank # eval if entry is a feature level product or not if "ACCESSION" in metadata: metadata["VERSION"] = "{accession}.{version}".format( accession=metadata["ACCESSION"].split(";")[0], version=metadata["LOCUS"]["version"]) elif "PARENT_ACCESSION" in metadata: # locus name is in the format # <accession>.<version>:<feature location>:<feature name>[:ordinal] # and ordinal could be present or not, depends on how many features # are found in such location. Such entry couldn't be found in others # database like NCBI (at the moment) so we will take the version # relying on parent accession (hoping that an update in the parent # accession will generate an update in all feature level products) metadata["VERSION"] = metadata["PARENT_ACCESSION"] # return a string, metatdata as a dictionary and IntervalMetadata object return sequence, metadata, interval_metadata def _write_serializer(fh, serializer, embl_key, data): """A simple method to write serializer to a file. Append 'XX'""" # call the serializer function out = serializer(embl_key, data) # test if 'out' is a iterator. # cf. Effective Python Item 17 if iter(out) is iter(out): for s in out: fh.write(s) else: fh.write(out) # add spacer between sections fh.write("XX\n") # main function for writer methods def _serialize_single_embl(obj, fh): '''Write a EMBL record. Always write it in ENA canonical way: 1. sequence in lowercase (uniprot are uppercase) 2. 'u' as 't' even in RNA molecules. Parameters ---------- obj : Sequence or its child class ''' # shortcut to deal with metadata md = obj.metadata # embl has a different magick number than embl serialize_default = partial( _serialize_section_default, indent=5) # Now cicle for GB like headers (sections) in _HEADERS. for header in _HEADERS: # Get appropriate serializer method or default one serializer = _SERIALIZER_TABLE.get( header, serialize_default) # headers needs to be converted into embl, or matained as they are # if no conversion could be defined. embl_key = REV_KEYS_TRANSLATOR.get(header, header) # this is true also for locus line if header in md: # deal with special source case, add cross references if needed if header == "REFERENCE": serializer = partial( serializer, cross_references=md.get("CROSS_REFERENCE")) elif header == "LOCUS": # pass also metadata (in case of entries from genbank) serializer = partial( serializer, metadata=md) # call the serializer function _write_serializer(fh, serializer, embl_key, md[header]) else: # header not in metadata. Could be date read from GB? if header == "DATE": # Have I date in locus metadata? if md["LOCUS"]["date"]: # call serializer on date. 
Date is a list of values _write_serializer( fh, serializer, embl_key, [md["LOCUS"]["date"]]) if header == 'FEATURES': if obj.has_interval_metadata(): # magic number 21: the amount of indentation before # feature table starts as defined by INSDC indent = 21 feature_key = "FH Key" fh.write('{header:<{indent}}Location/Qualifiers\n'.format( header=feature_key, indent=indent)) # add FH spacer fh.write("FH\n") for s in serializer(obj.interval_metadata._intervals, indent): fh.write(s) # add spacer between sections fh.write("XX\n") # write out the sequence # always write RNA seq as DNA if isinstance(obj, RNA): obj = obj.reverse_transcribe() # serialize sequence from a Sequence object for s in _serialize_sequence(obj): fh.write(s) # terminate a embl record with fh.write('//\n') def _parse_id(lines): """ From EMBL user manual (Release 130, November 2016) (ftp://ftp.ebi.ac.uk/pub/databases/embl/release/doc/usrman.txt) The ID (IDentification) line is always the first line of an entry. The format of the ID line is: ID <1>; SV <2>; <3>; <4>; <5>; <6>; <7> BP. The tokens represent: 1. Primary accession number 2. Sequence version number 3. Topology: 'circular' or 'linear' 4. Molecule type (see note 1 below) 5. Data class (see section 3.1 of EMBL user manual) 6. Taxonomic division (see section 3.2 of EMBL user manual) 7. Sequence length (see note 2 below) Note 1 - Molecule type: this represents the type of molecule as stored and can be any value from the list of current values for the mandatory mol_type source qualifier. This item should be the same as the value in the mol_type qualifier(s) in a given entry. Note 2 - Sequence length: The last item on the ID line is the length of the sequence (the total number of bases in the sequence). This number includes base positions reported as present but undetermined (coded as "N"). An example of a complete identification line is shown below: ID CD789012; SV 4; linear; genomic DNA; HTG; MAM; 500 BP. """ # get only the first line of EMBL record line = lines[0] # define a specific patter for EMBL pattern = re.compile(r'ID' r' +([^\s]+);' # ie: CD789012 r' +SV ([0-9]*);' # 4 r' +(\w+);' # linear r' +([^;]+);' # genomic DNA r' +(\w*);' # HTG r' +(\w+);' # MAM r' +(\d+)' # 500 r' +(\w+)\.$') # BP # search it matches = re.match(pattern, line) try: res = dict(zip( ['locus_name', 'version', 'shape', 'mol_type', 'class', 'division', 'size', 'unit'], matches.groups())) except AttributeError: raise EMBLFormatError( "Could not parse the ID line:\n%s" % line) # check for CON entries: if res['class'] == "CON": # entries like http://www.ebi.ac.uk/ena/data/view/LT357133 # doesn't have sequence, so can't be read by skbio.sequence raise EMBLFormatError( "There's no support for embl CON record: for more information " "see issue-1506 (https://github.com/biocore/scikit-bio/issues/" "1506)") # those values are integer res['size'] = int(res['size']) # version could be integer if res['version']: res['version'] = int(res['version']) # unit are in lower cases in others modules res['unit'] = res['unit'].lower() # initialize a date record (for gb compatibility) res['date'] = None # returning parsed attributes return res def _serialize_id(header, obj, metadata={}, indent=5): '''Serialize ID line. Parameters ---------- obj : dict ''' # get key->value pairs, or key->'' if values is None kwargs = {k: '' if v is None else v for k, v in obj.items()} # then unit is in upper cases kwargs["unit"] = kwargs["unit"].upper() # check for missing keys (eg from gb data). 
Keys in md are in uppercase for key in ["version", "class"]: if key not in kwargs: if key.upper() in metadata: kwargs[key] = metadata[key.upper()] else: kwargs[key] = "" # version from genbank could be "M14399.1 GI:145229". I need an integer version = kwargs["version"] # version could by empty, integer or text if version != '': try: int(kwargs["version"]) # could be a text like M14399.1 except ValueError: match = re.search(r"^\w+\.([0-9]+)", version) if match: kwargs["version"] = match.groups()[0] # return first line return ('{header:<{indent}}{locus_name}; SV {version}; {shape}; ' '{mol_type}; {class}; {division}; {size} {unit}.\n').format( header=header, indent=indent, **kwargs) # similar to skbio.io.format._sequence_feature_vocabulary.__yield_section # but applies to embl file format def _embl_yield_section(get_line_key, **kwargs): '''Returns function that returns successive sections from file. Parameters ---------- get_line_key : callable It takes a string as input and a key indicating the section (could be the embl key or embl KEYS_2_SECTIONS) kwargs : dict, optional Keyword arguments will be passed to `_line_generator`. Returns ------- function A function accept a list of lines as input and return a generator to yield section one by one. ''' def parser(lines): curr = [] curr_type = None for line in _line_generator(lines, **kwargs): # if we find another line, return the previous section line_type = get_line_key(line) # changed line type if line_type != curr_type: if curr: # returning block yield curr, curr_type # reset curr after yield curr = [] # reset curr_type in any cases curr_type = line_type # don't append record if line type is a spacer if 'SPACER' not in line_type: curr.append(line) # don't forget to return the last section in the file if curr: yield curr, curr_type return parser # replace skbio.io.format._sequence_feature_vocabulary._parse_section_default def _embl_parse_section_default( lines, label_delimiter=None, join_delimiter=' ', return_label=False): '''Parse sections in default way. Do 2 things: 1. split first line with label_delimiter for label 2. join all the lines into one str with join_delimiter. ''' data = [] label = None line = lines[0] # take the first line, divide the key from the text items = line.split(label_delimiter, 1) if len(items) == 2: label, section = items else: label = items[0] section = "" # append the text of the first element in a empty array data.append(section) # Then process all the elements with the same embl key. remove the key # and append all the text in the data array data.extend(line.split(label_delimiter, 1)[-1] for line in lines[1:]) # Now concatenate the text using join_delimiter. All content with the same # key will be placed in the same string. Strip final "\n data = join_delimiter.join(i.strip() for i in data) # finally return the merged text content, and the key if needed if return_label: return label, data else: return data # parse an embl reference record. def _parse_reference(lines): '''Parse single REFERENCE field. ''' # parsed reference will be placed here res = {} # define a section splitter with _embl_yield_section function defined in # this module section_splitter = _embl_yield_section(lambda line: _get_embl_key(line), skip_blanks=True, strip=False) # now itereta along sections (lines of the same type) for section, section_name in section_splitter(lines): # this function append all data in the same keywords. 
A list of lines # as input (see skbio.io.format._sequence_feature_vocabulary) label, data = _embl_parse_section_default( section, join_delimiter=' ', return_label=True) res[label] = data # now RX (CROSS_REFERENCE) is a joined string of multiple values. To get # back to a list of values you can use: re.compile("([^;\s]*); ([^\s]*)") # search for pubmed record, and add the PUBMED key if "RX" in res: match = re.search(r"PUBMED; (\d+)\.", res["RX"]) if match: # add pubmed notation res["PUBMED"] = match.groups()[0] # fix RP field like genbank (if exists), Ie: (bases 1 to 63) if "RP" in res: match = re.search(r"(\d+)-(\d+)", res["RP"]) if match: # fix rp fields res["RP"] = "(bases {start} to {stop})".format( start=match.groups()[0], stop=match.groups()[1]) # return translated keys (EMBL->GB) return _translate_keys(res) def _serialize_reference(header, obj, cross_references, indent=5): """Serialize a list of references""" reference = [] sort_order = ["RC", "RP", "RX", "RG", "RA", "RT", "RL"] # deal with RX pattern and RP pattern RX = re.compile(r"([^;\s]*); ([^\s]*)") RP = re.compile(r"bases (\d+) to (\d+)") # create a copy of obj, that can be changed. I need to delete values or # adding new ones obj = copy.deepcopy(obj) # obj is a list of references. Now is a copy of metadata[SOURCE] for i, data in enumerate(obj): # get the reference number (as the iteration number) embl_key = "RN" # get cross_references if cross_references: cross_reference = cross_references[i] # append cross reference [i] to data (obj[i]) (if they exists) if cross_reference: data["CROSS_REFERENCE"] = cross_reference # delete PUBMED key (already present in CROSS_REFERENCE) if "PUBMED" in data: del(data["PUBMED"]) else: # no cross reference, do I have PUBMED in data? if "PUBMED" in data: # add a fake CROSS_REFERENCE data["CROSS_REFERENCE"] = 'PUBMED; %s.' % data["PUBMED"] # get an embl wrapper wrapper = _get_embl_wrapper(embl_key, indent) # define wrapped string and add RN to embl data reference += wrapper.wrap("[{RN}]".format(RN=i+1)) # now process each record for references for embl_key in sort_order: # get internal key (genbank like key) key = _translate_key(embl_key) # have I this reference in my reference data? if key not in data: continue # if yes, define wrapper wrapper = _get_embl_wrapper(embl_key, indent) # data could have newlines records = data[key].split("\n") for record in records: # strip after newlines record = record.strip() # define wrapped string. beware RX if embl_key == "RX": for match in re.finditer(RX, record): source, link = match.groups() # join text cross_reference = "; ".join([source, link]) reference += wrapper.wrap(cross_reference) # RP case elif embl_key == "RP": match = re.search(RP, record) # if I have position, re-define RP key if match: record = "%s-%s" % match.groups() reference += wrapper.wrap(record) # if not, ignore RP key else: continue # all the other cases, go in wrapper as they are else: reference += wrapper.wrap(record) # add a spacer between references (but no at the final reference) # cause the caller will add spacer if (i+1) < len(obj): reference += ["XX"] # now define a string and add a final "\n" s = "\n".join(reference) + "\n" # and return it return s # parse an embl reference record. def _parse_source(lines): '''Parse single SOURCE field. 
''' # parsed reference will be placed here res = {} # define a section splitter with _embl_yield_section function defined in # this module section_splitter = _embl_yield_section(lambda line: _get_embl_key(line), skip_blanks=True, strip=False) # now itereta along sections (lines of the same type) for section, section_name in section_splitter(lines): # this function append all data in the same keywords. A list of lines # as input (see skbio.io.format._sequence_feature_vocabulary) label, data = _embl_parse_section_default( section, join_delimiter=' ', return_label=True) res[label] = data # return translated keys return _translate_keys(res) def _serialize_source(header, obj, indent=5): '''Serialize SOURCE. Parameters ---------- header: section header obj : dict indent : indent length ''' source = [] # treat taxonomy and all others keys for key in ["ORGANISM", "taxonomy", "organelle"]: # get data to serielize data = obj.get(key) # if key is not defined (eg. organelle, continue) if data is None: continue # get embl key for my key (eg, taxonomy -> OC) embl_key = REV_KEYS_TRANSLATOR.get(key, key) # get an embl wrapper wrapper = _get_embl_wrapper(embl_key, indent) # define wrapped string source += wrapper.wrap(data) # now define a string and add a final "\n" s = "\n".join(source) + "\n" # and return it return s def _parse_sequence(lines): '''Parse the sequence section for sequence.''' # result array sequence = [] for line in lines: # ignore record like: # SQ Sequence 275 BP; 64 A; 73 C; 88 G; 50 T; 0 other; if line.startswith('SQ'): continue # remove the numbers inside strings. revome spaces around string items = [i for i in line.split() if not i.isdigit()] # append each sequence items to sequence list sequence += items return ''.join(sequence) def _serialize_sequence(obj, indent=5): '''Serialize seq to SQ. Parameters ---------- obj : DNA, RNA, Sequence Obj ''' # a flag to determine if I wrote header or not flag_header = False # magic numbers: there will be 60 letters (AA, bp) on each line chunk_size = 60 # letters (AA, bp) will be grouped by 10: each group is divided by # one space from each other frag_size = 10 # fasta sequence will have indent spaces on the left, chunk_size/frag_size # groups of frag_size letters separated by n-1 groups of single spaces, # then the sequence length aligned on the right to get a string of # line_size. Setting left and right padding for semplicity pad_right = 65 # there are also 5 columns for indentation pad_left = 10 # sequence number will be in the last 10 columns # get sequence as a string with lower letters (uniprot will be upper!) seq = str(obj).lower() # count bases in sequence. Frequencies returns a dictionary of occurences # of A,C,G,T. 
Sequences are stored always in capital letters freq = obj.frequencies() # get values instead of popping them: I can't assure that the letter T, # for example, is always present n_a = freq.get('A', 0) n_c = freq.get('C', 0) n_g = freq.get('G', 0) n_t = freq.get('T', 0) # this will be the count of all others letters (more than ACGT) n_others = len(obj) - (n_a + n_c + n_g + n_t) # define SQ like this: # SQ Sequence 275 BP; 63 A; 72 C; 88 G; 52 T; 0 other; SQ = "SQ Sequence {size} {unit}; {n_a} A; {n_c} C; {n_g} G; " +\ "{n_t} T; {n_others} other;\n" # TODO: deal with protein SQ: they have a sequence header like: # SQ SEQUENCE 256 AA; 29735 MW; B4840739BF7D4121 CRC64; # apply format SQ = SQ.format(size=len(obj), unit=obj.metadata["LOCUS"]["unit"].upper(), n_a=n_a, n_c=n_c, n_g=n_g, n_t=n_t, n_others=n_others) for i in range(0, len(seq), chunk_size): line = seq[i:i+chunk_size] # pad string left and right s = '{indent}{s:<{pad_right}}{pos:>{pad_left}}\n'.format( indent=" "*indent, s=chunk_str(line, frag_size, ' '), pad_left=pad_left, pos=i+len(line), pad_right=pad_right) if not flag_header: # First time here. Add SQ header to sequence s = SQ + s # When I added header, I need to turn off this flag flag_header = True yield s def _embl_parse_feature_table(lines, length): """Parse embl feature tables""" # define interval metadata imd = IntervalMetadata(length) # get only FT records, and remove key from line lines = [line[2:] for line in lines if line.startswith('FT')] # magic number 19: after key removal, the lines of each feature # are indented with 19 spaces. feature_indent = ' ' * 19 section_splitter = _yield_section( lambda x: not x.startswith(feature_indent), skip_blanks=True, strip=False) for section in section_splitter(lines): _parse_single_feature(section, imd) return imd def _serialize_feature_table(intervals, indent=21): ''' Parameters ---------- intervals : list of ``Interval`` ''' # define a embl wrapper object. I need to replace only the first two # characters from _serialize_single_feature output wrapper = _get_embl_wrapper("FT", indent=2, subsequent_indent=21) for intvl in intervals: tmp = _serialize_single_feature(intvl, indent) output = [] # I need to remove two spaces, cause I will add a FT key for line in tmp.split("\n"): output += wrapper.wrap(line[2:]) # re add newline between elements, and a final "\n" yield "\n".join(output) + "\n" def _parse_date(lines, label_delimiter=None, return_label=False): """Parse embl date records""" # take the first line, and derive a label label = lines[0].split(label_delimiter, 1)[0] # read all the others dates and append to data array data = [line.split(label_delimiter, 1)[-1] for line in lines] # strip returned data data = [i.strip() for i in data] # finally return data array, and the key if needed if return_label: return label, data else: return data def _serialize_date(embl_key, date_list, indent=5): '''Serialize date line. 
Parameters ---------- header : embl key id date_list : a list of dates ''' # get an embl wrapper wrapper = _get_embl_wrapper(embl_key, indent) # # serialize date and return them as a string return _serialize_list(wrapper, date_list) def _serialize_comment(embl_key, obj, indent=5): """Serialize comment (like Assembly)""" # obj is a string, Split it by newlines data = obj.split("\n") # get an embl wrapper wrapper = _get_embl_wrapper(embl_key, indent) # serialize data and return it return _serialize_list(wrapper, data) def _serialize_dbsource(embl_key, obj, indent=5): """Serialize DBSOURCE""" # data are stored like 'SILVA-LSU; LK021130. SILVA-SSU; LK021130. ... # I need to split string after final period (not AAT09660.1) # deal with re pattern. A pattern to find a period as end of sentence DR = re.compile(r"\.\s") # splitting by this pattern, I will have # ["SILVA-LSU; LK021130", "SILVA-SSU; LK021130", ...] # I need that each of them will be in a DR record. # get an embl wrapper wrapper = _get_embl_wrapper(embl_key, indent) # serialize data and return it. Split dbsource using re. Add a # final period between elements since I removed it by splitting return _serialize_list(wrapper, re.split(DR, obj), sep=".\n") def _parse_assembly(lines): """Parse embl assembly records""" output = [] # first line is header, skip it for line in lines[1:]: data = line.split() # data could have comp feature or not. First element in data is 'AS' if len(data) == 5: res = dict(zip( ['local_span', 'primary_identifier', 'primary_span', 'comp'], data[1:])) elif len(data) == 4: res = dict(zip( ['local_span', 'primary_identifier', 'primary_span', 'comp'], data[1:]+[''])) else: raise EMBLFormatError("Can't parse assembly line %s" % line) # append res to output output += [res] return output # Map a function to each section of the entry _PARSER_TABLE = { 'LOCUS': _parse_id, 'SOURCE': _parse_source, 'DATE': _parse_date, 'REFERENCE': _parse_reference, 'FEATURES': _embl_parse_feature_table, 'ORIGIN': _parse_sequence, 'ASSEMBLY': _parse_assembly, } # for writer functions _SERIALIZER_TABLE = { 'LOCUS': _serialize_id, 'SOURCE': _serialize_source, 'DATE': _serialize_date, 'REFERENCE': _serialize_reference, 'FEATURES': _serialize_feature_table, 'COMMENT': _serialize_comment, 'DBSOURCE': _serialize_dbsource, }
bsd-3-clause
7,362,532,404,331,829,000
33.590385
79
0.588166
false
omniti-labs/omnifab
omnifab/util.py
1
1629
from fabric.api import cd, hide, run, settings def test(test): """Runs a bash test without outputting any warnings""" return runs_ok("[[ %s ]]" % test) def runs_ok(*args): """Run a command and test its exit status""" with settings(hide('warnings', 'stdout', 'stderr'), warn_only=True): return run(*args).return_code == 0 def mkdir(d): """Ensures a given directory exists""" if not test("-d %s" % d): run("mkdir -p %s" % d) def get_homedir_location(): """Guess the location of home directories on a system""" location = '/home' with settings(hide('warnings', 'stdout', 'stderr'), warn_only=True): # Currently we naively check to see if other common locations for home # dirs exist, and if they do, assume that is where home dirs are # really stored. We could probably do some more checks here. if test("-d /Users"): location = "/Users" # Mac elif test("-d /export/home"): location = "/export/home" # Solaris return location def git_remote(dirname, name, url): """Ensures that a given git checkout has the specified remote set up""" with cd(dirname): remote_info = run("git remote -v | grep %s" % name) if remote_info: # Remote is present current_url = remote_info.split()[1] if current_url != url: # Remote is present, but the URL needs changing run("git remote set-url %s %s" % (name, url)) else: # Remote isn't present, we should add it run("git remote add %s %s" % (name, url))
mit
3,072,800,736,809,706,500
37.785714
78
0.587477
false
rwl/PyCIM
CIM15/IEC61970/Informative/InfLocations/OrgPropertyRole.py
1
3735
# Copyright (C) 2010-2011 Richard Lincoln # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to # deal in the Software without restriction, including without limitation the # rights to use, copy, modify, merge, publish, distribute, sublicense, and/or # sell copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. from CIM15.IEC61970.Informative.InfCommon.Role import Role class OrgPropertyRole(Role): """Roles played between Organisations and a given piece of property. For example, the Organisation may be the owner, renter, occupier, taxiing authority, etc.Roles played between Organisations and a given piece of property. For example, the Organisation may be the owner, renter, occupier, taxiing authority, etc. """ def __init__(self, LandProperty=None, ErpOrganisation=None, *args, **kw_args): """Initialises a new 'OrgPropertyRole' instance. @param LandProperty: @param ErpOrganisation: """ self._LandProperty = [] self.LandProperty = [] if LandProperty is None else LandProperty self._ErpOrganisation = None self.ErpOrganisation = ErpOrganisation super(OrgPropertyRole, self).__init__(*args, **kw_args) _attrs = [] _attr_types = {} _defaults = {} _enums = {} _refs = ["LandProperty", "ErpOrganisation"] _many_refs = ["LandProperty"] def getLandProperty(self): return self._LandProperty def setLandProperty(self, value): for p in self._LandProperty: filtered = [q for q in p.ErpOrganisationRoles if q != self] self._LandProperty._ErpOrganisationRoles = filtered for r in value: if self not in r._ErpOrganisationRoles: r._ErpOrganisationRoles.append(self) self._LandProperty = value LandProperty = property(getLandProperty, setLandProperty) def addLandProperty(self, *LandProperty): for obj in LandProperty: if self not in obj._ErpOrganisationRoles: obj._ErpOrganisationRoles.append(self) self._LandProperty.append(obj) def removeLandProperty(self, *LandProperty): for obj in LandProperty: if self in obj._ErpOrganisationRoles: obj._ErpOrganisationRoles.remove(self) self._LandProperty.remove(obj) def getErpOrganisation(self): return self._ErpOrganisation def setErpOrganisation(self, value): if self._ErpOrganisation is not None: filtered = [x for x in self.ErpOrganisation.LandPropertyRoles if x != self] self._ErpOrganisation._LandPropertyRoles = filtered self._ErpOrganisation = value if self._ErpOrganisation is not None: if self not in self._ErpOrganisation._LandPropertyRoles: self._ErpOrganisation._LandPropertyRoles.append(self) ErpOrganisation = property(getErpOrganisation, setErpOrganisation)
mit
-1,032,783,476,870,951,800
40.5
317
0.693976
false
thispc/download-manager
dmanage.py
1
6831
import os from os.path import exists import __builtin__ import sys from os import makedirs, path, chdir from os.path import join from sys import argv, platform import shutil import optparse import json def wdir(): __builtin__.owd = path.abspath("") __builtin__.pypath = path.abspath(path.join(__file__, "..", "..")) sys.path.append(join(pypath, "module", "lib")) homedir = "" if platform == 'nt': homedir = path.expanduser("~") if homedir == "~": import ctypes CSIDL_APPDATA = 26 _SHGetFolderPath = ctypes.windll.shell32.SHGetFolderPathW _SHGetFolderPath.argtypes = [ctypes.wintypes.HWND, ctypes.c_int, ctypes.wintypes.HANDLE, ctypes.wintypes.DWORD, ctypes.wintypes.LPCWSTR] path_buf = ctypes.wintypes.create_unicode_buffer(ctypes.wintypes.MAX_PATH) result = _SHGetFolderPath(0, CSIDL_APPDATA, 0, 0, path_buf) homedir = path_buf.value else: homedir = path.expanduser("~") __builtin__.homedir = homedir args = " ".join(argv[1:]) if "--configdir=" in args: pos = args.find("--configdir=") end = args.find("-", pos + 12) if end == -1: configdir = args[pos + 12:].strip() else: configdir = args[pos + 12:end].strip() elif path.exists(path.join(pypath, "module", "config", "configdir")): f = open(path.join(pypath, "module", "config", "configdir"), "rb") c = f.read().strip() f.close() configdir = path.join(pypath, c) else: if platform in ("posix", "linux2"): configdir = path.join(homedir, ".pyload") else: configdir = path.join(homedir, "pyload") if not path.exists(configdir): makedirs(configdir, 0700) __builtin__.configdir = configdir return configdir if __name__ == "__main__": wpath=wdir() parser = optparse.OptionParser('usage: %prog [options]') parser.add_option("--start", "-s", dest="sflag",action="store_true", help="Start server") parser.add_option("--manage", "-m", dest="mflag",action="store_true", help="Manage servers") parser.add_option("--configure", "-c", dest="cflag",action="store_true", help="Make configuration file") parser.add_option("--show", "-S", dest="Sflag",action="store_true", help="Show All DC Request") parser.add_option("--queue", "-q", dest="qflag",action="store_true", help="Download queue") parser.add_option("--stop", "-x", dest="xflag",action="store_true", help="Stop Server") parser.add_option("--usermanage", "-u", dest="uflag",action="store_true", help="User Management") group1 = optparse.OptionGroup(parser, 'Add Links directly') group2 = optparse.OptionGroup(parser, 'Add Links by DC request id shown by ' +argv[0]+ ' -S command') group3 = optparse.OptionGroup(parser, 'Deleting downloads by ID. View queue by running '+argv[0]+' -q') group1.add_option("--link", "-a", action="append", help="Links") group1.add_option("--name","-n", action="store",dest="name",help="Name of the links group") group2.add_option("--ssid", "-i", action="append",help="Add download links by ids") group3.add_option("--fid", "-f", action="append",help="Delete download links by queue file ids") group3.add_option("--pid", "-p", action="append",help="Delete download links by queue package ids") parser.add_option_group(group1) parser.add_option_group(group2) parser.add_option_group(group3) (options, args) = parser.parse_args() #print options linksid=options.ssid links=options.link deleteids=options.fid deleteidsp=options.pid if(options.sflag): if not exists(wpath+"/pyload.conf"): print "\nConfig File does not exists. \nFirst run '%s -c' command to make configuration file\n" % sys.argv[0] else: if exists(wpath+"/pyload.pid"): print "Server is already UP!!" 
pid=open(wpath+"/pyload.pid").read() print "pid %s" %pid sys.exit(2) os.system('python pyLoadCore.py --daemon') print "Server UP!!" sys.exit(2) if(options.cflag): if not exists(wpath+"/pyload.conf"): os.system('python pyLoadCore.py') sys.exit(2) else: print "\nConfiguration file exist!!!!. Overwrite existing config file? y/n\n" inp=str(raw_input()) if(inp[0]=='n' or inp[0]=='N'): print("Abort...") sys.exit(2) else: shutil.rmtree(wpath) os.system('python pyLoadCore.py') sys.exit(2) if(options.mflag): os.system("python pyLoadCli.py") sys.exit(2) if(options.qflag): os.system("python pyLoadCli.py queue") sys.exit(2) if(options.Sflag): os.system("python scraper.py") with open('users.dat') as data_file: data = json.load(data_file) ss=1 for i in data: print "==============================================================================" print "id : %s" % str(ss) print "user : %s" % i["user"] sys.stdout.write ("url : ") for j in i["url"]: print j print "volunteered : %s" % i["volun"] print "Done? : %s" % i["done"] ss=ss+1 print "==============================================================================" if(options.xflag): try: pid=open(wpath+"/pyload.pid").read() except Exception: print "Server is already DOWN!!" sys.exit(0) os.system("python pyLoadCli.py kill") #os.remove(wpath+"/pyload.pid") print "Server Down!!" if(options.uflag): os.system("python pyLoadCore.py -u") if (linksid is not None): with open('users.dat') as data_file: data = json.load(data_file) for i in linksid: l = data[int(i)-1] linkappend="" for j in l["url"]: linkappend=linkappend+" "+j try: os.system("python pyLoadCli.py add "+l["user"]+linkappend) except Exception: print "Error in adding...Abort!" sys.exit(2) print "Download(s) added successfully" if (links is not None): if(options.name is None): print "\nSpecify the name of the download group by -n flag. Aborting.....\n" sys.exit(2) linkappend="" for j in links: linkappend=linkappend+" "+j try: os.system("python pyLoadCli.py add "+options.name+linkappend) print "Download(s) added successfully" except Exception: print "Error in adding...Abort!" raise Exception sys.exit(2) if (deleteids is not None): idappend="" for j in deleteids: idappend=idappend+" "+j try: os.system("python pyLoadCli.py del_file "+idappend) print "Deleted file(s) successfully" except Exception: print "Error in deletion...Abort!" raise Exception sys.exit(2) if (deleteidsp is not None): idappend="" for j in deleteidsp: idappend=idappend+" "+j try: os.system("python pyLoadCli.py del_package "+idappend) print "Deleted packages(s) successfully" except Exception: print "Error in deletion...Abort!" raise Exception sys.exit(2)
gpl-3.0
-8,801,174,971,790,568,000
31.379147
112
0.61865
false
ted-dunstone/ivs
hub_demo/send_test.py
1
9565
#!/usr/bin/env python import pika import uuid import sys import os import getopt import time import logging import random logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.CRITICAL) class MessageQueue(object): def __init__(self, node_name, # the name of node user_id="guest", # the user id ): credentials = pika.PlainCredentials(user_id, 'guest') self.connection = pika.BlockingConnection(pika.ConnectionParameters( host='localhost', credentials=credentials)) self.channel = self.connection.channel() self.node_name = node_name self.user_id = user_id self.corr_dict = {} print "start node %s"%node_name def log(self, msg): print self.node_name + ' : ' + msg def create_queue(self, exchange, name=''): self.channel.exchange_declare(exchange=exchange, type='headers') #result = self.channel.queue_declare( queue=name,exclusive=True,auto_delete=False) result = self.channel.queue_declare(exclusive=True) if not result: print 'Queue didnt declare properly!' sys.exit(1) return result def send(self, exchange, message, header={}, callback=False): callback_queue = None callback_name = '' #self.create_queue(exchange) self.response = None if (callback): callback_queue = self.create_queue(exchange) callback_name = callback_queue.method.queue self.channel.basic_consume(self.on_response_callback, queue = callback_name, no_ack=True) header.update({ "last_node":self.node_name, "destination":exchange}) self.corr_id = str(uuid.uuid4()) self.corr_dict[self.corr_id]=True self.channel.basic_publish(exchange=exchange, routing_key='', body=message, properties = pika.BasicProperties( headers = header, reply_to = callback_name, correlation_id = self.corr_id, user_id = self.user_id) ) self.log(" [x] Sent %r to %s" % (message,exchange)) if (callback): while self.response is None: self.connection.process_data_events() #print "Response:"+str(self.response) #print str(dict(callback_queue)) #self.callback_queue.delete() def queue_bind(self, exchange, header_match={}): queue_name = self.create_queue(exchange,self.node_name).method.queue header_match.update({'x-match':'any'}) self.channel.queue_bind(exchange=exchange, queue = queue_name, routing_key = '', arguments = header_match) return queue_name def on_return_status(self, properties): # called as RPC to return the status of a sent msg (probably synchronously) return "[Nothing implemented]" def on_recieve_callback(self, ch, method, properties, body): #print properties.user_id #print properties.reply_to #print "{headers}:{body}".format(headers = properties.headers, # body = body) #print "wait...." 
#time.sleep(10.0) if properties.reply_to: response = "Success" ch.basic_publish(exchange='', routing_key=properties.reply_to, properties=pika.BasicProperties(correlation_id = \ properties.correlation_id), body=str(self.on_return_status(properties))) def on_response_callback(self, ch, method, props, body): #self.log("[x] response %s,%s"%(props.correlation_id,str(self.corr_dict))) if props.correlation_id in self.corr_dict: #del self.corr_dict[props.correlation_id] self.response = body def start_consume(self,queue_name): self.channel.basic_qos(prefetch_count=1) self.channel.basic_consume(self.on_recieve_callback, queue = queue_name, no_ack=True) try: self.channel.start_consuming() except KeyboardInterrupt: self.log('Bye') finally: self.connection.close() VERSION = 0.5 REQUEST_EXCHANGE_NAME = "Request" IDENTIFY_EXCHANGE_NAME = "Identify" RESULTS_EXCHANGE_NAME = "Results" class MessageBrokerBase(MessageQueue): def __init__(self, node_name, user_id="guest",header={},exchange_name = REQUEST_EXCHANGE_NAME): super(MessageBrokerBase, self).__init__(node_name, user_id) self.exchange_name = exchange_name self.request_queue=self.queue_bind(self.exchange_name, header) self.log( self.__class__.__name__) def start(self, ): self.start_consume(self.request_queue) class Broker(MessageBrokerBase): def __init__(self, user_id="guest",header={},exchange_name = REQUEST_EXCHANGE_NAME): super(Broker, self).__init__("Broker", user_id,header, exchange_name) def on_return_status(self, properties): # called as RPC to return the status of a sent msg return "[OK] from %s"%self.node_name def on_recieve_callback(self, ch, method, properties, body): super(Broker,self).on_recieve_callback(ch, method, properties, body) self.send(IDENTIFY_EXCHANGE_NAME, body, properties.headers, False) class Matcher(MessageBrokerBase): def __init__(self, node_name, user_id="guest",header={},exchange_name = IDENTIFY_EXCHANGE_NAME): header.update({"from_node":node_name}) super(Matcher, self).__init__(node_name, user_id,header, exchange_name) def on_recieve_callback(self, ch, method, properties, body): super(Matcher,self).on_recieve_callback(ch, method, properties, body) if not(self.node_name in properties.headers): # make sure not to match our own request body = "Match score = %f from %s"%(random.random(),self.node_name) self.send(RESULTS_EXCHANGE_NAME, body, properties.headers) class Requester(MessageQueue): def __init__(self, node_name, user_id="guest"): super(Requester, self).__init__(node_name, user_id) def send(self, msg,header): header.update({self.node_name:True}) super(Requester,self).send(REQUEST_EXCHANGE_NAME,msg,header,True) class Receiver(MessageBrokerBase): def __init__(self, node_name, user_id="guest",header={},exchange_name = RESULTS_EXCHANGE_NAME): super(Receiver, self).__init__(node_name, user_id,header, exchange_name) def on_recieve_callback(self, ch, method, properties, body): super(Receiver,self).on_recieve_callback(ch, method, properties, body) #self.log("**** Result from %s"%(str(properties.headers))) self.log(body) if __name__ == "__main__": import argparse # Parse command line args # note that args can be read from a file using the @ command parser = argparse.ArgumentParser(description='Identity Verification Service',fromfile_prefix_chars='@') parser.add_argument('--rabbitmq_host', default='localhost', help='set the rabbitmq url (default localhost)') parser.add_argument('--redis_host', default='localhost', help='set the redis url (default localhost)') parser.add_argument('--redis_port', default=6379, help='set the redis 
port (default 6379)') parser.add_argument('--is_broker','-b', action='store_true', help='Is the broker') parser.add_argument('--is_matcher','-m', action='store_true', help='Is a matcher') parser.add_argument('--is_requester','-r', action='store_true', help='Is a requester') parser.add_argument('--is_receiver','-e', action='store_true', help='Is a reciever') parser.add_argument('--name','-n', default='[No Name]', help='Name of the agency/node') parser.add_argument('--country','-c', default="AU", help='Set the country code (default=AU)') parser.add_argument('--location','-l', default="unknown", help='Set location (default=unknown)') parser.add_argument('--version', action='version', version='%(prog)s '+str(VERSION)) args = parser.parse_args() header={"test":"test"} if args.is_matcher: matcher = Matcher(args.name,args.name,header) matcher.start() elif args.is_broker: broker = Broker("broker",header) #queue=broker.queue_bind(dest_queue, header) broker.start() #_consume(queue) elif args.is_requester: requester = Requester(args.name,args.name) requester.send("Hello",header) elif args.is_receiver: receiver = Receiver(args.name,args.name,{args.name:True}) receiver.start() #sendRequest(my_queue, dest_queue, priority, m_type, d_file)
mit
-7,831,741,206,380,664,000
39.189076
107
0.573236
false
azon1272/War-for-cookies-v2
lib/Pathfind.py
1
2951
# -*- coding: utf-8 -*-
# Wave (Lee) pathfinding algorithm.
def pathfind(matrix, x1, y1, x2, y2, steps):
    # matrix is the passability map; areas occupied by our own or the enemy
    # army must also be marked as impassable!
    # Impassable cells are 0, passable cells are 1.
    n = len(matrix)      # height of the map
    m = len(matrix[0])   # width of the map
    max_wave = 30        # maximum wave length

    # 0 means "not reached yet"; the start cell gets a special mark
    wavematrix = [[0] * m for _ in range(n)]
    wavematrix[x1][y1] = -1

    front = [(x1, y1)]   # cells reached by the previous wave
    found = False
    path_len = 0

    # stage 1: send out the wave until the target point is reached
    for z in range(1, max_wave + 1):
        next_front = []
        for (cx, cy) in front:
            for (nx, ny) in ((cx - 1, cy), (cx + 1, cy),
                             (cx, cy - 1), (cx, cy + 1)):
                if nx < 0 or nx >= n or ny < 0 or ny >= m:
                    continue  # skip cells outside the map
                if matrix[nx][ny] == 0 or wavematrix[nx][ny] != 0:
                    continue  # impassable or already visited cell
                wavematrix[nx][ny] = z
                next_front.append((nx, ny))
                if nx == x2 and ny == y2:
                    found = True   # reached the target point
                    path_len = z   # remember the wave length
        if found:
            break
        front = next_front

    if not found:
        # no path found; the original left a note here to show a pretty
        # error message via pygame
        return []

    # stage 2: the backward trace - walk from the target along
    # decreasing wave marks back towards the start
    trace = [(x2, y2)]
    for value in range(path_len - 1, 0, -1):
        cx, cy = trace[-1]
        for (px, py) in ((cx - 1, cy), (cx + 1, cy),
                         (cx, cy - 1), (cx, cy + 1)):
            if 0 <= px < n and 0 <= py < m and wavematrix[px][py] == value:
                trace.append((px, py))
                break

    # flip the trace back to start->target order and cut it
    # to the allowed number of steps
    trace.reverse()
    return trace[:steps]
bsd-3-clause
4,589,029,278,420,215,300
41.157143
131
0.500508
false
wooga/airflow
airflow/utils/sqlalchemy.py
1
7004
# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import datetime import json import logging import os import time import traceback import pendulum from dateutil import relativedelta from sqlalchemy import event, exc from sqlalchemy.types import DateTime, Text, TypeDecorator from airflow.configuration import conf log = logging.getLogger(__name__) utc = pendulum.timezone('UTC') using_mysql = conf.get('core', 'sql_alchemy_conn').lower().startswith('mysql') def setup_event_handlers(engine): """ Setups event handlers. """ # pylint: disable=unused-argument @event.listens_for(engine, "connect") def connect(dbapi_connection, connection_record): connection_record.info['pid'] = os.getpid() if engine.dialect.name == "sqlite": @event.listens_for(engine, "connect") def set_sqlite_pragma(dbapi_connection, connection_record): cursor = dbapi_connection.cursor() cursor.execute("PRAGMA foreign_keys=ON") cursor.close() # this ensures sanity in mysql when storing datetimes (not required for postgres) if engine.dialect.name == "mysql": @event.listens_for(engine, "connect") def set_mysql_timezone(dbapi_connection, connection_record): cursor = dbapi_connection.cursor() cursor.execute("SET time_zone = '+00:00'") cursor.close() @event.listens_for(engine, "checkout") def checkout(dbapi_connection, connection_record, connection_proxy): pid = os.getpid() if connection_record.info['pid'] != pid: connection_record.connection = connection_proxy.connection = None raise exc.DisconnectionError( "Connection record belongs to pid {}, " "attempting to check out in pid {}".format(connection_record.info['pid'], pid) ) if conf.getboolean('debug', 'sqlalchemy_stats', fallback=False): @event.listens_for(engine, "before_cursor_execute") def before_cursor_execute(conn, cursor, statement, parameters, context, executemany): conn.info.setdefault('query_start_time', []).append(time.time()) @event.listens_for(engine, "after_cursor_execute") def after_cursor_execute(conn, cursor, statement, parameters, context, executemany): total = time.time() - conn.info['query_start_time'].pop() file_name = [ f"'{f.name}':{f.filename}:{f.lineno}" for f in traceback.extract_stack() if 'sqlalchemy' not in f.filename][-1] stack = [f for f in traceback.extract_stack() if 'sqlalchemy' not in f.filename] stack_info = ">".join([f"{f.filename.rpartition('/')[-1]}:{f.name}" for f in stack][-3:]) conn.info.setdefault('query_start_time', []).append(time.monotonic()) log.info("@SQLALCHEMY %s |$ %s |$ %s |$ %s ", total, file_name, stack_info, statement.replace("\n", " ") ) # pylint: enable=unused-argument class UtcDateTime(TypeDecorator): """ Almost equivalent to :class:`~sqlalchemy.types.DateTime` with ``timezone=True`` option, but it differs from that by: - Never silently take naive :class:`~datetime.datetime`, instead it always raise 
:exc:`ValueError` unless time zone aware value. - :class:`~datetime.datetime` value's :attr:`~datetime.datetime.tzinfo` is always converted to UTC. - Unlike SQLAlchemy's built-in :class:`~sqlalchemy.types.DateTime`, it never return naive :class:`~datetime.datetime`, but time zone aware value, even with SQLite or MySQL. - Always returns DateTime in UTC """ impl = DateTime(timezone=True) def process_bind_param(self, value, dialect): if value is not None: if not isinstance(value, datetime.datetime): raise TypeError('expected datetime.datetime, not ' + repr(value)) elif value.tzinfo is None: raise ValueError('naive datetime is disallowed') # For mysql we should store timestamps as naive values # Timestamp in MYSQL is not timezone aware. In MySQL 5.6 # timezone added at the end is ignored but in MySQL 5.7 # inserting timezone value fails with 'invalid-date' # See https://issues.apache.org/jira/browse/AIRFLOW-7001 if using_mysql: from airflow.utils.timezone import make_naive return make_naive(value, timezone=utc) return value.astimezone(utc) return None def process_result_value(self, value, dialect): """ Processes DateTimes from the DB making sure it is always returning UTC. Not using timezone.convert_to_utc as that converts to configured TIMEZONE while the DB might be running with some other setting. We assume UTC datetimes in the database. """ if value is not None: if value.tzinfo is None: value = value.replace(tzinfo=utc) else: value = value.astimezone(utc) return value class Interval(TypeDecorator): """ Base class representing a time interval. """ impl = Text attr_keys = { datetime.timedelta: ('days', 'seconds', 'microseconds'), relativedelta.relativedelta: ( 'years', 'months', 'days', 'leapdays', 'hours', 'minutes', 'seconds', 'microseconds', 'year', 'month', 'day', 'hour', 'minute', 'second', 'microsecond', ), } def process_bind_param(self, value, dialect): if isinstance(value, tuple(self.attr_keys)): attrs = { key: getattr(value, key) for key in self.attr_keys[type(value)] } return json.dumps({'type': type(value).__name__, 'attrs': attrs}) return json.dumps(value) def process_result_value(self, value, dialect): if not value: return value data = json.loads(value) if isinstance(data, dict): type_map = {key.__name__: key for key in self.attr_keys} return type_map[data['type']](**data['attrs']) return data
apache-2.0
-6,113,996,625,261,156,000
38.570621
101
0.630354
false
openshine/osweb
osweb/settings.py
1
2608
#osweb. Main Openshines website #Copyright (C) 2011 Openshine sl # Authors: # Pablo Vieytes <[email protected]> # Roberto Majadas <[email protected]> # Cesar Garcia Tapia <[email protected]> # #This program is free software: you can redistribute it and/or modify #it under the terms of the GNU General Public License as published by #the Free Software Foundation, either version 3 of the License, or #(at your option) any later version. # #This program is distributed in the hope that it will be useful, #but WITHOUT ANY WARRANTY; without even the implied warranty of #MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #GNU General Public License for more details. # #You should have received a copy of the GNU General Public License #along with this program. If not, see <http://www.gnu.org/licenses/>. # Django settings for osweb project. import os PROJECT_DIR = os.path.dirname(__file__) DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': 'osweb.db', 'USER': '', 'PASSWORD': '', 'HOST': '', 'PORT': '', } } MEDIA_ROOT = '' MEDIA_URL = '' STATIC_ROOT = '' STATIC_URL = '/static/' STATICFILES_DIRS = ( os.path.join(PROJECT_DIR, 'static'), ) STATICFILES_FINDERS = ( 'django.contrib.staticfiles.finders.FileSystemFinder', 'django.contrib.staticfiles.finders.AppDirectoriesFinder', # 'django.contrib.staticfiles.finders.DefaultStorageFinder', ) # List of callables that know how to import templates from various sources. TEMPLATE_LOADERS = ( 'django.template.loaders.filesystem.Loader', 'django.template.loaders.app_directories.Loader', # 'django.template.loaders.eggs.Loader', ) MIDDLEWARE_CLASSES = ( 'django.middleware.common.CommonMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', ) ROOT_URLCONF = 'osweb.urls' TEMPLATE_DIRS = ( os.path.join(PROJECT_DIR, 'templates'), ) INSTALLED_APPS = ( #'django.contrib.auth', #'django.contrib.contenttypes', #'django.contrib.sessions', #'django.contrib.sites', #'django.contrib.messages', # Uncomment the next line to enable the admin: # 'django.contrib.admin', # Uncomment the next line to enable admin documentation: # 'django.contrib.admindocs', 'django.contrib.staticfiles', 'osweb' ) #Import all application definitions from defs import *
gpl-3.0
-1,936,595,112,509,888,300
27.977778
75
0.708206
false
Keidan/gtkhex
modules/File.py
1
1343
################################################################################### # @file File.py # @author Keidan # @date 01/04/2014 # @par Project # gtkhex # @par Copyright # Copyright 2014 Keidan, all right reserved # This software is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY. # # Licence summary : # You can modify and redistribute the sources code and binaries. # You can send me the bug-fix # Term of the licence in in the file licence.txt. # ################################################################################### class File: def __init__(self, filename=None): self.filename = filename self.data = None def get_data(self): return self.data def set_data(self, data): self.data = data def set_filename(self, filename): self.filename = filename def get_filename(self): return self.filename def get_simple_name(self): if not self.filename: return "Untitled" index = self.filename.replace("\\","/").rfind("/") + 1 return self.filename[index:] def read(self): file = open(self.filename, "r") self.data = file.read() file.close() def write(self): file = open(self.filename, "w+") file.write(self.data) file.close()
gpl-3.0
8,274,144,948,170,201,000
25.86
83
0.534624
false
bjuvensjo/scripts
vang/bitbucket/tests/test_clone_repos.py
1
7332
from unittest.mock import MagicMock, call, patch import pytest from more_itertools import take from pytest import raises from vang.bitbucket.clone_repos import clone from vang.bitbucket.clone_repos import get_config_commands from vang.bitbucket.clone_repos import get_projects_commands from vang.bitbucket.clone_repos import get_repos_commands from vang.bitbucket.clone_repos import main from vang.bitbucket.clone_repos import parse_args from vang.bitbucket.clone_repos import should_be_cloned @patch('vang.bitbucket.clone_repos.is_included', return_value=True) def test_should_be_cloned(mock_is_included): mock_has_branch_map = MagicMock(return_value=True) assert should_be_cloned( 'project', 'repo', { 'includes': ['includes'], 'excludes': ['excludes'] }, mock_has_branch_map, ) assert [call('repo', ['excludes'], ['includes'])] == mock_is_included.mock_calls @patch('vang.bitbucket.clone_repos.run_commands', return_value=iter([1, 2, 3])) @patch('vang.bitbucket.clone_repos.makedirs') def test_clone(mock_makedirs, mock_run_commands): assert [1, 2, 3] == take(3, clone(['commands'], 'root_dir')) assert [call('root_dir', exist_ok=True)] == mock_makedirs.mock_calls assert [ call([('commands', 'root_dir')], check=False, max_processes=25, timeout=60) ] == mock_run_commands.mock_calls @patch( 'vang.bitbucket.clone_repos.get_clone_urls', return_value=[[ 'clone_dir', 'project', 'repo', 'command', ]]) def test_get_projects_commands(mock_get_clone_urls): assert [('clone_dir', 'project', 'repo', 'command')] == list( get_projects_commands('projects', 'branch')) assert [call('projects', True, 'branch', False)] == mock_get_clone_urls.mock_calls @patch('builtins.print') @patch( 'vang.bitbucket.clone_repos.get_clone_urls', return_value=[ ['clone_dir', 'project', 'repo1', 'command'], ['clone_dir', 'project', 'repo2', 'command'], ]) def test_get_repos_commands(mock_get_clone_urls, mock_print): assert [ ('clone_dir', 'project', 'repo1', 'command'), ('clone_dir', 'project', 'repo2', 'command'), ] == get_repos_commands([ 'project/repo1', 'project/repo2', 'project/non_existing_repo', ], 'branch') assert [call({'project'}, True, 'branch', False)] == mock_get_clone_urls.mock_calls assert [call('Warning! 
Non existing repo: project/non_existing_repo') ] == mock_print.mock_calls @patch('vang.bitbucket.clone_repos.should_be_cloned', return_value=True) @patch( 'vang.bitbucket.clone_repos.get_clone_urls', return_value=[ ['clone_dir', 'project', 'repo1', 'command'], ['clone_dir', 'project', 'repo2', 'command'], ]) @patch( 'vang.bitbucket.clone_repos.has_branch', return_value=[[('project', 'repo1'), True], [('project', 'repo2'), False]]) def test_get_config_commands(mock_has_branch, mock_get_clone_urls, mock_should_be_cloned): assert [ ('clone_dir', 'project', 'repo1', 'command'), ('clone_dir', 'project', 'repo2', 'command'), ] == list( get_config_commands({ 'projects': { 'project': 'project' }, 'branch': 'branch' })) assert [call([('project', 'repo1'), ('project', 'repo2')], 'branch')] == mock_has_branch.mock_calls assert [call( { 'project': 'project' }, True, 'branch', False, )] == mock_get_clone_urls.mock_calls assert [ call( 'project', 'repo1', 'project', { ('project', 'repo1'): True, ('project', 'repo2'): False }, ), call( 'project', 'repo2', 'project', { ('project', 'repo1'): True, ('project', 'repo2'): False }, ) ] == mock_should_be_cloned.mock_calls @patch('builtins.open') @patch('builtins.print') @patch('vang.bitbucket.clone_repos.clone') @patch('vang.bitbucket.clone_repos.get_config_commands') @patch('vang.bitbucket.clone_repos.get_projects_commands') @patch('vang.bitbucket.clone_repos.get_repos_commands') @patch('vang.bitbucket.clone_repos.load') def test_main( mock_load, mock_get_repos_commands, mock_get_projects_commands, mock_get_config_commands, mock_clone, mock_print, mock_open, ): mock_load.return_value = 'load' mock_process = MagicMock() mock_process.stdout.decode.return_value = 'Cloned...' mock_clone.return_value = [mock_process] commands = [ ['clone_dir', 'project', 'repo1', 'command'], ['clone_dir', 'project', 'repo2', 'command'], ] mock_get_config_commands.return_value = commands mock_get_projects_commands.return_value = commands mock_get_repos_commands.return_value = commands assert not main('root_dir', projects=['project'], branch='branch') assert [call( ['project'], 'branch', False, )] == mock_get_projects_commands.mock_calls assert [call(['command', 'command'], 'root_dir')] == mock_clone.mock_calls assert [call('01', 'Cloned...', end='')] == mock_print.mock_calls assert not main('root_dir', repos=['repos'], branch='branch') assert [call( ['repos'], 'branch', False, )] == mock_get_repos_commands.mock_calls assert not main('root_dir', config='config', branch='branch') assert [ call('config', 'rt', encoding='utf-8'), call().__enter__(), call().__exit__(None, None, None) ] == mock_open.mock_calls assert [call( 'load', 'branch', False, )] == mock_get_config_commands.mock_calls @pytest.mark.parametrize("args", [ '', 'foo', '-p p -r r', '-p p -c c', '-r r -c c', ]) def test_parse_args_raises(args): with raises(SystemExit): parse_args(args.split(' ') if args else args) @pytest.mark.parametrize("args, expected", [ [ '-p p1 p2', { 'branch': None, 'config': None, 'root_dir': '.', 'flat': False, 'projects': ['p1', 'p2'], 'repos': None } ], [ '-r r1 r2', { 'branch': None, 'config': None, 'root_dir': '.', 'flat': False, 'projects': None, 'repos': ['r1', 'r2'] } ], [ '-c c', { 'branch': None, 'config': 'c', 'root_dir': '.', 'flat': False, 'projects': None, 'repos': None } ], [ '-c c -b b -d d -f', { 'branch': 'b', 'config': 'c', 'root_dir': 'd', 'flat': True, 'projects': None, 'repos': None } ], ]) def test_parse_args_valid(args, expected): assert expected == parse_args(args.split(' ') 
if args else '').__dict__
apache-2.0
4,005,530,771,154,760,700
28.095238
79
0.535188
false
genialis/resolwe
resolwe/flow/models/collection.py
1
3505
"""Resolwe collection model.""" from django.contrib.postgres.fields import ArrayField from django.contrib.postgres.indexes import GinIndex from django.contrib.postgres.search import SearchVectorField from django.db import models, transaction from .base import BaseModel, BaseQuerySet from .utils import DirtyError, bulk_duplicate, validate_schema class BaseCollection(BaseModel): """Template for Postgres model for storing a collection.""" class Meta(BaseModel.Meta): """BaseCollection Meta options.""" abstract = True #: detailed description description = models.TextField(blank=True) settings = models.JSONField(default=dict) #: collection descriptor schema descriptor_schema = models.ForeignKey( "flow.DescriptorSchema", blank=True, null=True, on_delete=models.PROTECT ) #: collection descriptor descriptor = models.JSONField(default=dict) #: indicate whether `descriptor` doesn't match `descriptor_schema` (is dirty) descriptor_dirty = models.BooleanField(default=False) #: tags for categorizing objects tags = ArrayField(models.CharField(max_length=255), default=list) #: field used for full-text search search = SearchVectorField(null=True) def save(self, *args, **kwargs): """Perform descriptor validation and save object.""" if self.descriptor_schema: try: validate_schema(self.descriptor, self.descriptor_schema.schema) self.descriptor_dirty = False except DirtyError: self.descriptor_dirty = True elif self.descriptor and self.descriptor != {}: raise ValueError( "`descriptor_schema` must be defined if `descriptor` is given" ) super().save() class CollectionQuerySet(BaseQuerySet): """Query set for ``Collection`` objects.""" @transaction.atomic def duplicate(self, contributor): """Duplicate (make a copy) ``Collection`` objects.""" return bulk_duplicate(collections=self, contributor=contributor) class Collection(BaseCollection): """Postgres model for storing a collection.""" class Meta(BaseCollection.Meta): """Collection Meta options.""" permissions = ( ("view_collection", "Can view collection"), ("edit_collection", "Can edit collection"), ("share_collection", "Can share collection"), ("owner_collection", "Is owner of the collection"), ) indexes = [ models.Index(name="idx_collection_name", fields=["name"]), GinIndex( name="idx_collection_name_trgm", fields=["name"], opclasses=["gin_trgm_ops"], ), models.Index(name="idx_collection_slug", fields=["slug"]), GinIndex(name="idx_collection_tags", fields=["tags"]), GinIndex(name="idx_collection_search", fields=["search"]), ] #: manager objects = CollectionQuerySet.as_manager() #: duplication date and time duplicated = models.DateTimeField(blank=True, null=True) def is_duplicate(self): """Return True if collection is a duplicate.""" return bool(self.duplicated) def duplicate(self, contributor): """Duplicate (make a copy).""" return bulk_duplicate( collections=self._meta.model.objects.filter(pk=self.pk), contributor=contributor, )[0]
apache-2.0
4,438,948,629,701,204,000
32.066038
81
0.637946
false
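The save() override in the record above is what drives the descriptor_dirty flag. A minimal usage sketch, assuming a configured Django project with the flow app installed, that Collection is re-exported from resolwe.flow.models, and that `contributor` is a placeholder user object:

from resolwe.flow.models import Collection

collection = Collection(name="Example batch", contributor=contributor)
collection.descriptor = {"species": "Homo sapiens"}
# With a descriptor but no descriptor_schema, save() raises ValueError;
# with a schema attached, validation only toggles descriptor_dirty.
try:
    collection.save()
except ValueError:
    pass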
ravenac95/virtstrap
virtstrap-core/tests/test_registry.py
1
3540
import fudge

from virtstrap.registry import *
from virtstrap.testing import *


def test_initialize_command_registry():
    registry = CommandRegistry()


class TestCommandRegistry(object):
    def setup(self):
        fake_parent = fudge.Fake()
        self.fake_parent = fake_parent
        fake_parent.provides('call_hooks')
        self.registry = CommandRegistry(parent=fake_parent)

    def test_register_a_command(self):
        """Test that a command registers correctly"""
        class FakeCommand(object):
            name = 'test'
        registry = self.registry
        registry.register(FakeCommand)
        assert registry.retrieve('test') == FakeCommand

    @fudge.test
    def test_run_a_command(self):
        """Test that a command runs correctly"""
        from virtstrap.commands import Command
        # Setup fake command instance
        command_args = ('test', 'options')

        class FakeCommand(Command):
            name = 'test'
            called = False

            def run(self, options, **kwargs):
                self.__class__.called = True
                assert options == 'options'

        #register to the registry
        registry = self.registry
        registry.register(FakeCommand)
        assert registry.run(*command_args) == 0
        assert FakeCommand.called

    @fudge.test
    def test_run_a_command_with_fake_kwargs(self):
        """Test that a command runs correctly with fake kwargs"""
        from virtstrap.commands import Command
        # Setup fake command instance
        command_args = ('test', 'options')

        class FakeCommand(Command):
            name = 'test'
            called = False

            def run(self, options, **kwargs):
                self.__class__.called = True
                assert options == 'options'

        #register to the registry
        registry = self.registry
        registry.register(FakeCommand)
        assert registry.run(*command_args, test='test') == 0
        assert FakeCommand.called

    @fudge.test
    def test_list_commands(self):
        com1 = fake_command('com1')
        com2 = fake_command('com2')
        registry = self.registry
        registry.register(com1)
        registry.register(com2)
        assert set(registry.list_commands()) == set(['com1', 'com2'])


def test_initialize_plugin_registry():
    registry = PluginRegistry()


class TestPluginRegistry(object):
    def setup(self):
        self.registry = PluginRegistry()

    def test_run_a_plugin(self):
        from virtstrap.hooks import create

        @create('command1', ['event1', 'event2'])
        def handle_command1_events(event, options, **kwargs):
            assert event in ['event1', 'event2']
            options['%s_1' % event] = True

        @create('command2', ['event1'])
        def handle_command2_events(event, options, **kwargs):
            assert event in 'event1'
            options['%s_2' % event] = True

        self.registry.register(handle_command1_events)
        self.registry.register(handle_command2_events)

        options = dict(event1_1=False, event2_1=False, event1_2=False)

        self.registry.call_hooks('command1', 'event1', options)
        self.registry.call_hooks('command1', 'event2', options)
        self.registry.call_hooks('command2', 'event1', options)

        assert options['event1_1'], "Plugin was not run for command1, event1"
        assert options['event2_1'], "Plugin was not run for command1, event2"
        assert options['event1_2'], "Plugin was not run for command2, event1"
mit
-3,701,375,800,324,055,000
34.4
77
0.613277
false
aio-libs/yarl
tests/test_update_query.py
1
10772
import enum import pytest from multidict import MultiDict from yarl import URL # with_query def test_with_query(): url = URL("http://example.com") assert str(url.with_query({"a": "1"})) == "http://example.com/?a=1" def test_update_query(): url = URL("http://example.com/") assert str(url.update_query({"a": "1"})) == "http://example.com/?a=1" assert str(URL("test").update_query(a=1)) == "test?a=1" url = URL("http://example.com/?foo=bar") expected_url = URL("http://example.com/?foo=bar&baz=foo") assert url.update_query({"baz": "foo"}) == expected_url assert url.update_query(baz="foo") == expected_url assert url.update_query("baz=foo") == expected_url def test_update_query_with_args_and_kwargs(): url = URL("http://example.com/") with pytest.raises(ValueError): url.update_query("a", foo="bar") def test_update_query_with_multiple_args(): url = URL("http://example.com/") with pytest.raises(ValueError): url.update_query("a", "b") def test_with_query_list_of_pairs(): url = URL("http://example.com") assert str(url.with_query([("a", "1")])) == "http://example.com/?a=1" def test_with_query_list_non_pairs(): url = URL("http://example.com") with pytest.raises(ValueError): url.with_query(["a=1", "b=2" "c=3"]) def test_with_query_kwargs(): url = URL("http://example.com") q = url.with_query(query="1", query2="1").query assert q == dict(query="1", query2="1") def test_with_query_kwargs_and_args_are_mutually_exclusive(): url = URL("http://example.com") with pytest.raises(ValueError): url.with_query({"a": "2", "b": "4"}, a="1") def test_with_query_only_single_arg_is_supported(): url = URL("http://example.com") u1 = url.with_query(b=3) u2 = URL("http://example.com/?b=3") assert u1 == u2 with pytest.raises(ValueError): url.with_query("a=1", "a=b") def test_with_query_empty_dict(): url = URL("http://example.com/?a=b") new_url = url.with_query({}) assert new_url.query_string == "" assert str(new_url) == "http://example.com/" def test_with_query_empty_str(): url = URL("http://example.com/?a=b") assert str(url.with_query("")) == "http://example.com/" def test_with_query_empty_value(): url = URL("http://example.com/") assert str(url.with_query({"a": ""})) == "http://example.com/?a=" def test_with_query_str(): url = URL("http://example.com") assert str(url.with_query("a=1&b=2")) == "http://example.com/?a=1&b=2" def test_with_query_str_non_ascii_and_spaces(): url = URL("http://example.com") url2 = url.with_query("a=1 2&b=знач") assert url2.raw_query_string == "a=1+2&b=%D0%B7%D0%BD%D0%B0%D1%87" assert url2.query_string == "a=1 2&b=знач" def test_with_query_int(): url = URL("http://example.com") assert url.with_query({"a": 1}) == URL("http://example.com/?a=1") def test_with_query_kwargs_int(): url = URL("http://example.com") assert url.with_query(b=2) == URL("http://example.com/?b=2") def test_with_query_list_int(): url = URL("http://example.com") assert str(url.with_query([("a", 1)])) == "http://example.com/?a=1" @pytest.mark.parametrize( ("query", "expected"), [ pytest.param({"a": []}, "", id="empty list"), pytest.param({"a": ()}, "", id="empty tuple"), pytest.param({"a": [1]}, "/?a=1", id="single list"), pytest.param({"a": (1,)}, "/?a=1", id="single tuple"), pytest.param({"a": [1, 2]}, "/?a=1&a=2", id="list"), pytest.param({"a": (1, 2)}, "/?a=1&a=2", id="tuple"), pytest.param({"a[]": [1, 2]}, "/?a%5B%5D=1&a%5B%5D=2", id="key with braces"), pytest.param({"&": [1, 2]}, "/?%26=1&%26=2", id="quote key"), pytest.param({"a": ["1", 2]}, "/?a=1&a=2", id="mixed types"), pytest.param({"&": ["=", 2]}, "/?%26=%3D&%26=2", 
id="quote key and value"), pytest.param({"a": 1, "b": [2, 3]}, "/?a=1&b=2&b=3", id="single then list"), pytest.param({"a": [1, 2], "b": 3}, "/?a=1&a=2&b=3", id="list then single"), pytest.param({"a": ["1&a=2", 3]}, "/?a=1%26a%3D2&a=3", id="ampersand then int"), pytest.param({"a": [1, "2&a=3"]}, "/?a=1&a=2%26a%3D3", id="int then ampersand"), ], ) def test_with_query_sequence(query, expected): url = URL("http://example.com") expected = "http://example.com{expected}".format_map(locals()) assert str(url.with_query(query)) == expected @pytest.mark.parametrize( "query", [ pytest.param({"a": [[1]]}, id="nested"), pytest.param([("a", [1, 2])], id="tuple list"), ], ) def test_with_query_sequence_invalid_use(query): url = URL("http://example.com") with pytest.raises(TypeError, match="Invalid variable type"): url.with_query(query) class _CStr(str): pass class _EmptyStrEr: def __str__(self): return "" class _CInt(int, _EmptyStrEr): pass class _CFloat(float, _EmptyStrEr): pass @pytest.mark.parametrize( ("value", "expected"), [ pytest.param("1", "1", id="str"), pytest.param(_CStr("1"), "1", id="custom str"), pytest.param(1, "1", id="int"), pytest.param(_CInt(1), "1", id="custom int"), pytest.param(1.1, "1.1", id="float"), pytest.param(_CFloat(1.1), "1.1", id="custom float"), ], ) def test_with_query_valid_type(value, expected): url = URL("http://example.com") expected = "http://example.com/?a={expected}".format_map(locals()) assert str(url.with_query({"a": value})) == expected @pytest.mark.parametrize( ("value", "exc_type"), [ pytest.param(True, TypeError, id="bool"), pytest.param(None, TypeError, id="none"), pytest.param(float("inf"), ValueError, id="non-finite float"), pytest.param(float("nan"), ValueError, id="NaN float"), ], ) def test_with_query_invalid_type(value, exc_type): url = URL("http://example.com") with pytest.raises(exc_type): url.with_query({"a": value}) @pytest.mark.parametrize( ("value", "expected"), [ pytest.param("1", "1", id="str"), pytest.param(_CStr("1"), "1", id="custom str"), pytest.param(1, "1", id="int"), pytest.param(_CInt(1), "1", id="custom int"), pytest.param(1.1, "1.1", id="float"), pytest.param(_CFloat(1.1), "1.1", id="custom float"), ], ) def test_with_query_list_valid_type(value, expected): url = URL("http://example.com") expected = "http://example.com/?a={expected}".format_map(locals()) assert str(url.with_query([("a", value)])) == expected @pytest.mark.parametrize( ("value"), [pytest.param(True, id="bool"), pytest.param(None, id="none")] ) def test_with_query_list_invalid_type(value): url = URL("http://example.com") with pytest.raises(TypeError): url.with_query([("a", value)]) def test_with_int_enum(): class IntEnum(int, enum.Enum): A = 1 url = URL("http://example.com/path") url2 = url.with_query(a=IntEnum.A) assert str(url2) == "http://example.com/path?a=1" def test_with_float_enum(): class FloatEnum(float, enum.Enum): A = 1.1 url = URL("http://example.com/path") url2 = url.with_query(a=FloatEnum.A) assert str(url2) == "http://example.com/path?a=1.1" def test_with_query_multidict(): url = URL("http://example.com/path") q = MultiDict([("a", "b"), ("c", "d")]) assert str(url.with_query(q)) == "http://example.com/path?a=b&c=d" def test_with_multidict_with_spaces_and_non_ascii(): url = URL("http://example.com") url2 = url.with_query({"a b": "ю б"}) assert url2.raw_query_string == "a+b=%D1%8E+%D0%B1" def test_with_query_multidict_with_unsafe(): url = URL("http://example.com/path") url2 = url.with_query({"a+b": "?=+&;"}) assert url2.raw_query_string == 
"a%2Bb=?%3D%2B%26%3B" assert url2.query_string == "a%2Bb=?%3D%2B%26%3B" assert url2.query == {"a+b": "?=+&;"} def test_with_query_None(): url = URL("http://example.com/path?a=b") assert url.with_query(None).query_string == "" def test_with_query_bad_type(): url = URL("http://example.com") with pytest.raises(TypeError): url.with_query(123) def test_with_query_bytes(): url = URL("http://example.com") with pytest.raises(TypeError): url.with_query(b"123") def test_with_query_bytearray(): url = URL("http://example.com") with pytest.raises(TypeError): url.with_query(bytearray(b"123")) def test_with_query_memoryview(): url = URL("http://example.com") with pytest.raises(TypeError): url.with_query(memoryview(b"123")) @pytest.mark.parametrize( ("query", "expected"), [ pytest.param([("key", "1;2;3")], "?key=1%3B2%3B3", id="tuple list semicolon"), pytest.param({"key": "1;2;3"}, "?key=1%3B2%3B3", id="mapping semicolon"), pytest.param([("key", "1&a=2")], "?key=1%26a%3D2", id="tuple list ampersand"), pytest.param({"key": "1&a=2"}, "?key=1%26a%3D2", id="mapping ampersand"), pytest.param([("&", "=")], "?%26=%3D", id="tuple list quote key"), pytest.param({"&": "="}, "?%26=%3D", id="mapping quote key"), pytest.param( [("a[]", "3")], "?a%5B%5D=3", id="quote one key braces", ), pytest.param( [("a[]", "3"), ("a[]", "4")], "?a%5B%5D=3&a%5B%5D=4", id="quote many key braces", ), ], ) def test_with_query_params(query, expected): url = URL("http://example.com/get") url2 = url.with_query(query) assert str(url2) == ("http://example.com/get" + expected) def test_with_query_only(): url = URL() url2 = url.with_query(key="value") assert str(url2) == "?key=value" def test_with_query_complex_url(): target_url = "http://example.com/?game=bulls+%26+cows" url = URL("/redir").with_query({"t": target_url}) assert url.query["t"] == target_url def test_update_query_multiple_keys(): url = URL("http://example.com/path?a=1&a=2") u2 = url.update_query([("a", "3"), ("a", "4")]) assert str(u2) == "http://example.com/path?a=3&a=4" # mod operator def test_update_query_with_mod_operator(): url = URL("http://example.com/") assert str(url % {"a": "1"}) == "http://example.com/?a=1" assert str(url % [("a", "1")]) == "http://example.com/?a=1" assert str(url % "a=1&b=2") == "http://example.com/?a=1&b=2" assert str(url % {"a": "1"} % {"b": "2"}) == "http://example.com/?a=1&b=2" assert str(url % {"a": "1"} % {"a": "3", "b": "2"}) == "http://example.com/?a=3&b=2" assert str(url / "foo" % {"a": "1"}) == "http://example.com/foo?a=1"
apache-2.0
1,811,079,572,952,755,000
29.061453
88
0.564951
false
tonyteate/pydocxgae
pydocx/DocxParser.py
1
27232
import logging import os import zipfile from abc import abstractmethod, ABCMeta from contextlib import contextmanager from pydocx.utils import ( MulitMemoizeMixin, PydocxPreProcessor, find_all, find_ancestor_with_tag, find_first, get_list_style, has_descendant_with_tag, parse_xml_from_string, ) from pydocx.exceptions import MalformedDocxException logging.basicConfig(level=logging.DEBUG) logger = logging.getLogger("NewParser") # http://openxmldeveloper.org/discussions/formats/f/15/p/396/933.aspx EMUS_PER_PIXEL = 9525 USE_ALIGNMENTS = True JUSTIFY_CENTER = 'center' JUSTIFY_LEFT = 'left' JUSTIFY_RIGHT = 'right' INDENTATION_RIGHT = 'right' INDENTATION_LEFT = 'left' INDENTATION_FIRST_LINE = 'firstLine' DISABLED_STYLE_VALUES = ['false', '0', 'none'] # Add some helper functions to Element to make it slightly more readable @contextmanager def ZipFile(path): # This is not needed in python 3.2+ try: f = zipfile.ZipFile(path) except zipfile.BadZipfile: raise MalformedDocxException('Passed in document is not a docx') yield f f.close() class DocxParser(MulitMemoizeMixin): __metaclass__ = ABCMeta pre_processor_class = PydocxPreProcessor def _extract_xml(self, f, xml_path): try: return f.read(xml_path) except KeyError: return None def _build_data(self, path, *args, **kwargs): with ZipFile(path) as f: # These must be in the ZIP in order for the docx to be valid. self.document_text = f.read('word/document.xml') self.relationship_text = f.read('word/_rels/document.xml.rels') # These are all optional. self.styles_text = self._extract_xml(f, 'word/styles.xml') self.fonts = self._extract_xml(f, 'word/fontTable.xml') self.numbering_text = self._extract_xml(f, 'word/numbering.xml') self.comment_text = self._extract_xml(f, 'word/comments.xml') zipped_image_files = [ e for e in f.infolist() if e.filename.startswith('word/media/') ] for e in zipped_image_files: self._image_data[e.filename] = f.read(e.filename) self.root = parse_xml_from_string(self.document_text) self.numbering_root = None if self.numbering_text: self.numbering_root = parse_xml_from_string(self.numbering_text) self.comment_root = None if self.comment_text: self.comment_root = parse_xml_from_string(self.comment_text) def _parse_run_properties(self, rPr): """ Takes an `rPr` and returns a dictionary contain the tag name mapped to the child's value property. 
If you have an rPr that looks like this: <w:rPr> <w:b/> <w:u val="false"/> <w:sz val="16"/> </w:rPr> That will result in a dictionary that looks like this: { 'b': '', 'u': 'false', 'sz': '16', } """ run_properties = {} if rPr is None: return {} for run_property in rPr: val = run_property.get('val', '').lower() run_properties[run_property.tag] = val return run_properties def _parse_styles(self): if self.styles_text is None: return {} tree = parse_xml_from_string(self.styles_text) styles_dict = {} for style in find_all(tree, 'style'): style_val = find_first(style, 'name').attrib['val'] run_properties = find_first(style, 'rPr') styles_dict[style.attrib['styleId']] = { 'style_name': style_val, 'default_run_properties': self._parse_run_properties( run_properties, ), } return styles_dict def _parse_rels_root(self): tree = parse_xml_from_string(self.relationship_text) rels_dict = {} for el in tree: rId = el.get('Id') target = el.get('Target') rels_dict[rId] = target return rels_dict def __init__( self, path, convert_root_level_upper_roman=False, *args, **kwargs): self._parsed = '' self.block_text = '' self.page_width = 0 self.convert_root_level_upper_roman = convert_root_level_upper_roman self._image_data = {} self._build_data(path, *args, **kwargs) self.pre_processor = None #divide by 20 to get to pt (Office works in 20th's of a point) """ see http://msdn.microsoft.com/en-us/library/documentformat .openxml.wordprocessing.indentation.aspx """ if find_first(self.root, 'pgSz') is not None: self.page_width = int( find_first(self.root, 'pgSz').attrib['w'] ) / 20 #all blank when we init self.comment_store = None self.visited = set() self.list_depth = 0 self.rels_dict = self._parse_rels_root() self.styles_dict = self._parse_styles() self.parse_begin(self.root) # begin to parse def parse_begin(self, el): self.populate_memoization({ 'find_all': find_all, 'find_first': find_first, 'has_descendant_with_tag': has_descendant_with_tag, '_get_tcs_in_column': self._get_tcs_in_column, }) self.pre_processor = self.pre_processor_class( convert_root_level_upper_roman=self.convert_root_level_upper_roman, styles_dict=self.styles_dict, numbering_root=self.numbering_root, ) self.pre_processor.perform_pre_processing(el) self._parsed += self.parse(el) def parse(self, el): if el in self.visited: return '' self.visited.add(el) parsed = '' for child in el: # recursive. 
So you can get all the way to the bottom parsed += self.parse(child) if el.tag == 'br' and el.attrib.get('type') == 'page': return self.parse_page_break(el, parsed) elif el.tag == 'tbl': return self.parse_table(el, parsed) elif el.tag == 'tr': return self.parse_table_row(el, parsed) elif el.tag == 'tc': return self.parse_table_cell(el, parsed) elif el.tag == 'r': return self.parse_r(el, parsed) elif el.tag == 't': return self.parse_t(el, parsed) elif el.tag == 'tab': return self.parse_tab(el, parsed) elif el.tag == 'noBreakHyphen': return self.parse_hyphen(el, parsed) elif el.tag == 'br': return self.parse_break_tag(el, parsed) elif el.tag == 'delText': return self.parse_deletion(el, parsed) elif el.tag == 'p': return self.parse_p(el, parsed) elif el.tag == 'ins': return self.parse_insertion(el, parsed) elif el.tag == 'hyperlink': return self.parse_hyperlink(el, parsed) elif el.tag in ('pict', 'drawing'): return self.parse_image(el) else: return parsed def parse_page_break(self, el, text): #TODO figure out what parsed is getting overwritten return self.page_break() def parse_table(self, el, text): return self.table(text) def parse_table_row(self, el, text): return self.table_row(text) def parse_table_cell(self, el, text): v_merge = find_first(el, 'vMerge') if v_merge is not None and ( 'restart' != v_merge.get('val', '')): return '' colspan = self.get_colspan(el) rowspan = self._get_rowspan(el, v_merge) if rowspan > 1: rowspan = str(rowspan) else: rowspan = '' return self.table_cell(text, colspan, rowspan) def parse_list(self, el, text): """ All the meat of building the list is done in _parse_list, however we call this method for two reasons: It is the naming convention we are following. And we need a reliable way to raise and lower the list_depth (which is used to determine if we are in a list). I could have done this in _parse_list, however it seemed cleaner to do it here. """ self.list_depth += 1 parsed = self._parse_list(el, text) self.list_depth -= 1 if self.pre_processor.is_in_table(el): return self.parse_table_cell_contents(el, parsed) return parsed def get_list_style(self, num_id, ilvl): return get_list_style(self.numbering_root, num_id, ilvl) def _build_list(self, el, text): # Get the list style for the pending list. lst_style = self.get_list_style( self.pre_processor.num_id(el).num_id, self.pre_processor.ilvl(el), ) parsed = text # Create the actual list and return it. if lst_style == 'bullet': return self.unordered_list(parsed) else: return self.ordered_list( parsed, lst_style, ) def _parse_list(self, el, text): parsed = self.parse_list_item(el, text) num_id = self.pre_processor.num_id(el) ilvl = self.pre_processor.ilvl(el) # Everything after this point assumes the first element is not also the # last. If the first element is also the last then early return by # building and returning the completed list. if self.pre_processor.is_last_list_item_in_root(el): return self._build_list(el, parsed) next_el = self.pre_processor.next(el) def is_same_list(next_el, num_id, ilvl): # Bail if next_el is not an element if next_el is None: return False if self.pre_processor.is_last_list_item_in_root(next_el): return False # If next_el is not a list item then roll it into the list by # returning True. 
if not self.pre_processor.is_list_item(next_el): return True if self.pre_processor.num_id(next_el) != num_id: # The next element is a new list entirely return False if self.pre_processor.ilvl(next_el) < ilvl: # The next element is de-indented, so this is really the last # element in the list return False return True while is_same_list(next_el, num_id, ilvl): if next_el in self.visited: # Early continue for elements we have already visited. next_el = self.pre_processor.next(next_el) continue if self.pre_processor.is_list_item(next_el): # Reset the ilvl ilvl = self.pre_processor.ilvl(next_el) parsed += self.parse(next_el) next_el = self.pre_processor.next(next_el) def should_parse_last_el(last_el, first_el): if last_el is None: return False # Different list if ( self.pre_processor.num_id(last_el) != self.pre_processor.num_id(first_el)): return False # Will be handled when the ilvls do match (nesting issue) if ( self.pre_processor.ilvl(last_el) != self.pre_processor.ilvl(first_el)): return False # We only care about last items that have not been parsed before # (first list items are always parsed at the beginning of this # method.) return ( not self.pre_processor.is_first_list_item(last_el) and self.pre_processor.is_last_list_item_in_root(last_el) ) if should_parse_last_el(next_el, el): parsed += self.parse(next_el) # If the list has no content, then we don't need to worry about the # list styling, because it will be stripped out. if parsed == '': return parsed return self._build_list(el, parsed) def justification(self, el, text): paragraph_tag_property = el.find('pPr') if paragraph_tag_property is None: return text _justification = paragraph_tag_property.find('jc') indentation = paragraph_tag_property.find('ind') if _justification is None and indentation is None: return text alignment = None right = None left = None firstLine = None if _justification is not None: # text alignments value = _justification.attrib['val'] if value in [JUSTIFY_LEFT, JUSTIFY_CENTER, JUSTIFY_RIGHT]: alignment = value if indentation is not None: if INDENTATION_RIGHT in indentation.attrib: right = indentation.attrib[INDENTATION_RIGHT] # divide by 20 to get to pt. multiply by (4/3) to get to px right = (int(right) / 20) * float(4) / float(3) right = str(right) if INDENTATION_LEFT in indentation.attrib: left = indentation.attrib[INDENTATION_LEFT] left = (int(left) / 20) * float(4) / float(3) left = str(left) if INDENTATION_FIRST_LINE in indentation.attrib: firstLine = indentation.attrib[INDENTATION_FIRST_LINE] firstLine = (int(firstLine) / 20) * float(4) / float(3) firstLine = str(firstLine) if any([alignment, firstLine, left, right]): return self.indent(text, alignment, firstLine, left, right) return text def parse_p(self, el, text): if text == '': return '\n' # TODO This is still not correct, however it fixes the bug. We need to # apply the classes/styles on p, td, li and h tags instead of inline, # but that is for another ticket. 
text = self.justification(el, text) if self.pre_processor.is_first_list_item(el): return self.parse_list(el, text) if self.pre_processor.heading_level(el): return self.parse_heading(el, text) if self.pre_processor.is_list_item(el): return self.parse_list_item(el, text) if self.pre_processor.is_in_table(el): return self.parse_table_cell_contents(el, text) parsed = text # No p tags in li tags if self.list_depth == 0: parsed = self.paragraph(parsed) return parsed def _should_append_break_tag(self, next_el): paragraph_like_tags = [ 'p', ] inline_like_tags = [ 'smartTag', 'ins', 'delText', ] if self.pre_processor.is_list_item(next_el): return False if self.pre_processor.previous(next_el) is None: return False tag_is_inline_like = any( self.memod_tree_op('has_descendant_with_tag', next_el, tag) for tag in inline_like_tags ) if tag_is_inline_like: return False if ( self.pre_processor.is_last_list_item_in_root( self.pre_processor.previous(next_el))): return False if self.pre_processor.previous(next_el).tag not in paragraph_like_tags: return False if next_el.tag not in paragraph_like_tags: return False return True def parse_heading(self, el, parsed): return self.heading(parsed, self.pre_processor.heading_level(el)) def parse_list_item(self, el, text): # If for whatever reason we are not currently in a list, then start # a list here. This will only happen if the num_id/ilvl combinations # between lists is not well formed. parsed = text if self.list_depth == 0: return self.parse_list(el, parsed) def _should_parse_next_as_content(el): """ Get the contents of the next el and append it to the contents of the current el (that way things like tables are actually in the li tag instead of in the ol/ul tag). """ next_el = self.pre_processor.next(el) if next_el is None: return False if ( not self.pre_processor.is_list_item(next_el) and not self.pre_processor.is_last_list_item_in_root(el) ): return True if self.pre_processor.is_first_list_item(next_el): if ( self.pre_processor.num_id(next_el) == self.pre_processor.num_id(el)): return True return False while el is not None: if _should_parse_next_as_content(el): el = self.pre_processor.next(el) next_elements_content = self.parse(el) if not next_elements_content: continue if self._should_append_break_tag(el): parsed += self.break_tag() parsed += next_elements_content else: break # Create the actual li element return self.list_element(parsed) def _get_tcs_in_column(self, tbl, column_index): return [ tc for tc in self.memod_tree_op('find_all', tbl, 'tc') if self.pre_processor.column_index(tc) == column_index ] def _get_rowspan(self, el, v_merge): restart_in_v_merge = False if v_merge is not None and 'val' in v_merge.attrib: restart_in_v_merge = 'restart' in v_merge.attrib['val'] if not restart_in_v_merge: return '' current_row = self.pre_processor.row_index(el) current_col = self.pre_processor.column_index(el) rowspan = 1 result = '' tbl = find_ancestor_with_tag(self.pre_processor, el, 'tbl') # We only want table cells that have a higher row_index that is greater # than the current_row and that are on the current_col if tbl is None: return '' tcs = [ tc for tc in self.memod_tree_op( '_get_tcs_in_column', tbl, current_col, ) if self.pre_processor.row_index(tc) >= current_row ] def should_increment_rowspan(tc): if not self.pre_processor.vmerge_continue(tc): return False return True for tc in tcs: if should_increment_rowspan(tc): rowspan += 1 else: rowspan = 1 if rowspan > 1: result = rowspan return str(result) def get_colspan(self, el): grid_span = find_first(el, 
'gridSpan') if grid_span is None: return '' return grid_span.attrib['val'] def parse_table_cell_contents(self, el, text): parsed = text next_el = self.pre_processor.next(el) if next_el is not None: if self._should_append_break_tag(next_el): parsed += self.break_tag() return parsed def parse_hyperlink(self, el, text): rId = el.get('id') href = self.rels_dict.get(rId) if not href: return text href = self.escape(href) return self.hyperlink(text, href) def _get_image_id(self, el): # Drawings blip = find_first(el, 'blip') if blip is not None: # On drawing tags the id is actually whatever is returned from the # embed attribute on the blip tag. Thanks a lot Microsoft. return blip.get('embed') # Picts imagedata = find_first(el, 'imagedata') if imagedata is not None: return imagedata.get('id') def _convert_image_size(self, size): return size / EMUS_PER_PIXEL def _get_image_size(self, el): """ If we can't find a height or width, return 0 for whichever is not found, then rely on the `image` handler to strip those attributes. This functionality can change once we integrate PIL. """ sizes = find_first(el, 'ext') if sizes is not None and sizes.get('cx'): if sizes.get('cx'): x = self._convert_image_size(int(sizes.get('cx'))) if sizes.get('cy'): y = self._convert_image_size(int(sizes.get('cy'))) return ( '%dpx' % x, '%dpx' % y, ) shape = find_first(el, 'shape') if shape is not None and shape.get('style') is not None: # If either of these are not set, rely on the method `image` to not # use either of them. x = 0 y = 0 styles = shape.get('style').split(';') for s in styles: if s.startswith('height:'): y = s.split(':')[1] if s.startswith('width:'): x = s.split(':')[1] return x, y return 0, 0 def parse_image(self, el): x, y = self._get_image_size(el) rId = self._get_image_id(el) src = self.rels_dict.get(rId) if not src: return '' src = os.path.join( 'word', src, ) if src in self._image_data: filename = os.path.split(src)[-1] return self.image(self._image_data[src], filename, x, y) return '' def _is_style_on(self, value): """ For b, i, u (bold, italics, and underline) merely having the tag is not sufficient. You need to check to make sure it is not set to "false" as well. """ return value not in DISABLED_STYLE_VALUES def parse_t(self, el, parsed): if el.text is None: return '' return self.escape(el.text) def parse_tab(self, el, parsed): # return ' ' return self.tab() def parse_hyphen(self, el, parsed): return '-' def parse_break_tag(self, el, parsed): return self.break_tag() def parse_deletion(self, el, parsed): if el.text is None: return '' return self.deletion(el.text, '', '') def parse_insertion(self, el, parsed): return self.insertion(parsed, '', '') def parse_r(self, el, parsed): """ Parse the running text. """ text = parsed if not text: return '' run_properties = {} # Get the rPr for the current style, they are the defaults. p = find_ancestor_with_tag(self.pre_processor, el, 'p') paragraph_style = self.memod_tree_op('find_first', p, 'pStyle') if paragraph_style is not None: style = paragraph_style.get('val') style_defaults = self.styles_dict.get(style, {}) run_properties.update( style_defaults.get('default_run_properties', {}), ) # Get the rPr for the current r tag, they are overrides. 
run_properties_element = el.find('rPr') if run_properties_element: local_run_properties = self._parse_run_properties( run_properties_element, ) run_properties.update(local_run_properties) inline_tag_handlers = { 'b': self.bold, 'i': self.italics, 'u': self.underline, 'caps': self.caps, 'smallCaps': self.small_caps, 'strike': self.strike, 'dstrike': self.strike, 'vanish': self.hide, 'webHidden': self.hide, } styles_needing_application = [] for property_name, property_value in run_properties.items(): # These tags are a little different, handle them separately # from the rest. # This could be a superscript or a subscript if property_name == 'vertAlign': if property_value == 'superscript': styles_needing_application.append(self.superscript) elif property_value == 'subscript': styles_needing_application.append(self.subscript) else: if ( property_name in inline_tag_handlers and self._is_style_on(property_value) ): styles_needing_application.append( inline_tag_handlers[property_name], ) # Apply all the handlers. for func in styles_needing_application: text = func(text) return text @property def parsed(self): return self._parsed @property def escape(self, text): return text @abstractmethod def linebreak(self): return '' @abstractmethod def paragraph(self, text): return text @abstractmethod def heading(self, text, heading_level): return text @abstractmethod def insertion(self, text, author, date): return text @abstractmethod def hyperlink(self, text, href): return text @abstractmethod def image_handler(self, path): return path @abstractmethod def image(self, data, filename, x, y): return self.image_handler(data) @abstractmethod def deletion(self, text, author, date): return text @abstractmethod def bold(self, text): return text @abstractmethod def italics(self, text): return text @abstractmethod def underline(self, text): return text @abstractmethod def caps(self, text): return text @abstractmethod def small_caps(self, text): return text @abstractmethod def strike(self, text): return text @abstractmethod def hide(self, text): return text @abstractmethod def superscript(self, text): return text @abstractmethod def subscript(self, text): return text @abstractmethod def tab(self): return True @abstractmethod def ordered_list(self, text): return text @abstractmethod def unordered_list(self, text): return text @abstractmethod def list_element(self, text): return text @abstractmethod def table(self, text): return text @abstractmethod def table_row(self, text): return text @abstractmethod def table_cell(self, text): return text @abstractmethod def page_break(self): return True @abstractmethod def indent(self, text, left='', right='', firstLine=''): return text # TODO JUSTIFIED JUSTIFIED TEXT
apache-2.0
-2,546,369,146,959,298,000
32.128954
79
0.550749
false
pfcurtis/twitter-spark-sentiment
bin/twitter_stream.py
1
5737
#!/bin/env python import os import sys import SocketServer sys.path.append('../lib') from multiprocessing import Process, Queue from tweepy import StreamListener from tweepy import OAuthHandler from tweepy import Stream from tweepy import API import json import logging from optparse import OptionParser from HTMLParser import HTMLParser import errno from datetime import datetime import ConfigParser this_dir = os.path.abspath(os.path.dirname(__file__)) logging.basicConfig() logger = logging.getLogger(os.path.join(this_dir, 'twitter_stream')) logger.setLevel(logging.INFO) CONFIGFILE = '../conf/twitter_stream.config' config = ConfigParser.ConfigParser() config.read(CONFIGFILE) try: CONSUMER_KEY = config.get('Twitter Keys', 'CONSUMER_KEY') CONSUMER_SECRET = config.get('Twitter Keys', 'CONSUMER_SECRET') ACCESS_TOKEN = config.get('Twitter Keys', 'ACCESS_TOKEN') ACCESS_TOKEN_SECRET = config.get('Twitter Keys', 'ACCESS_TOKEN_SECRET') except ConfigParser.NoSectionError as e: logger.warn(e) sys.exit(1) class DirNotFoundException(Exception): pass class TweetSaver(object): """A utility to append tweets to a json file tweet_saver = TweetSaver(save_dir="/path/to/save/tweets") Will create the following file tree: <save_dir>/YYYY/MM/DD/HH/tweets.json based on the created_at field in the tweet. """ def __init__(self, save_dir="."): self._saveDir = None self.saveDir = save_dir self._tweetCounter = 0 self._twitter_time_format = "%a %b %d %H:%M:%S +0000 %Y" def _make_sure_path_exists(self, path): try: os.makedirs(path) except OSError as exception: if exception.errno != errno.EEXIST: raise @property def saveDir(self): return self._saveDir @saveDir.setter def saveDir(self, value): if not os.path.exists(value): raise DirNotFoundException("Directory %s not found!" % value) self._saveDir = value def saveTweet(self, tweet): """Appends tweet text (raw) to a tweets.json file in <self.saveDir>/YYYY/MM/DD/HH/tweets.json based on created_at field. """ try: data = json.loads(HTMLParser().unescape(tweet)) created_at = datetime.strptime(data['created_at'], self._twitter_time_format) save_dir = os.path.join(os.path.abspath(self._saveDir), str(created_at.year), str(created_at.month).zfill(2), str(created_at.day).zfill(2), str(created_at.hour).zfill(2)) self._make_sure_path_exists(save_dir) tweet_file = os.path.join(save_dir, 'tweets.json') with open(tweet_file, 'a') as f: f.write(tweet) self._tweetCounter += 1 # logger.info("Saved %d tweets." % self._tweetCounter) sys.stdout.write("\rSaved %d tweets." % self._tweetCounter) sys.stdout.flush() f.close() except Exception, e: logger.exception(e) return class SaveTweetsListener(StreamListener): """ A listener that saves tweets to a specified directory """ def __init__(self, tweet_saver=None, api=None): super(SaveTweetsListener, self).__init__(api=api) self._tweet_saver = tweet_saver if tweet_saver is None: raise Exception("Need a tweet saver!") def on_data(self, raw_data): """Run when data comes through. Write raw_data to file. 
""" super(SaveTweetsListener, self).on_data(raw_data) self._tweet_saver.saveTweet(raw_data) def on_error(self, status): logger.warn(status) def parseOptions(): parser = OptionParser() parser.add_option("-q", "--query", dest="query", help="Quoted, comma-sepparated list of queries.", metavar='"Phillies, Red Sox"') parser.add_option("-d", "--dir", dest="directory", default=".", metavar="DIR", help="Directory to save the tweets to.") parser.add_option("-I", dest="index_tweets", action="store_true", help="Save tweets to an elasticsearch index") parser.add_option("-i", "--index", dest="index", default="default", help="Index to save tweets to for elasticsearch.") parser.add_option("-t", "--type", dest="type", default="tweet", help="Document type.") return parser.parse_args() if __name__ == '__main__': try: (options, args) = parseOptions() tweet_saver = TweetSaver(save_dir=options.directory) if config.has_section('Proxy'): api = API(proxy=config.get('Proxy', 'https_proxy')) else: api = API() l = SaveTweetsListener(tweet_saver=tweet_saver, api=api) auth = OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET) auth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_SECRET) if not options.query: print "Query required." sys.exit(1) tweetQueue = Queue() query = [x.strip() for x in options.query.split(',')] print("Listening for tweets containing: %s" % ', '.join(query)) stream = Stream(auth, l) stream.filter(track=query) except DirNotFoundException, e: logger.warn(e) sys.exit(1) except KeyboardInterrupt: logger.warn("Keyboard interrupt... exiting.") sys.exit(1) except Exception: raise
gpl-2.0
6,678,750,039,123,315,000
31.230337
75
0.589332
false
111pontes/ydk-py
cisco-ios-xr/ydk/models/cisco_ios_xr/Cisco_IOS_XR_fia_internal_tcam_oper.py
1
18546
""" Cisco_IOS_XR_fia_internal_tcam_oper This module contains a collection of YANG definitions for Cisco IOS\-XR fia\-internal\-tcam package operational data. This module contains definitions for the following management objects\: controller\: Controller Resources Copyright (c) 2013\-2016 by Cisco Systems, Inc. All rights reserved. """ import re import collections from enum import Enum from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict from ydk.errors import YPYError, YPYModelError class Controller(object): """ Controller Resources .. attribute:: dpa Controller DPA operational data **type**\: :py:class:`Dpa <ydk.models.cisco_ios_xr.Cisco_IOS_XR_fia_internal_tcam_oper.Controller.Dpa>` """ _prefix = 'fia-internal-tcam-oper' _revision = '2015-11-09' def __init__(self): self.dpa = Controller.Dpa() self.dpa.parent = self class Dpa(object): """ Controller DPA operational data .. attribute:: nodes DPA data for available nodes **type**\: :py:class:`Nodes <ydk.models.cisco_ios_xr.Cisco_IOS_XR_fia_internal_tcam_oper.Controller.Dpa.Nodes>` """ _prefix = 'fia-internal-tcam-oper' _revision = '2015-11-09' def __init__(self): self.parent = None self.nodes = Controller.Dpa.Nodes() self.nodes.parent = self class Nodes(object): """ DPA data for available nodes .. attribute:: node DPA operational data for a particular node **type**\: list of :py:class:`Node <ydk.models.cisco_ios_xr.Cisco_IOS_XR_fia_internal_tcam_oper.Controller.Dpa.Nodes.Node>` """ _prefix = 'fia-internal-tcam-oper' _revision = '2015-11-09' def __init__(self): self.parent = None self.node = YList() self.node.parent = self self.node.name = 'node' class Node(object): """ DPA operational data for a particular node .. attribute:: node_name <key> Node ID **type**\: str **pattern:** ([a\-zA\-Z0\-9\_]\*\\d+/){1,2}([a\-zA\-Z0\-9\_]\*\\d+) .. attribute:: internal_tcam_resources Internal TCAM Resource Information **type**\: :py:class:`InternalTcamResources <ydk.models.cisco_ios_xr.Cisco_IOS_XR_fia_internal_tcam_oper.Controller.Dpa.Nodes.Node.InternalTcamResources>` """ _prefix = 'fia-internal-tcam-oper' _revision = '2015-11-09' def __init__(self): self.parent = None self.node_name = None self.internal_tcam_resources = Controller.Dpa.Nodes.Node.InternalTcamResources() self.internal_tcam_resources.parent = self class InternalTcamResources(object): """ Internal TCAM Resource Information .. attribute:: npu_tcam npu tcam **type**\: list of :py:class:`NpuTcam <ydk.models.cisco_ios_xr.Cisco_IOS_XR_fia_internal_tcam_oper.Controller.Dpa.Nodes.Node.InternalTcamResources.NpuTcam>` """ _prefix = 'fia-internal-tcam-oper' _revision = '2015-11-09' def __init__(self): self.parent = None self.npu_tcam = YList() self.npu_tcam.parent = self self.npu_tcam.name = 'npu_tcam' class NpuTcam(object): """ npu tcam .. attribute:: npu_id npu id **type**\: int **range:** 0..4294967295 .. attribute:: tcam_bank tcam bank **type**\: list of :py:class:`TcamBank <ydk.models.cisco_ios_xr.Cisco_IOS_XR_fia_internal_tcam_oper.Controller.Dpa.Nodes.Node.InternalTcamResources.NpuTcam.TcamBank>` """ _prefix = 'fia-internal-tcam-oper' _revision = '2015-11-09' def __init__(self): self.parent = None self.npu_id = None self.tcam_bank = YList() self.tcam_bank.parent = self self.tcam_bank.name = 'tcam_bank' class TcamBank(object): """ tcam bank .. attribute:: bank_db bank db **type**\: list of :py:class:`BankDb <ydk.models.cisco_ios_xr.Cisco_IOS_XR_fia_internal_tcam_oper.Controller.Dpa.Nodes.Node.InternalTcamResources.NpuTcam.TcamBank.BankDb>` .. 
attribute:: bank_free_entries bank free entries **type**\: int **range:** 0..4294967295 .. attribute:: bank_id bank id **type**\: str .. attribute:: bank_inuse_entries bank inuse entries **type**\: int **range:** 0..4294967295 .. attribute:: bank_key_size bank key size **type**\: str .. attribute:: nof_dbs nof dbs **type**\: int **range:** 0..4294967295 .. attribute:: owner owner **type**\: str """ _prefix = 'fia-internal-tcam-oper' _revision = '2015-11-09' def __init__(self): self.parent = None self.bank_db = YList() self.bank_db.parent = self self.bank_db.name = 'bank_db' self.bank_free_entries = None self.bank_id = None self.bank_inuse_entries = None self.bank_key_size = None self.nof_dbs = None self.owner = None class BankDb(object): """ bank db .. attribute:: db_id db id **type**\: int **range:** 0..4294967295 .. attribute:: db_inuse_entries db inuse entries **type**\: int **range:** 0..4294967295 .. attribute:: db_prefix db prefix **type**\: str """ _prefix = 'fia-internal-tcam-oper' _revision = '2015-11-09' def __init__(self): self.parent = None self.db_id = None self.db_inuse_entries = None self.db_prefix = None @property def _common_path(self): if self.parent is None: raise YPYModelError('parent is not set . Cannot derive path.') return self.parent._common_path +'/Cisco-IOS-XR-fia-internal-tcam-oper:bank-db' def is_config(self): ''' Returns True if this instance represents config data else returns False ''' return False def _has_data(self): if self.db_id is not None: return True if self.db_inuse_entries is not None: return True if self.db_prefix is not None: return True return False @staticmethod def _meta_info(): from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_fia_internal_tcam_oper as meta return meta._meta_table['Controller.Dpa.Nodes.Node.InternalTcamResources.NpuTcam.TcamBank.BankDb']['meta_info'] @property def _common_path(self): if self.parent is None: raise YPYModelError('parent is not set . Cannot derive path.') return self.parent._common_path +'/Cisco-IOS-XR-fia-internal-tcam-oper:tcam-bank' def is_config(self): ''' Returns True if this instance represents config data else returns False ''' return False def _has_data(self): if self.bank_db is not None: for child_ref in self.bank_db: if child_ref._has_data(): return True if self.bank_free_entries is not None: return True if self.bank_id is not None: return True if self.bank_inuse_entries is not None: return True if self.bank_key_size is not None: return True if self.nof_dbs is not None: return True if self.owner is not None: return True return False @staticmethod def _meta_info(): from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_fia_internal_tcam_oper as meta return meta._meta_table['Controller.Dpa.Nodes.Node.InternalTcamResources.NpuTcam.TcamBank']['meta_info'] @property def _common_path(self): if self.parent is None: raise YPYModelError('parent is not set . 
Cannot derive path.') return self.parent._common_path +'/Cisco-IOS-XR-fia-internal-tcam-oper:npu-tcam' def is_config(self): ''' Returns True if this instance represents config data else returns False ''' return False def _has_data(self): if self.npu_id is not None: return True if self.tcam_bank is not None: for child_ref in self.tcam_bank: if child_ref._has_data(): return True return False @staticmethod def _meta_info(): from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_fia_internal_tcam_oper as meta return meta._meta_table['Controller.Dpa.Nodes.Node.InternalTcamResources.NpuTcam']['meta_info'] @property def _common_path(self): if self.parent is None: raise YPYModelError('parent is not set . Cannot derive path.') return self.parent._common_path +'/Cisco-IOS-XR-fia-internal-tcam-oper:internal-tcam-resources' def is_config(self): ''' Returns True if this instance represents config data else returns False ''' return False def _has_data(self): if self.npu_tcam is not None: for child_ref in self.npu_tcam: if child_ref._has_data(): return True return False @staticmethod def _meta_info(): from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_fia_internal_tcam_oper as meta return meta._meta_table['Controller.Dpa.Nodes.Node.InternalTcamResources']['meta_info'] @property def _common_path(self): if self.node_name is None: raise YPYModelError('Key property node_name is None') return '/Cisco-IOS-XR-fia-internal-tcam-oper:controller/Cisco-IOS-XR-fia-internal-tcam-oper:dpa/Cisco-IOS-XR-fia-internal-tcam-oper:nodes/Cisco-IOS-XR-fia-internal-tcam-oper:node[Cisco-IOS-XR-fia-internal-tcam-oper:node-name = ' + str(self.node_name) + ']' def is_config(self): ''' Returns True if this instance represents config data else returns False ''' return False def _has_data(self): if self.node_name is not None: return True if self.internal_tcam_resources is not None and self.internal_tcam_resources._has_data(): return True return False @staticmethod def _meta_info(): from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_fia_internal_tcam_oper as meta return meta._meta_table['Controller.Dpa.Nodes.Node']['meta_info'] @property def _common_path(self): return '/Cisco-IOS-XR-fia-internal-tcam-oper:controller/Cisco-IOS-XR-fia-internal-tcam-oper:dpa/Cisco-IOS-XR-fia-internal-tcam-oper:nodes' def is_config(self): ''' Returns True if this instance represents config data else returns False ''' return False def _has_data(self): if self.node is not None: for child_ref in self.node: if child_ref._has_data(): return True return False @staticmethod def _meta_info(): from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_fia_internal_tcam_oper as meta return meta._meta_table['Controller.Dpa.Nodes']['meta_info'] @property def _common_path(self): return '/Cisco-IOS-XR-fia-internal-tcam-oper:controller/Cisco-IOS-XR-fia-internal-tcam-oper:dpa' def is_config(self): ''' Returns True if this instance represents config data else returns False ''' return False def _has_data(self): if self.nodes is not None and self.nodes._has_data(): return True return False @staticmethod def _meta_info(): from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_fia_internal_tcam_oper as meta return meta._meta_table['Controller.Dpa']['meta_info'] @property def _common_path(self): return '/Cisco-IOS-XR-fia-internal-tcam-oper:controller' def is_config(self): ''' Returns True if this instance represents config data else returns False ''' return False def _has_data(self): if self.dpa is not None and self.dpa._has_data(): return True return False 
@staticmethod def _meta_info(): from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_fia_internal_tcam_oper as meta return meta._meta_table['Controller']['meta_info']
apache-2.0
-5,383,710,869,859,080,000
37.082136
276
0.405155
false
adamfisk/littleshoot-client
server/appengine/littleshoot/jsonControllerUtils.py
1
1718
from django.http import HttpResponse
from django.http import HttpResponseNotFound
from django.http import HttpResponseBadRequest

import logging
#import logging.config

#logging.config.fileConfig('logging.conf')

"""
public static void writeResponse(final HttpServletRequest request,
    final HttpServletResponse response, final String data) throws IOException
    {
    final String responseString;
    final String functionName = request.getParameter("callback");
    if (StringUtils.isBlank(functionName))
        {
        m_log.debug("No response function, sending raw JSON");
        responseString = data;
        }
    else
        {
        responseString = functionName+"("+data+");";
        }
    m_log.trace("Function: "+functionName);

    response.setContentType("application/json");
    final OutputStream os = response.getOutputStream();
    m_log.debug("Writing javascript callback.");
    os.write(responseString.getBytes("UTF-8"));
    os.flush();
    }
"""

def writeResponse(request, data):
    logging.info('Writing data %s to response', data)
    functionName = request.REQUEST.get('callback')

    if functionName is None:
        #return HttpResponseBadRequest('No callback specified')
        responseString = data
        mimeType = 'application/json'
    else:
        responseString = functionName
        responseString += '('
        responseString += data
        responseString += ')'
        mimeType = 'text/javascript'

    logging.info('Writing response: %s', responseString)
    return HttpResponse(responseString, mimetype=mimeType)
gpl-2.0
-2,270,159,987,060,000,300
28.118644
70
0.62922
false
fbergroth/autosort
autosort/cli.py
1
1180
import argparse
import os
import sys

from .sorting import sort_imports


def create_parser():
    parser = argparse.ArgumentParser(prog='autosort')
    parser.add_argument('files', nargs='+', help='files to sort')
    parser.add_argument('-v', '--verbose', action='store_true',
                        help='show verbose output')
    return parser


def parse_args(args):
    parser = create_parser()
    args = parser.parse_args(args)
    return args


def find_pyfiles(paths):
    for path in paths:
        if os.path.isdir(path):
            for dirpath, _, files in os.walk(path):
                for file in files:
                    if file.endswith('.py'):
                        yield os.path.join(dirpath, file)
        else:
            yield path


def main():
    args = parse_args(sys.argv[1:])

    for file in find_pyfiles(args.files):
        with open(file) as f:
            input = f.read()

        output = sort_imports(input, file)
        changed = input != output

        if changed:
            with open(file, 'w') as f:
                f.write(output)

        if args.verbose:
            print('{0} {1}'.format('>>>' if changed else '...', file))
mit
-5,355,633,666,017,201,000
24.106383
70
0.549153
false
palladius/gcloud
packages/gcutil-1.7.1/lib/google_compute_engine/gcutil/command_base.py
1
62836
# Copyright 2012 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Base command types for interacting with Google Compute Engine.""" import datetime import httplib import inspect import json import os import re import sys import time import traceback from apiclient import discovery from apiclient import errors from apiclient import model import httplib2 import iso8601 import oauth2client.client as oauth2_client from google.apputils import app from google.apputils import appcommands import gflags as flags from gcutil import auth_helper from gcutil import flags_cache from gcutil import gcutil_logging from gcutil import metadata_lib from gcutil import scopes from gcutil import thread_pool from gcutil import utils from gcutil import version from gcutil import table_formatter FLAGS = flags.FLAGS LOGGER = gcutil_logging.LOGGER CLIENT_ID = 'google-api-client-python-compute-cmdline/1.0' CURRENT_VERSION = version.__default_api_version__ SUPPORTED_VERSIONS = version.__supported_api_versions__ GLOBAL_ZONE_NAME = 'global' # The ordering to impose on machine types when prompting the user for # a machine type choice. MACHINE_TYPE_ORDERING = ['standard', 'highcpu', 'highmem'] flags.DEFINE_enum( 'service_version', CURRENT_VERSION, SUPPORTED_VERSIONS, 'Google computation service version.') flags.DEFINE_string( 'api_host', 'https://www.googleapis.com/', 'API host name') flags.DEFINE_string( 'project', None, 'The name of the Google Compute Engine project.') flags.DEFINE_string( 'project_id', None, 'The name of the Google Compute Engine project. ' 'Deprecated, use --project instead.') flags.DEFINE_bool( 'print_json', False, 'Output JSON instead of tabular format. Deprecated, use --format=json.') flags.DEFINE_enum( 'format', 'table', ('table', 'sparse', 'json', 'csv', 'names'), 'Format for command output. Options include:' '\n table: formatted table output' '\n sparse: simpler table output' '\n json: raw json output (formerly --print_json)' '\n csv: csv format with header' '\n names: list of resource names only, no header') flags.DEFINE_enum( 'long_values_display_format', 'elided', ['elided', 'full'], 'The display preference for long table values.') flags.DEFINE_bool( 'fetch_discovery', False, 'If true, grab the API description from the discovery API.') flags.DEFINE_bool( 'synchronous_mode', True, 'If false, return immediately after posting a request.') flags.DEFINE_integer( 'sleep_between_polls', 3, 'The time to sleep between polls to the server in seconds.', 1, 600) flags.DEFINE_integer( 'max_wait_time', 240, 'The maximum time to wait for an asynchronous operation to complete in ' 'seconds.', 30, 1200) flags.DEFINE_string( 'trace_token', None, 'Trace the API requests using a trace token provided by Google.') flags.DEFINE_integer( 'concurrent_operations', 10, 'The maximum number of concurrent operations to have in progress at once. 
' 'Increasing this number will probably result in hitting rate limits.', 1, 20) class Error(Exception): """The base class for this tool's error reporting infrastructure.""" class CommandError(Error): """Raised when a command hits a general error.""" # A wrapper around an Api that adds a trace keyword to the Api. class TracedApi(object): """Wrap an Api to add a trace keyword argument.""" def __init__(self, obj, trace_token): def Wrap(func): def _Wrapped(*args, **kwargs): # Add a trace= URL parameter to the method call. if trace_token: kwargs['trace'] = trace_token return func(*args, **kwargs) return _Wrapped # Find all public methods and interpose them. for method in inspect.getmembers(obj, (inspect.ismethod)): if not method[0].startswith('__'): setattr(self, method[0], Wrap(method[1])) class TracedComputeApi(object): """Wrap a ComputeApi object to return TracedApis.""" def __init__(self, obj, trace_token): def Wrap(func): def _Wrapped(*args, **kwargs): ret = func(*args, **kwargs) if ret: ret = TracedApi(ret, trace_token) return ret return _Wrapped # Find all our public methods and interpose them. for method in inspect.getmembers(obj, (inspect.ismethod)): if not method[0].startswith('__'): setattr(self, method[0], Wrap(method[1])) class ApiThreadPoolOperation(thread_pool.Operation): """A Thread pool operation that will execute an API request. This will wait for the operation to complete, if appropriate. The result from the object will be the last operation object returned. """ def __init__(self, request, command, wait_for_operation, collection_name=None): """Initializer.""" super(ApiThreadPoolOperation, self).__init__() self._request = request self._command = command self._wait_for_operation = wait_for_operation self._collection_name = collection_name def Run(self): """Execute the request on a separate thread.""" # Note that the httplib2.Http command isn't thread safe. As such, # we need to create a new Http object here. http = self._command.CreateHttp() result = self._request.execute(http=http) if self._wait_for_operation: result = self._command.WaitForOperation( self._command.GetFlags(), time, result, http=http, collection_name=self._collection_name) return result class GoogleComputeCommand(appcommands.Cmd): """Base class for commands that interact with the Google Compute Engine API. Overriding classes must override the SetApi and Handle methods. Attributes: GOOGLE_PROJECT_PATH: The common 'google' project used for storage of shared images and kernels. operation_detail_fields: A set of tuples of (json field name, human readable name) used to generate a pretty-printed detailed description of an operation resource. supported_versions: The list of API versions supported by this tool. safety_prompt: A boolean indicating whether the command requires user confirmation prior to executing. 
""" GOOGLE_PROJECT_PATH = 'projects/google' operation_default_sort_field = 'insert-time' operation_summary_fields = (('name', 'name'), ('zone', 'zone'), ('status', 'status'), ('status-message', 'statusMessage'), ('target', 'targetLink'), ('insert-time', 'insertTime'), ('operation-type', 'operationType'), ('error', 'error.errors.code'), ('warning', 'warnings.code')) operation_detail_fields = (('name', 'name'), ('zone', 'zone'), ('creation-time', 'creationTimestamp'), ('status', 'status'), ('progress', 'progress'), ('status-message', 'statusMessage'), ('target', 'targetLink'), ('target-id', 'targetId'), ('client-operation-id', 'clientOperationId'), ('insert-time', 'insertTime'), ('user', 'user'), ('start-time', 'startTime'), ('end-time', 'endTime'), ('operation-type', 'operationType'), ('error-code', 'httpErrorStatusCode'), ('error-message', 'httpErrorMessage'), ('warning', 'warnings.code'), ('warning-message', 'warnings.message')) # If this is set to True then the arguments and flags for this # command are sorted such that everything that looks like a flag is # pulled out of the arguments. If a command needs unparsed flags # after positional arguments (like ssh) then set this to False. sort_args_and_flags = True def __init__(self, name, flag_values): """Initializes a new instance of a GoogleComputeCommand. Args: name: The name of the command. flag_values: The values of command line flags to be used by the command. """ super(GoogleComputeCommand, self).__init__(name, flag_values) self._credential = None self.supported_versions = SUPPORTED_VERSIONS if hasattr(self, 'safety_prompt'): flags.DEFINE_bool('force', False, 'Override the "%s" prompt' % self.safety_prompt, flag_values=flag_values, short_name='f') def _ReadInSelectedItem(self, menu, menu_name): while True: userinput = raw_input('>>> ').strip() try: selection = int(userinput) if selection in menu: return selection except ValueError: pass print 'Invalid selection, please choose one of the listed ' + menu_name def _PromptForEntry(self, collection_api, collection_name, project=None, auto_select=True, extract_resource_prompt=None, additional_key_func=None): """Prompt the user to select an entry from an API collection. Args: collection_api: The API collection wrapper. collection_name: The name of the collection used in building the prompts. project: A project whose collection to use. Defaults to self._project. auto_select: If True and the collection has a single element then that element is chosen without prompting the user. extract_resource_prompt: A function that takes a resource JSON and returns the resource prompt. If not provided, the resource's 'name' field is going to be used as the default prompt text. additional_key_func: Lambda resource_name -> int. If supplied, this function will be used as the first sort key of the name. Returns: A collection entry as selected by the user or None if the collection is empty; """ choices = utils.All(collection_api.list, project or self._project)['items'] return self._PromptForChoice( choices, collection_name, auto_select, extract_resource_prompt, additional_key_func) def _PromptForChoice(self, choices, collection_name, auto_select=True, extract_resource_prompt=None, additional_key_func=None): """Prompts user to select one of the resources from the choices list. The function will create list of prompts from the list of choices. If caller passed extract_resource_prompt function, the extract_resource_prompt will be called on each resource to generate appropriate prompt text. 
Prompt strings are sorted alphabetically and offered to the user to select the desired option. The selected resource is then returned to the caller. If the list of choices is empty, None is returned. If there is only one available choice and auto_select is True, user is not prompted but rather, the only available option is returned. Args: choices: List of Google Compute Engine resources from which user should choose. collection_name: Name of the collection to present to the user. auto_select: Boolean. If set to True and only one resource is available in the list of choices, user will not be prompted but rather, the only available option will be chosen. extract_resource_prompt: Lambda resource -> string. If supplied, this function will be called on each resource to generate the prompt string for the resource. additional_key_func: Lambda resource_name -> int. If supplied, this function will be used as the first sort key of the name. Returns: The resource user selected. Returns the actual resource as the JSON object model represented as Python dictionary. """ if extract_resource_prompt is None: def ExtractResourcePrompt(resource): return resource['name'].split('/')[-1] extract_resource_prompt = ExtractResourcePrompt if not choices: return None if auto_select and len(choices) == 1: print 'Selecting the only available %s: %s' % ( collection_name, choices[0]['name']) if 'deprecated' in choices[0]: LOGGER.warn('Warning: %s is deprecated!', choices[0]['name']) return choices[0] deprecated_choices = [(extract_resource_prompt(ch) + ' (DEPRECATED)', ch) for ch in choices if 'deprecated' in ch and ch['deprecated']['state'] == 'DEPRECATED'] deprecated_choices.sort(key=lambda pair: pair[0]) choices = [(extract_resource_prompt(ch), ch) for ch in choices if not 'deprecated' in ch] if additional_key_func: key_func = lambda pair: (additional_key_func(pair[0]), pair[0]) else: key_func = lambda pair: pair[0] choices.sort(key=key_func) choices.extend(deprecated_choices) for i, (short_name, unused_choice) in enumerate(choices): print '%d: %s' % (i + 1, short_name) selection = self._ReadInSelectedItem( range(1, len(choices) + 1), collection_name + 's') return choices[selection - 1][1] def _PromptForKernel(self): """Prompt the user to select a kernel from the available kernels. Returns: A kernel resource selected by the user, or None if no kernels available. """ def ExtractKernelPrompt(kernel): return self._PresentElement( self.NormalizeGlobalResourceName('google', 'kernels', kernel['name'])) return self._PromptForEntry( self._kernels_api, 'kernel', 'google', extract_resource_prompt=ExtractKernelPrompt) def _PromptForImage(self): choices = (utils.All(self._images_api.list, 'google')['items'] + utils.All(self._images_api.list, self._project)['items']) def ExtractImagePrompt(image): return self._PresentElement(image['selfLink']) return self._PromptForChoice(choices, 'image', True, ExtractImagePrompt) def _PromptForZone(self): """Prompt the user to select a zone from the current list. Returns: A zone resource as selected by the user. """ now = datetime.datetime.utcnow() def ExtractZonePrompt(zone): """Creates a text prompt for a zone resource. Includes maintenance information for zones that enter maintenance in less than two weeks. Args: zone: The Google Compute Engine zone resource. Returns: string to represent a specific zone choice to present to the user. 
""" name = zone['name'].split('/')[-1] maintenance = GoogleComputeCommand._GetNextMaintenanceStart(zone, now) if maintenance is not None: if maintenance < now: msg = 'currently in maintenance' else: delta = maintenance - now if delta >= datetime.timedelta(weeks=2): msg = None elif delta.days < 1: msg = 'maintenance starts in less than 24 hours' elif delta.days == 1: msg = 'maintenance starts in 1 day' else: msg = 'maintenance starts in %s days' % delta.days if msg: return '%s (%s)' % (name, msg) return name return self._PromptForEntry(self._zones_api, 'zone', extract_resource_prompt=ExtractZonePrompt) def _PromptForDisk(self): """Prompt the user to select a disk from the current list. Returns: A disk resource as selected by the user. """ return self._PromptForEntry(self._disks_api, 'disk', auto_select=False) def _GetMachineTypeSecondarySortScore(self, value): """Returns a score for the given machine type to be used in sorting. This is used to ensure that the lower cost machine types are the first ones displayed to the user. Args: value: The name of a machine type. Returns: An integer that defines a sort order. """ for i in range(len(MACHINE_TYPE_ORDERING)): if MACHINE_TYPE_ORDERING[i] in value: return i return len(MACHINE_TYPE_ORDERING) def _PromptForMachineType(self): """Prompt the user to select a machine type from the current list. Returns: A machine type resource as selected by the user. """ return self._PromptForEntry( self._machine_types_api, 'machine type', additional_key_func=self._GetMachineTypeSecondarySortScore) @staticmethod def _GetNextMaintenanceStart(zone, now=None): def ParseDate(date): # Removes the timezone awareness from the timestamp we get back # from the server. This is necessary because utcnow() is # timezone unaware and it's much easier to remove timezone # awareness than to add it in. The latter option requires more # code and possibly other libraries. return iso8601.parse_date(date).replace(tzinfo=None) if now is None: now = datetime.datetime.utcnow() maintenance = zone.get('maintenanceWindows') next_window = None if maintenance: # Find the next maintenance window. for mw in maintenance: # Is it already past? end = mw.get('endTime') if end: end = ParseDate(end) if end < now: # Skip maintenance because it has occurred in the past. continue begin = mw.get('beginTime') if begin: begin = ParseDate(begin) if next_window is None or begin < next_window: next_window = begin return next_window def _GetZone(self, zone=None): """Notifies the user if the given zone will enter maintenance soon. The given zone can be None in which case the user is prompted for a zone. This method is intended to provide a warning to the user if he or she seeks to create a disk or instance in a zone that will enter maintenance in less than two weeks. Args: zone: The name of the zone chosen, or None. Returns: The given zone or the zone chosen through the prompt. """ if zone is None: zone_resource = self._PromptForZone() zone = zone_resource['name'] else: zone = zone.split('/')[-1] zone_resource = self._zones_api.get( project=self._project, zone=zone).execute() # Warns the user if there is an upcoming maintenance for the # chosen zone. Times returned from the server are in UTC. 
now = datetime.datetime.utcnow() next_win = GoogleComputeCommand._GetNextMaintenanceStart( zone_resource, now) if next_win is not None: if next_win < now: msg = 'is unavailable due to maintenance' else: delta = next_win - now if delta >= datetime.timedelta(weeks=2): msg = None elif delta.days < 1: msg = 'less than 24 hours' elif delta.days == 1: msg = '1 day' else: msg = '%s days' % delta.days if msg: msg = 'will become unavailable due to maintenance in %s' % msg if msg: LOGGER.warn('%s %s.', zone, msg) return zone def _GetZones(self): """Retrieves the full list of zones available to this project. Returns: List of zones available to this project. """ return utils.AllNames(self._zones_api.list, self._project) def _AuthenticateWrapper(self, http): """Adds the OAuth token into http request. Args: http: An instance of httplib2.Http or something that acts like it. Returns: httplib2.Http like object. Raises: CommandError: If the credentials can't be found. """ if not self._credential: self._credential = auth_helper.GetCredentialFromStore( self.__GetRequiredAuthScopes()) if not self._credential: raise CommandError( 'Could not get valid credentials for API.') return self._credential.authorize(http) def _ParseArgumentsAndFlags(self, flag_values, argv): """Parses the command line arguments for the command. This method matches up positional arguments based on the signature of the Handle method. It also parses the flags found on the command line. argv will contain, <main python file>, positional-arguments, flags... Args: flag_values: The flags list to update argv: The command line argument list Returns: The list of position arguments for the given command. Raises: CommandError: If any problems occur with parsing the commands (e.g., type mistmatches, out of bounds, unknown commands, ...). """ # If we are sorting args and flags, kick the flag parser into gnu # mode and parse some more. argv will be all of the unparsed args # after this. if self.sort_args_and_flags: try: old_gnu_mode = flag_values.IsGnuGetOpt() flag_values.UseGnuGetOpt(True) argv = flag_values(argv) except (flags.IllegalFlagValue, flags.UnrecognizedFlagError) as e: raise CommandError(e) finally: flag_values.UseGnuGetOpt(old_gnu_mode) # We use the same positional arguments used by the command's Handle method. # For AddDisk this will be, ['self', 'disk_name']. argspec = inspect.getargspec(self.Handle) # Skip the implicit argument 'self' and take the list of # positional command args. default_count = len(argspec.defaults) if argspec.defaults else 0 pos_arg_names = argspec.args[1:] # We then parse off values for those positional arguments from argv. # Note that we skip the first argument, as that is the command path. pos_arg_values = argv[1:len(pos_arg_names) + 1] # Take all the arguments past the positional arguments. If there # is a var_arg on the command this will get passed in. unparsed_args = argv[len(pos_arg_names) + 1:] # If we did not get enough positional argument values print error and exit. if len(pos_arg_names) - default_count > len(pos_arg_values): missing_args = pos_arg_names[len(pos_arg_values):] missing_args = ['"%s"' % a for a in missing_args] raise CommandError('Positional argument %s is missing.' % ', '.join(missing_args)) # If users specified flags in place of positional argument values, # print error and exit. 
for (name, value) in zip(pos_arg_names, pos_arg_values): if value.startswith('--'): raise CommandError('Invalid positional argument value \'%s\' ' 'for argument \'%s\'\n' % (value, name)) # If there are any unparsed args and the command is not expecting # varargs, print error and exit. if (unparsed_args and # MOE_begin_strip # This is a temporary measure to allow new-style commands to # have varargs without having a Handle method. # MOE_end_strip not getattr(self, 'has_varargs', False) and not argspec.varargs): unparsed_args = ['"%s"' % a for a in unparsed_args] raise CommandError('Unknown argument: %s' % ', '.join(unparsed_args)) return argv[1:] def _BuildComputeApi(self, http): """Builds the Google Compute Engine API to use. Args: http: a httplib2.Http like object for communication. Returns: The API object to use. """ # For versions of the apiclient library prior to v1beta2, we need to # specify the LoggingJsonModel in order to get request and response # logging to work. json_model = (model.LoggingJsonModel() if 'LoggingJsonModel' in dir(model) else model.JsonModel()) if FLAGS.fetch_discovery: discovery_uri = (FLAGS.api_host + 'discovery/v1/apis/{api}/{apiVersion}/rest') return self.WrapApiIfNeeded(discovery.build( 'compute', FLAGS.service_version, http=http, discoveryServiceUrl=discovery_uri, model=json_model)) else: discovery_file_name = os.path.join( os.path.dirname(__file__), 'compute/%s.json' % FLAGS.service_version) try: discovery_file = file(discovery_file_name, 'r') discovery_doc = discovery_file.read() discovery_file.close() except IOError: raise CommandError( 'Could not load discovery document from disk. Perhaps try ' '--fetch_discovery. \nFile: %s' % discovery_file_name) return self.WrapApiIfNeeded(discovery.build_from_document( discovery_doc, base=FLAGS.api_host, http=http, model=json_model)) @staticmethod def WrapApiIfNeeded(api): """Wraps the API to enable logging or tracing.""" if FLAGS.trace_token: return TracedComputeApi(api, 'token:%s' % (FLAGS.trace_token)) return api @staticmethod def DenormalizeResourceName(resource_name): """Return the relative name for the given resource. Args: resource_name: The name of the resource. This can be either relative or absolute. Returns: The name of the resource relative to its enclosing collection. """ return resource_name.strip('/').rpartition('/')[2] @staticmethod def DenormalizeProjectName(flag_values): """Denormalize the 'project' entry in the given FlagValues instance. Args: flag_values: The FlagValues instance to update. Raises: CommandError: If the project is missing or malformed. """ project = flag_values.project or flag_values.project_id if not project: raise CommandError( 'You must specify a project name using the "--project" flag.') elif project.lower() != project: raise CommandError( 'Characters in project name must be lowercase: %s.' % project) project = project.strip('/') if project.startswith('projects/'): project = project[len('projects/'):] if '/' in project: raise CommandError('Project names can contain a \'/\' only when they ' 'begin with \'projects/\'.') flag_values.project = project flag_values.project_id = None def _GetBaseApiUrl(self): """Get the base API URL given the current flag_values. Returns: The base API URL. For example, https://www.googleapis.com/compute/v1beta14. """ return '%scompute/%s' % (self._flags.api_host, self._flags.service_version) def _AddBaseUrlIfNecessary(self, resource_path): """Add the base URL to a resource_path if required by the service_version. 
Args: resource_path: The resource path to add the URL to. Returns: A full API-usable reference to the given resource_path. """ if not self._GetBaseApiUrl() in resource_path: return '%s/%s' % (self._GetBaseApiUrl(), resource_path) return resource_path def _StripBaseUrl(self, value): """Removes the a base URL from the string if it exists. Note that right now the server may not return exactly the right base URL so we strip off stuff that looks like a base URL. Args: value: The string to strip the base URL from. Returns: A string without the base URL. """ pattern = '^' + re.escape(self._flags.api_host) + r'compute/\w*/' return re.sub(pattern, '', value) def NormalizeResourceName(self, project, scope_name, collection_name, resource_name): """Return the full name for the given resource. Args: project: The name of the project containing the resource. scope_name: The scope of the collection containing the resource. collection_name: The name of the collection containing the resource. resource_name: The name of the resource. This can be either relative or absolute. Returns: The full URL of the resource. """ resource_name = resource_name.strip('/') if (collection_name == 'machine-types' and 'v1beta13' in self.supported_versions and self._IsUsingAtLeastApiVersion('v1beta13')): collection_name = 'machineTypes' if (resource_name.startswith('projects/') or resource_name.startswith(collection_name + '/') or resource_name.startswith(self._flags.api_host)): # This does not appear to be a relative name. return self._AddBaseUrlIfNecessary(resource_name) absolute_name = 'projects/%s/%s/%s' % (project, collection_name, resource_name) if self._IsUsingAtLeastApiVersion('v1beta14') and scope_name: absolute_name = 'projects/%s/%s/%s/%s' % (project, scope_name, collection_name, resource_name) return self._AddBaseUrlIfNecessary(absolute_name) def NormalizeTopLevelResourceName(self, project, collection, resource): """Return the full name for the given resource. Args: project: The name of the project containing the resource. collection: The name of the collection containing the resource. resource: The name of the resource. This can be either relative or absolute. Returns: The full URL of the resource. """ return self.NormalizeResourceName(project, None, collection, resource) def NormalizeGlobalResourceName(self, project, collection, resource): """Return the full name for the given resource. Args: project: The name of the project containing the resource. collection: The name of the collection containing the resource. resource: The name of the resource. This can be either relative or absolute. Returns: The full URL of the resource. """ return self.NormalizeResourceName(project, 'global', collection, resource) def NormalizePerZoneResourceName(self, project, zone, collection, resource): """Return the full name for the given resource. Args: project: The name of the project containing the resource. zone: The name of the zone containing the resource. collection: The name of the collection containing the resource. resource: The name of the resource. This can be either relative or absolute. Returns: The full URL of the resource. """ return self.NormalizeResourceName(project, 'zones/%s' % zone, collection, resource) def GetZoneForResource(self, api, resource_name, fail_if_not_found=True): """Gets the unqualified zone name for a given resource. The function first tries to use 'zone' parameter if set, but falls back to searching for the resource name across zones. Args: api: The API service that must expose 'list' method. 
resource_name: Name of the resource to find. fail_if_not_found: Raise an error when the resource is not found. Returns: Unqualified name of the zone the resource belongs to. Raises: CommandError: If the zone for the resource cannot be resolved. """ # If the resource is already project- and zone-qualified, use the zone. if not resource_name: return None resource_name_parts = self._StripBaseUrl(resource_name).split('/') if (len(resource_name_parts) > 3 and resource_name_parts[0] == 'projects' and resource_name_parts[2] == 'zones'): return resource_name_parts[3] if self._flags.zone == GLOBAL_ZONE_NAME: return None if self._flags.zone: return self._flags.zone filter_expression = utils.RegexesToFilterExpression( [self.DenormalizeResourceName(resource_name)]) items = [] for zone in self._GetZones(): # Limiting the number of results to 2, since anything other than one # is an error. sub_result = utils.All(api.list, self._project, max_results=2, filter=filter_expression, zone=zone) items.extend(sub_result.get('items', [])) if len(items) == 1: zone = self._GetZoneFromSelfLink(items[0]['selfLink']) LOGGER.info('Zone for %s detected as %s.', repr(resource_name), repr(zone or GLOBAL_ZONE_NAME)) LOGGER.warning('Consider passing \'--zone=%s\' to avoid the unnecessary ' 'zone lookup which requires extra API calls.', zone or GLOBAL_ZONE_NAME) return zone if fail_if_not_found: raise CommandError('Could not determine the zone of \'%s\'.' % resource_name) else: return None def _GetZoneFromSelfLink(self, self_link): """Parses the given self-link and returns per-project zone name.""" resource_name = self._StripBaseUrl(self_link) parts = resource_name.split('/') if len(parts) > 3 and parts[0] == 'projects' and parts[2] == 'zones': return parts[3] else: return None def _HandleSafetyPrompt(self, positional_arguments): """If a safety prompt is present on the class, handle it now. By defining a field 'safety_prompt', derived classes can request that the user confirm a dangerous operation prior to execution, e.g. deleting a resource. Users may override this check by passing the --force flag on the command line. Args: positional_arguments: A list of positional argument strings. Returns: True if the command should continue, False if not. """ if hasattr(self, 'safety_prompt'): if not self._flags.force: prompt = self.safety_prompt if positional_arguments: prompt = '%s %s' % (prompt, ', '.join(positional_arguments)) print '%s? [y/N]' % prompt userinput = raw_input('>>> ') if not userinput: userinput = 'n' userinput = userinput.lstrip()[:1].lower() if not userinput == 'y': return False return True def _IsUsingAtLeastApiVersion(self, required_version): """Determine if in-use API version is at least the specified version. Args: required_version: The API version to test. Returns: True if the given API version is equal or newer than the in-use API version, False otherwise. Raises: CommandError: If the specified API version is not known. """ if not (required_version in self.supported_versions and self._flags.service_version in self.supported_versions): raise CommandError('API version %s/%s unknown' % ( required_version, self._flags.service_version)) for index, known_version in enumerate(self.supported_versions): if known_version == self._flags.service_version: current_index = index if known_version == required_version: given_index = index return current_index >= given_index def _GetResourceApiKind(self, resource): """Determine the API version driven resource 'kind'. 
Args: resource: The resource type to generate a 'kind' string for. Returns: A string containing the API 'kind' """ return 'compute#%s' % resource def _ErrorInResult(self, result): """Return True if a result should be considered an error.""" ops = [] if self.IsResultAnOperation(result): ops = [result] elif self.IsResultAList(result): ops = result.get('items', []) for op in ops: # If op contains errors, it will be of the form: # {'error': {'errors': [...]}, ...} if (self._flags.synchronous_mode and op.get('error', {}).get('errors', [])): return True return False def Run(self, argv): """Run the command, printing the result. Args: argv: The arguments to the command. Returns: 0 if the command completes successfully, otherwise 1. """ try: pos_arg_values = self._ParseArgumentsAndFlags(FLAGS, argv) gcutil_logging.SetupLogging() # Synchronize the flags with any cached values present. flags_cache_obj = flags_cache.FlagsCache() flags_cache_obj.SynchronizeFlags() self.SetFlagDefaults() self.DenormalizeProjectName(FLAGS) self.SetFlags(FLAGS) auth_retry = True error_in_result = False while auth_retry: try: result, exceptions = self.RunWithFlagsAndPositionalArgs( self._flags, pos_arg_values) auth_retry = False self.PrintResult(result) self.LogExceptions(exceptions) if self._ErrorInResult(result): error_in_result = True # If we just have an AccessTokenRefreshError raise it so # that we retry. for exception in exceptions: if isinstance(exception, oauth2_client.AccessTokenRefreshError): if not result: raise exception else: LOGGER.warning('Refresh error when running multiple ' 'operations. Not automatically retrying as ' 'some requests succeeded.') break except oauth2_client.AccessTokenRefreshError, e: if not auth_retry: raise # Retrying the operation will induce OAuth2 reauthentication and # creation of the new refresh token. LOGGER.info('OAuth2 token refresh error (%s), retrying.\n', str(e)) auth_retry = False has_errors = bool(exceptions or error_in_result) # Updates the flags cache file only when the command exits with # a non-zero error code. if not has_errors: flags_cache_obj.UpdateCacheFile() return has_errors except errors.HttpError, http_error: self.LogHttpError(http_error) return 1 except app.UsageError: raise except: sys.stderr.write('%s\n' % '\n'.join( traceback.format_exception_only(sys.exc_type, sys.exc_value))) LOGGER.debug(traceback.format_exc()) return 1 def CreateHttp(self): """Construct an HTTP object to use with an API call. This is useful when doing multithreaded work as httplib2 Http objects aren't threadsafe. Returns: An object that implements the httplib2.Http interface """ http = httplib2.Http() http = self._AuthenticateWrapper(http) return http def RunWithFlagsAndPositionalArgs(self, flag_values, pos_arg_values): """Run the command with the parsed flags and positional arguments. This method is what a subclass should override if they do not want to use the REST API. Args: flag_values: The parsed FlagValues instance. pos_arg_values: The positional arguments for the Handle method. Raises: CommandError: If user choses to not proceed with the command at safety prompt. Returns: A tuple (result, exceptions) where results is a JSON-serializable result and exceptions is a list of exceptions that were thrown when running this command. 
""" http = self.CreateHttp() compute_api = self._BuildComputeApi(http) if self._IsUsingAtLeastApiVersion('v1beta14'): self._zone_operations_api = compute_api.zoneOperations() self._global_operations_api = compute_api.globalOperations() else: self._global_operations_api = compute_api.operations() self.SetApi(compute_api) if not self._HandleSafetyPrompt(pos_arg_values): raise CommandError('Operation aborted') exceptions = [] result = self.Handle(*pos_arg_values) if isinstance(result, tuple): result, exceptions = result if self._flags.synchronous_mode: result = self.WaitForOperation(flag_values, time, result) if isinstance(result, list): result = self.MakeListResult(result, 'operationList') return result, exceptions def IsResultAnOperation(self, result): """Determine if the result object is an operation.""" try: return ('kind' in result and result['kind'].endswith('#operation')) except TypeError: return False def IsResultAList(self, result): """Determine if the result object is a list of some sort.""" try: return ('kind' in result and result['kind'].endswith('List')) except TypeError: return False def MakeListResult(self, results, kind_base): """Given an array of results, create an list object for those results. Args: results: The list of results. kind_base: The kind of list to create Returns: A synthetic list resource created from the list of individual results. """ return { 'kind': self._GetResourceApiKind(kind_base), 'items': results, 'note': ('This JSON result is based on multiple API calls. This ' 'object was created in the client.') } def ExecuteRequests(self, requests, collection_name=None): """Execute a list of requests in a thread pool. Args: requests: A list of requests objects to execute. collection_name: The name of the collection. This is optional and is useful for subclasses that mutate more than one resource type. Returns: A tuple with (results, exceptions) where result list is the list of all results and exceptions is any exceptions that were raised. """ tp = thread_pool.ThreadPool(self._flags.concurrent_operations) ops = [] for request in requests: op = ApiThreadPoolOperation( request, self, self._flags.synchronous_mode, collection_name=collection_name) ops.append(op) tp.Add(op) tp.WaitShutdown() results = [] exceptions = [] for op in ops: if op.RaisedException(): exceptions.append(op.Result()) else: if isinstance(op.Result(), list): results.extend(op.Result()) else: results.append(op.Result()) return (results, exceptions) def WaitForOperation(self, flag_values, timer, result, http=None, collection_name=None): """Wait for a potentially asynchronous operation to complete. Args: flag_values: The parsed FlagValues instance. timer: An implementation of the time object, providing time and sleep methods. result: The result of the request, potentially containing an operation. http: An optional httplib2.Http object to use for requests. Returns: The synchronous return value, usually an operation object. """ resource = None if not self.IsResultAnOperation(result): return result start_time = timer.time() operation_type = result['operationType'] target = result['targetLink'].split('/')[-1] while result['status'] != 'DONE': if timer.time() - start_time >= flag_values.max_wait_time: LOGGER.warn('Timeout reached. %s of %s has not yet completed. 
' 'The operation (%s) is still %s.', operation_type, target, result['name'], result['status']) break # Timeout collection_name = (collection_name or getattr(self, 'resource_collection_name', None)) if collection_name: singular_collection_name = utils.Singularize(collection_name) qualified_name = '%s %s' % (singular_collection_name, target) else: qualified_name = target LOGGER.info('Waiting for %s of %s. Sleeping for %ss.', operation_type, qualified_name, flag_values.sleep_between_polls) timer.sleep(flag_values.sleep_between_polls) kwargs = { 'project': self._project, 'operation': result['name'], } poll_api = self._global_operations_api if self._IsUsingAtLeastApiVersion('v1beta14'): operation_zone = self._GetZoneFromSelfLink(result['selfLink']) if operation_zone: kwargs['zone'] = operation_zone poll_api = self._zone_operations_api # Poll the operation for status. request = poll_api.get(**kwargs) result = request.execute(http=http) else: if result['operationType'] != 'delete' and 'error' not in result: # We are going to replace the operation with its resulting resource. # Save the operation to return as well. target_link = result['targetLink'] http = self.CreateHttp() response, data = http.request(target_link, method='GET') if 200 <= response.status <= 299: resource = json.loads(data) if resource is not None: results = [] results.append(result) results.append(resource) return results return result def CommandGetHelp(self, unused_argv, cmd_names=None): """Get help for command. Args: unused_argv: Remaining command line flags and arguments after parsing command (that is a copy of sys.argv at the time of the function call with all parsed flags removed); unused in this implementation. cmd_names: By default, if help is being shown for more than one command, and this command defines _all_commands_help, then _all_commands_help will be displayed instead of the class doc. cmd_names is used to determine the number of commands being displayed and if only a single command is display then the class doc is returned. Returns: __doc__ property for command function or a message stating there is no help. """ help_str = super( GoogleComputeCommand, self).CommandGetHelp(unused_argv, cmd_names) return '%s\n\nUsage: %s' % (help_str, self._GetUsage()) def _GetUsage(self): """Get the usage string for the command, used to print help messages. Returns: The usage string for the command. """ res = '%s [--global_flags] %s [--command_flags]' % ( os.path.basename(sys.argv[0]), self._command_name) args = getattr(self, 'positional_args', None) if args: res = '%s %s' % (res, args) return res def Handle(self): """Actual implementation of the command. Derived classes override this method, adding positional arguments to this method as required. Returns: Either a single JSON-serializable result or a tuple of a result and a list of exceptions that are thrown. """ raise NotImplementedError() def SetFlags(self, flag_values): """Set the flags to be used by the command. Args: flag_values: The parsed flags values. """ self._flags = flag_values self._project = self._flags.project def GetFlags(self): """Get the flags object used by the command.""" return self._flags def SetApi(self, api): """Set the Google Compute Engine API for the command. Derived classes override this method, pulling the necessary domain specific API out of the global API. Args: api: The Google Compute Engine API used by this command. """ raise NotImplementedError() def _PresentElement(self, field_value): """Format a json value for tabular display. 
Strips off the project qualifier if present and elides the value if it won't fit inside of a max column size of 64 characters. Args: field_value: The json field value to be formatted. Returns: The formatted json value. """ if isinstance(field_value, basestring): field_value = self._StripBaseUrl(field_value).strip('/') if field_value.startswith('projects/' + self._project): field_value_parts = field_value.split('/') if len(field_value_parts) > 3: field_value = '/'.join(field_value_parts[3:]) else: field_value = field_value_parts[-1] if (self._flags.long_values_display_format == 'elided' and len(field_value) > 64): return field_value[:31] + '..' + field_value[-31:] return field_value def _FlattenObjectToList(self, instance_json, name_map): """Convert a json instance to a dictionary for output. Args: instance_json: A JSON object represented as a python dict. name_map: A list of key, json-path object tuples where the json-path object is either a string or a list of strings. ('name', 'container.id') or ('name', ['container.id.new', 'container.id.old']) Returns: A list of extracted values selected by the associated JSON path. In addition, names are simplified to their shortest path components. """ def ExtractSubKeys(json_object, subkey): """Extract and flatten a (possibly-repeated) field in a json object. Args: json_object: A JSON object represented as a python dict. subkey: a list of path elements, e.g. ['container', 'id']. Returns: [element1, element2, ...] or [] if the subkey could not be found. """ if not subkey: return [self._PresentElement(json_object)] if subkey[0] in json_object: element = json_object[subkey[0]] if isinstance(element, list): return sum([ExtractSubKeys(x, subkey[1:]) for x in element], []) return ExtractSubKeys(element, subkey[1:]) return [] ret = [] for unused_key, paths in name_map: # There may be multiple possible paths indicating the field name due to # versioning changes. Walk through them in order until one is found. if isinstance(paths, basestring): elements = ExtractSubKeys(instance_json, paths.split('.')) else: for path in paths: elements = ExtractSubKeys(instance_json, path.split('.')) if elements: break ret.append(','.join([str(x) for x in elements])) return ret def __AddErrorsForOperation(self, result, table): """Add any errors present in the operation result to the output table. Args: result: The json dictionary returned by the server. table: The pretty printing table to be customized. 
""" if 'error' in result: table.AddRow(('', '')) table.AddRow(('errors', '')) for error in result['error']['errors']: table.AddRow(('', '')) table.AddRow((' error', error['code'])) table.AddRow((' message', error['message'])) def LogExceptions(self, exceptions): """Log a list of exceptions returned in multithreaded operation.""" for exception in exceptions: if isinstance(exception, errors.HttpError): self.LogHttpError(exception) elif isinstance(exception, Exception): sys.stderr.write('%s\n' % '\n'.join(traceback.format_exception_only( type(exception).__name__, exception))) def LogHttpError(self, http_error): """Do specific logging when we hit an HttpError.""" def AddMessage(messages, error): msg = error.get('message') if msg: messages.add(msg) message = http_error.resp.reason try: data = json.loads(http_error.content) messages = set() if isinstance(data, dict): error = data.get('error', {}) AddMessage(messages, error) for error in error.get('errors', []): AddMessage(messages, error) message = '\n'.join(messages) except ValueError: pass sys.stderr.write('Error: %s\n' % message) # Log the full error response for debugging purposes. LOGGER.debug(http_error.resp) LOGGER.debug(http_error.content) def PrintResult(self, result): """Pretty-print the result of the command. If a class defines a list of ('title', 'json.field.path') values named 'fields', this list will be used to print a table of results using prettytable. If self.fields does not exist, result will be printed as pretty JSON. Note that if the result is either an Operations object or an OperationsList, it will be special cased and formatted appropriately. Args: result: A JSON-serializable object to print. """ if self._flags.print_json or self._flags.format == 'json': # We could have used the pprint module, but it produces # noisy output due to all of our keys and values being # unicode strings rather than simply ascii. print json.dumps(result, sort_keys=True, indent=2) return if result: if self._flags.format == 'names': self._PrintNamesOnly(result) elif self.IsResultAList(result): self._PrintList(result) else: self._PrintDetail(result) def _PrintNamesOnly(self, result): """Prints only names of the resources returned by Google Compute Engine API. Args: result: A GCE List resource to print. """ if self.IsResultAList(result): results = result.get('items', []) else: results = [result] for obj in results: name = obj.get('name') if name: print name def _CreateFormatter(self): if self._flags.format == 'sparse': return table_formatter.SparsePrettyFormatter() elif self._flags.format == 'csv': return table_formatter.CsvFormatter() else: return table_formatter.PrettyFormatter() def _PartitionResults(self, result): """Partitions results into operations and non-operation resources.""" res = [] ops = [] for obj in result.get('items', []): if self.IsResultAnOperation(obj): ops.append(obj) else: res.append(obj) return res, ops def _PrintList(self, result): """Prints a result which is a Google Compute Engine List resource. For the result of batch operations, splits the result list into operations and other resources and possibly prints two tables. The operations typically represent errors (unless printing results of listoperations command) whereas the real resources typically represent successfully completed operations. Args: result: A GCE List resource to print. """ # Split results into operations and the rest of resources. 
res, ops = self._PartitionResults(result) if res and ops: res_header = '\nTable of resources:\n' ops_header = '\nTable of operations:\n' else: res_header = ops_header = None if res or not ops: self._CreateAndPrintTable(res, res_header, getattr(self, 'summary_fields', None)) if ops: self._CreateAndPrintTable(ops, ops_header, self.operation_summary_fields) def _CreateAndPrintTable(self, values, header, fields): """Creates a table representation of the list of resources and prints it. Args: values: List of resources to display. header: A header to print before the table (can be None). fields: Summary field definition for the table. """ column_names = [x[0] for x in fields] rows = [self._FlattenObjectToList(row, fields) for row in values] table = self._CreateFormatter() table.AddColumns(column_names) table.AddRows(rows) if header: print header print table def _PrintDetail(self, result): """Prints a detail view of the result which is an individual resource. Args: result: A resource to print. """ if self.IsResultAnOperation(result): detail_fields = self.operation_detail_fields else: detail_fields = getattr(self, 'detail_fields', None) if not detail_fields: return row_names = [x[0] for x in detail_fields] table = self._CreateFormatter() table.AddColumns(('property', 'value')) property_bag = self._FlattenObjectToList(result, detail_fields) for i, v in enumerate(property_bag): table.AddRow((row_names[i], v)) # Handle customized printing of this result. # Operations are special cased here. if self.IsResultAnOperation(result): self.__AddErrorsForOperation(result, table) elif hasattr(self, 'CustomizePrintResult'): self.CustomizePrintResult(result, table) print table def __GetRequiredAuthScopes(self): """Returns a list of scopes required for this command.""" return scopes.DEFAULT_AUTH_SCOPES def SetFlagDefaults(self): if 'project' in FLAGS.FlagDict() and not FLAGS['project'].present: try: metadata = metadata_lib.Metadata() setattr(FLAGS, 'project', metadata.GetProjectId()) except metadata_lib.MetadataError: pass class GoogleComputeListCommand(GoogleComputeCommand): """Base class for list commands.""" # Overload these values in derived classes if they represent collections # at non-global scopes. is_global_level_collection = True is_zone_level_collection = False def __init__(self, name, flag_values): """Initializes a new instance of a GoogleComputeListCommand. Args: name: The name of the command. flag_values: The values of command line flags to be used by the command. """ super(GoogleComputeListCommand, self).__init__(name, flag_values) summary_fields = [x[0] for x in getattr(self, 'summary_fields', [])] if summary_fields: sort_fields = [] for field in summary_fields: sort_fields.append(field) sort_fields.append('-' + field) flags.DEFINE_enum('sort_by', None, sort_fields, 'Sort output results by the given field name. Field ' 'names starting with a "-" will lead to a descending ' 'order.', flag_values=flag_values) flags.DEFINE_integer('max_results', 100, 'Maximum number of items to list', lower_bound=1, flag_values=flag_values) flags.DEFINE_string('filter', None, 'Filter expression for filtering listed resources. 
' 'See gcutil documentation for syntax of the filter ' 'expression here: http://developers.google.com' '/compute/docs/gcutil/tips#filtering', flag_values=flag_values) flags.DEFINE_bool('fetch_all_pages', False, 'Whether to fetch all pages on truncated results', flag_values=flag_values) def Handle(self): """Returns the result of list on a resource type.""" if self._flags.sort_by or self._flags.fetch_all_pages: max_results = None else: max_results = self._flags.max_results if (self._IsUsingAtLeastApiVersion('v1beta14') and self.is_zone_level_collection): # We have three cases for zone level collections: # 1. A specific zone was specified via flag - just list the resources # in that zone. # 2. The collection exists in both the zone and global namespaces and # the "global" zone was specified - just list the resources in the # global namespace. # 3. No zone was specified via flag - list all resources in all # namespaces for this resource type. if 'zone' in self._flags and self._flags.zone: if (self.is_global_level_collection and self._flags.zone == GLOBAL_ZONE_NAME): zones = [None] else: zones = [self.DenormalizeResourceName(self._flags.zone)] else: zones = [] # If the collection is global and per-zone, include results from both. if self.is_global_level_collection: zones.append(None) zones.extend(self._GetZones()) items = [] for zone in zones: list_func = self.ListZoneFunc() if zone else self.ListFunc() sub_result = utils.All(list_func, self._project, max_results, self._flags.filter, zone) kind = sub_result.get('kind') items.extend(sub_result.get('items', [])) return {'kind': kind, 'items': items} # A global collection return utils.All( self.ListFunc(), self._project, max_results=max_results, filter=self._flags.filter) def _PrintList(self, result): """Prints a table for the given resources.""" items = result.get('items', []) column_names = [x[0] for x in self.summary_fields] rows = [self._FlattenObjectToList(row, self.summary_fields) for row in items] sort_col = self._flags.sort_by or getattr(self, 'default_sort_field', None) if sort_col: reverse = False if sort_col.startswith('-'): reverse = True sort_col = sort_col[1:] if sort_col in column_names: sort_col_idx = column_names.index(sort_col) rows = sorted(rows, key=(lambda row: row[sort_col_idx]), reverse=reverse) else: LOGGER.warn('Invalid sort column: ' + sort_col) if not self._flags.fetch_all_pages: # Truncates the list of results. If sorting was requested, all # the pages had to be fetched, so we have to truncate the final # results on the client side. If sorting was not requested, we # truncate anyway in case the server gives back more results # than requested. rows = rows[:self._flags.max_results] table = self._CreateFormatter() table.AddColumns(column_names) table.AddRows(rows) print table
gpl-3.0
4,345,030,681,632,666,000
33.754425
80
0.629989
false
vecnet/vecnet.openmalaria
vecnet/openmalaria/tests/test_deployment.py
1
1970
# Copyright (C) 2016, University of Notre Dame
# All rights reserved

import unittest

from vecnet.openmalaria.scenario.interventions import Deployment


class TestScenario(unittest.TestCase):
    def setUp(self):
        pass

    def test_create_timed(self):
        deployment = Deployment(None)
        deployment.create_from_xml("""
            <deployment name="Nets">
                <component id="LLIN" />
                <timed>
                    <deploy coverage="0.0" time="0" />
                    <deploy coverage="0.0" time="230" />
                    <deploy coverage="0.0" time="449" />
                </timed>
            </deployment>""")
        self.assertEqual(deployment.name, "Nets")
        self.assertEqual(len(deployment.components), 1)
        self.assertEqual(deployment.components[0], "LLIN")
        self.assertEqual(deployment.timesteps, [{"coverage": 0.0, "time": 0}, {'coverage': 0.0, 'time': 230}, {'coverage': 0.0, 'time': 449}])
        self.assertIsNone(deployment.continuous)

    def test_create_continuous(self):
        deployment = Deployment(None)
        deployment.create_from_xml("""
            <deployment>
                <component id="PEV" />
                <continuous>
                    <deploy coverage="0.0" targetAgeYrs="0.0833" />
                    <deploy coverage="0.0" targetAgeYrs="0.17" />
                    <deploy coverage="0.0" targetAgeYrs="0.25" />
                </continuous>
            </deployment>""")
        self.assertRaises(AttributeError, getattr, deployment, "name")
        self.assertEqual(len(deployment.components), 1)
        self.assertEqual(deployment.components[0], "PEV")
        self.assertRaises(AttributeError, getattr, deployment, "timesteps")
        self.assertEqual(deployment.continuous, [{'end': 2147483647, 'begin': 0, 'targetAgeYrs': 0.0833}, {'end': 2147483647, 'begin': 0, 'targetAgeYrs': 0.17}, {'end': 2147483647, 'begin': 0, 'targetAgeYrs': 0.25}])
mpl-2.0
3,677,951,995,925,831,700
43.772727
216
0.57868
false
graag/lgogwebui
lgogwebui.py
1
15239
#!/usr/bin/env python3
# pylint: disable=invalid-name,bad-continuation,too-many-statements,
# pylint: disable=too-many-branches,too-many-locals
"""
Simple web interface for
[lgogdownloader](https://github.com/Sude-/lgogdownloader), a gog.com
download manager for Linux.
"""
import sys
import json
import os
from threading import Timer
from concurrent.futures import ThreadPoolExecutor

from flask import render_template, jsonify, request, redirect, url_for
from flask_autoindex import AutoIndex
from sqlalchemy.orm.exc import NoResultFound
from sqlalchemy import or_

import main
import config
import lgogdaemon
import models
from models import Game, User, LoginStatus, Status, Session

app = main.app

download_scheduler = ThreadPoolExecutor(max_workers=2)
update_scheduler = ThreadPoolExecutor(max_workers=2)

# Create instance of AutoIndex used to display contents of game download
# directory. Explicitly disable add_url_rules as it would define some default
# routes for "/"
index = AutoIndex(app, config.lgog_library, add_url_rules=False)

# Define logger handlers and start update timer
if not app.debug or os.environ.get("WERKZEUG_RUN_MAIN") == "true":
    _session = Session()
    app.logger.info("Initialize lgogwebui ...")
    app.logger.info(sys.version)
    # Make sure that the database exists
    models.Base.metadata.create_all(models.ENGINE)
    # Make sure that login state exists in the DB
    try:
        _user = _session.query(User).one()
    except NoResultFound:
        _user = User()
        _session.add(_user)
        _session.commit()
    if _user.state != LoginStatus.logon:
        _user.state = LoginStatus.logoff
        _session.commit()
    # Start update loop
    Timer(5, lgogdaemon.update_loop,
          (config.update_period, lgogdaemon.update,
           (update_scheduler,))).start()
    # Add to the download queue games marked in the DB
    _games = _session.query(Game).all()
    for _game in _games:
        if _game.state == Status.queued or _game.state == Status.running:
            app.logger.info("Found %s game for download: %s",
                            _game.name, _game.state)
            download_scheduler.submit(lgogdaemon.download, _game.name)
    Session.remove()


@app.after_request
def session_cleaner(response):
    """Cleanup session after each request."""
    Session.remove()
    return response


@app.route('/')
def library():
    """Display the main page."""
    _session = Session()
    # TODO store in some cache
    data = {
        'games': []
    }
    try:
        with open(os.path.join(
                config.lgog_cache, 'gamedetails.json'),
                encoding='utf-8') as f:
            data = json.load(f)
    except FileNotFoundError:
        pass
    _user = _session.query(User).one()
    _user_data = {
        'state': _user.state.name,
        'selected': {}
    }
    _user_data['selected']['windows'] = (_user.platform & 1 == 1)
    _user_data['selected']['macos'] = (_user.platform & 2 == 2)
    _user_data['selected']['linux'] = (_user.platform & 4 == 4)
    _metadata = []
    for game_data in data['games']:
        _db_found = False
        _available = 0
        _selected = 0
        _meta = {
            'gamename': game_data['gamename'],
            'title': game_data['title'],
            'icon': game_data['icon']
        }
        if 'installers' not in game_data:
            continue
        for inst in game_data['installers']:
            _available |= inst['platform']
        try:
            db_game = _session.query(Game).filter(
                Game.name == game_data['gamename']).one()
            _db_found = True
            if db_game.platform_available != _available:
                db_game.platform_available = _available
                _session.add(db_game)
            # app.logger.debug("Game in DB: %s", db_game.name)
        except NoResultFound:
            _name = game_data['gamename']
            db_game = Game()
            db_game.name = _name
            db_game.state = Status.new
            db_game.platform_available = _available
            _game_dir = os.path.join(config.lgog_library, _name)
            if os.path.isdir(_game_dir):
                _platform_linux = False
                _platform_mac = False
                _platform_windows = False
                # Search for downloaded installers
                for _file in os.listdir(_game_dir):
                    if not os.path.isdir(os.path.join(_game_dir, _file)):
                        if _file.endswith('.sh'):
                            _platform_linux = True
                        elif _file.endswith('.exe'):
                            _platform_windows = True
                        elif _file.endswith('.pkg'):
                            _platform_mac = True
                        elif _file.endswith('.dmg'):
                            _platform_mac = True
                _platform = 0
                if _platform_windows:
                    _platform |= 1
                if _platform_mac:
                    _platform |= 2
                if _platform_linux:
                    _platform |= 4
                if _platform > 0:
                    db_game.platform_ondisk = _platform
                    db_game.state = Status.done
                    if (_platform & _user.platform) == 0:
                        db_game.platform = _platform
            _session.add(db_game)
            _session.commit()
        _meta['state'] = db_game.state.name
        _meta['progress'] = int(db_game.progress)
        _meta['done_count'] = int(db_game.done_count)
        _meta['missing_count'] = int(db_game.missing_count)
        _meta['update_count'] = int(db_game.update_count)
        _meta['user_selected'] = False
        if db_game.platform >= 0:
            _selected = db_game.platform
            _meta['user_selected'] = True
        else:
            _selected = (_available & _user.platform)
        if _meta['missing_count'] == 0 and _selected != (db_game.platform_ondisk & _selected):
            _meta['missing_count'] = 1
        _ondisk = db_game.platform_ondisk
        _meta['available'] = {}
        _meta['selected'] = {}
        _meta['ondisk'] = {}
        _meta['available']['windows'] = (_available & 1 == 1)
        _meta['available']['macos'] = (_available & 2 == 2)
        _meta['available']['linux'] = (_available & 4 == 4)
        _meta['selected']['windows'] = (_selected & 1 == 1)
        _meta['selected']['macos'] = (_selected & 2 == 2)
        _meta['selected']['linux'] = (_selected & 4 == 4)
        _meta['ondisk']['windows'] = (_ondisk & 1 == 1)
        _meta['ondisk']['macos'] = (_ondisk & 2 == 2)
        _meta['ondisk']['linux'] = (_ondisk & 4 == 4)
        # app.logger.debug("%s\n%s\n%s", game_data["gamename"],
        # game_data["selected"], game_data["available"])
        _metadata.append(_meta)
        if _db_found and db_game.state == Status.done and \
                db_game.platform != (
                    db_game.platform_ondisk & db_game.platform
                ):
            db_game.state = Status.missing
            _session.add(db_game)
    _session.commit()
    # app.logger.debug(_metadata)
    _root = request.environ['SCRIPT_NAME'] or ''
    return render_template('library.html', data=_metadata, user=_user_data,
                           root_url=_root)


@app.route('/platform/<game>/<platform>')
def toggle_platform(game, platform):
    """
    Toggle active state of platform for a game.
:param game: - game name :param platform: - platform id (1 - windows, 2 - macos, 4 - linux) """ app.logger.info("Requesting toggle of %s platform: %s.", game, platform) _session = Session() _user = _session.query(User).one() _result = { 'missing': False } _platform_list = [1, 2, 4] _platform = int(platform) if _platform not in _platform_list: app.logger.error( "Unknown platform requested for %s: %s", game, platform) return "Unknown platform requested", 400 app.logger.info( "Requesting change of platform for %s: %s.", game, platform) try: # Game in db - toggle platfrom db_game = _session.query(Game).filter(Game.name == game).one() except NoResultFound: app.logger.error("Game %s not found in DB.", game) return "Unable to find game in the database.", 500 # app.logger.debug("Game %s found in the DB.", game) if db_game.state == Status.running: return "Platform change during download is prohibited.", 400 _selected = db_game.platform if _selected < 0: _selected = (db_game.platform_available & _user.platform) _state = _selected & _platform if _state == _platform: # Disable platform _mask = ~ _platform db_game.platform = _selected & _mask else: db_game.platform = _selected | _platform if db_game.platform != (db_game.platform_ondisk & db_game.platform): app.logger.debug("Game %s missing platforms.", game) if db_game.state != Status.queued or \ db_game.state != Status.running: db_game.state = Status.missing _result['missing'] = True else: app.logger.debug("Game %s has all platforms.", game) if db_game.state == Status.missing: db_game.state = Status.done _session.commit() return jsonify(_result) @app.route('/default_platform/<platform>') def toggle_default_platform(platform): """ Toggle active state of platfrom for a game. :param game: - game name :param platform: - platform id (1 - windows, 2 - macos, 4 - linux) """ app.logger.info("Requesting toggle of default platform: %s.", platform) _session = Session() _user = _session.query(User).one() _result = {} _platform_list = [1, 2, 4] _platform = int(platform) if _platform not in _platform_list: app.logger.error( "Unknown platform requested: %s", platform) return "Unknown platform requested", 400 app.logger.info( "Requesting change of platform: %s.", platform) app.logger.debug("Current: %s", _user.platform) _state = _user.platform & _platform if _state == _platform: # Disable platform _mask = ~ _platform _user.platform = _user.platform & _mask else: _user.platform = _user.platform | _platform # Game in db - toggle platfrom db_games = _session.query(Game).filter( Game.platform_available.op('&')(_platform) == _platform).all() for db_game in db_games: # app.logger.debug("Game %s found in the DB.", game) if db_game.state == Status.running: return "Platform change during download is prohibited.", 400 _selected = db_game.platform if _selected < 0: if (_user.platform & db_game.platform_available) != \ (db_game.platform_ondisk & _user.platform & db_game.platform_available) \ and db_game.state != Status.queued and \ db_game.state != Status.running: _result[db_game.name] = {'missing': True} else: _result[db_game.name] = {'missing': False} _session.commit() return jsonify(_result) @app.route('/download/<game>') def download(game): """ Request game download :param game: - game name """ _session = Session() app.logger.info("Requesting download of: %s.", game) try: db_game = _session.query(Game).filter(Game.name == game).one() # app.logger.debug("Game %s found in the DB.", game) except NoResultFound: app.logger.error("Game %s not found in DB.", game) return "Unable to 
find the game in the database.", 500
    if db_game.state != Status.running:
        db_game.state = Status.queued
        db_game.progress = 0
        _session.commit()
        download_scheduler.submit(lgogdaemon.download, game)
    return "OK"


@app.route('/stop/<game>')
def stop(game):
    """
    Stop game download
    :param game: - game name
    """
    _session = Session()
    app.logger.info("Requesting stop of: %s.", game)
    try:
        db_game = _session.query(Game).filter(Game.name == game).one()
        # app.logger.debug("Game %s found in the DB.", game)
    except NoResultFound:
        return "Unable to find game in the database.", 500
    if db_game.state == Status.running or db_game.state == Status.queued:
        db_game.state = Status.stop
        _session.commit()
    return "OK"


@app.route('/status', methods=['GET'])
def status_all():
    """
    Get status of all active downloads.
    """
    # app.logger.debug("List of active game downloads")
    _session = Session()
    games = _session.query(Game).filter(
        or_(Game.state == Status.queued, Game.state == Status.running)).all()
    result = [game.name for game in games]
    return jsonify(result)


@app.route('/status', methods=['POST'])
def status_selected():
    """
    Get status of selected downloads. The list of games to check should be
    sent as POST data.
    """
    check_games = request.get_json()
    # app.logger.debug("Status of games requested: %s", check_games)
    _session = Session()
    games = _session.query(Game).filter(
        Game.name.in_(check_games)).all()
    result = {}
    # logging.debug("Found %s games.", len(games))
    for game in games:
        game_res = {
            'state': game.state.name,
            'progress': game.progress
        }
        # logging.debug("%s : %s", game.name, game.state)
        if game.state == Status.done:
            game_res['progress'] = 100
        elif game.state != Status.queued and game.state != Status.running:
            game_res['progress'] = 0
        result[game.name] = game_res
    return jsonify(result)


@app.route('/user_status', methods=['GET'])
def user_status():
    """
    Get status of user session
    """
    _session = Session()
    _user = _session.query(User).one()
    _last_update = _user.last_update
    if _last_update is not None:
        _last_update = int(_last_update.timestamp())
    else:
        _last_update = 0
    result = {
        'user_status': _user.state.name,
        'last_update': _last_update
    }
    return jsonify(result)


@app.route('/login', methods=['POST'])
def login():
    """
    Execute login to GOG.com
    """
    user = request.form['user']
    password = request.form['password']
    update_scheduler.submit(lgogdaemon.login, user, password)
    return redirect(url_for('library'))


@app.route('/login_2fa', methods=['POST'])
def login_2fa():
    """
    Set 2FA code for GOG.com
    """
    code = request.form['code']
    app.logger.debug("Security code received: %s", code)
    lgogdaemon.msgQueue.put(code)
    _session = Session()
    _user = _session.query(User).one()
    _user.state = LoginStatus.running
    _session.commit()
    return redirect(url_for('library'))


@app.route('/gog-repo/<path:path>')
def browse(path):
    """
    Load directory view for selected path.
    """
    return index.render_autoindex(path, endpoint='.browse')
bsd-2-clause
3,339,231,611,091,875,300
34.275463
94
0.572741
false
darcyliu/storyboard
boto/mws/exception.py
1
2535
# Copyright (c) 2012 Andy Davidoff http://www.disruptek.com/ # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, dis- # tribute, sublicense, and/or sell copies of the Software, and to permit # persons to whom the Software is furnished to do so, subject to the fol- # lowing conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT # SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. from boto.exception import BotoServerError class ResponseErrorFactory(BotoServerError): def __new__(cls, *args, **kw): error = BotoServerError(*args, **kw) try: newclass = globals()[error.error_code] except KeyError: newclass = ResponseError obj = newclass.__new__(newclass, *args, **kw) obj.__dict__.update(error.__dict__) return obj class ResponseError(BotoServerError): """ Undefined response error. """ retry = False def __repr__(self): return '{}({}, {},\n\t{})'.format(self.__class__.__name__, self.status, self.reason, self.error_message) def __str__(self): return 'MWS Response Error: {0.status} {0.__class__.__name__} {1}\n' \ '{2}\n' \ '{0.error_message}'.format(self, self.retry and '(Retriable)' or '', self.__doc__.strip()) class RetriableResponseError(ResponseError): retry = True class InvalidParameterValue(ResponseError): """ One or more parameter values in the request is invalid. """ class InvalidParameter(ResponseError): """ One or more parameters in the request is invalid. """ class InvalidAddress(ResponseError): """ Invalid address. """
mit
-7,415,622,772,086,536,000
32.8
78
0.631164
false
ClockworkOrigins/m2etis
configurator/autosim/ssh/SSHClient.py
1
2844
#!/usr/bin/env python
__author__ = 'Andreas M. Wahl'

import logging

import paramiko

logging.getLogger("paramiko").setLevel(logging.WARNING)


class SSHClient:

    def __init__(self, hostname, port, username, key_filename):
        self.hostname = hostname
        self.port = port
        self.username = username
        self.key_filename = key_filename
        self.client = paramiko.SSHClient()
        self.client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        self.client.load_system_host_keys()

    def connect(self):
        """
        Connect to the remote server.
        """
        self.client.connect(hostname=self.hostname, username=self.username, key_filename=self.key_filename)

    def disconnect(self):
        """
        Disconnect from the remote server.
        """
        self.client.close()

    def execute(self, command):
        """
        Execute a command on the remote server and retrieve the remote I/O-Streams.
        :param command: command to be executed
        :return: the stdin, stdout and stderr streams of the executed command
        """
        inpt, outpt, errpt = self.client.exec_command(command)
        return inpt, outpt, errpt

    def copy_file(self, file_path, local_file):
        """
        Copy a local file to the remote server.
        :param file_path: target path on the remote server
        :param local_file: local file to be copied
        """
        sftp_client = self.client.open_sftp()
        sftp_client.put(local_file, file_path)
        sftp_client.close()

    def delete_file(self, file_path):
        """
        Delete a file from the remote server.
        :param file_path: path to the file to be deleted
        """
        sftp_client = self.client.open_sftp()
        sftp_client.remove(file_path)
        sftp_client.close()

    def read_file(self, file_path):
        """
        Read a file from the remote server.
        :param file_path: path to the file to be read
        :return: contents of the remote file
        """
        sftp_client = self.client.open_sftp()
        fileObject = sftp_client.file(file_path)
        fileContent = fileObject.readlines()
        fileObject.close()
        sftp_client.close()
        return fileContent

    def stat(self, file_path):
        """
        Check if a file exists on the remote server.
        :param file_path: path of the file to be checked
        :return: True if the file exists, False otherwise
        :raise: IOError if there is a problem with the remote file system
        """
        sftp_client = self.client.open_sftp()
        try:
            sftp_client.stat(file_path)
        except IOError, e:
            # errno 2 (ENOENT): the remote path does not exist
            if e[0] == 2:
                return False
            raise
        else:
            return True
        finally:
            sftp_client.close()
apache-2.0
-7,673,391,323,919,479,000
29.913043
107
0.594585
false
raptor2101/Gamestar
default.py
1
2666
# -*- coding: utf-8 -*- #-------------LicenseHeader-------------- # plugin.video.gamestar - Downloads/view videos from gamestar.de # Copyright (C) 2010 Raptor 2101 [[email protected]] # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. import xbmcaddon; from gamestar import GamestarWeb from gamepro import GameproWeb from simplexbmc import SimpleXbmcGui; def get_params(): """ extract params from argv[2] to make a dict (key=value) """ paramDict = {} try: print "get_params() argv=", sys.argv if sys.argv[2]: paramPairs=sys.argv[2][1:].split( "&" ) for paramsPair in paramPairs: paramSplits = paramsPair.split('=') if (len(paramSplits))==2: paramDict[paramSplits[0]] = paramSplits[1] except (ValueError,IndexError): errorOK() return paramDict __settings__ = xbmcaddon.Addon(id='plugin.video.gamestar') rootPath = __settings__.getAddonInfo('path'); gui = SimpleXbmcGui(" "); displayGamestar = __settings__.getSetting('gamestar') == "true"; displayGamepro = __settings__.getSetting('gamepro') == "true"; displayYoutube = __settings__.getSetting('youtube') == "true"; displayYoutube = __settings__.getSetting('show_shortname') == "true"; gui.openMenuContext(); params=get_params() action=params.get("action", "") cat=int(params.get("cat", 0)) gui.log("action: "+action); gui.log("cat: %s"%cat); if(action == "list"): videoObjects = []; if(displayGamestar): website = GamestarWeb(gui); videoObjects.extend(website.getVideoLinkObjects(cat)) if(displayGamepro): website = GameproWeb(gui); videoObjects.extend(website.getVideoLinkObjects(cat)) gui.buildVideoLink(videoObjects); else: categories = {}; if(displayGamestar): website = GamestarWeb(gui); for (index,pictureLink) in website.getCategories().iteritems(): categories[index]=pictureLink; if(displayGamepro): website = GameproWeb(gui); for (index,pictureLink) in website.getCategories().iteritems(): if index not in categories: categories[index]=pictureLink; gui.showCategories(categories); gui.closeMenuContext();
gpl-3.0
-3,830,740,289,785,186,000
32.746835
71
0.705551
false
eliksir/mailmojo-python-sdk
mailmojo_sdk/api/account_api.py
1
16311
# coding: utf-8 """ MailMojo API v1 of the MailMojo API # noqa: E501 OpenAPI spec version: 1.1.0 Contact: [email protected] Generated by: https://github.com/swagger-api/swagger-codegen.git """ from __future__ import absolute_import import re # noqa: F401 # python 2 and python 3 compatibility library import six from mailmojo_sdk.api_client import ApiClient class AccountApi(object): """NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. Ref: https://github.com/swagger-api/swagger-codegen """ def __init__(self, api_client=None): if api_client is None: api_client = ApiClient() self.api_client = api_client def create_account(self, user, **kwargs): # noqa: E501 """Create an account. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.create_account(user, async_req=True) >>> result = thread.get() :param async_req bool :param UserCreation user: (required) :return: User If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.create_account_with_http_info(user, **kwargs) # noqa: E501 else: (data) = self.create_account_with_http_info(user, **kwargs) # noqa: E501 return data def create_account_with_http_info(self, user, **kwargs): # noqa: E501 """Create an account. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.create_account_with_http_info(user, async_req=True) >>> result = thread.get() :param async_req bool :param UserCreation user: (required) :return: User If the method is called asynchronously, returns the request thread. """ all_params = ['user'] # noqa: E501 all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in six.iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method create_account" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'user' is set if ('user' not in params or params['user'] is None): raise ValueError("Missing the required parameter `user` when calling `create_account`") # noqa: E501 collection_formats = {} path_params = {} query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None if 'user' in params: body_params = params['user'] # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501 ['application/json']) # noqa: E501 # Authentication setting auth_settings = ['mailmojo_auth'] # noqa: E501 return self.api_client.call_api( '/v1/accounts/', 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='User', # noqa: E501 auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def get_account_by_username(self, username, **kwargs): # noqa: E501 """Retrieve account details. 
# noqa: E501 This endpoint can be used to get details about your own account, or a subuser associated with you as a partner. If the username of your current authenticated user is unknown, you may use the special username 'me' to retrieve details about the authenticated user account. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.get_account_by_username(username, async_req=True) >>> result = thread.get() :param async_req bool :param str username: Username of the account to get details for, or the special username `me` to get details about your authenticated user. (required) :return: User If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.get_account_by_username_with_http_info(username, **kwargs) # noqa: E501 else: (data) = self.get_account_by_username_with_http_info(username, **kwargs) # noqa: E501 return data def get_account_by_username_with_http_info(self, username, **kwargs): # noqa: E501 """Retrieve account details. # noqa: E501 This endpoint can be used to get details about your own account, or a subuser associated with you as a partner. If the username of your current authenticated user is unknown, you may use the special username 'me' to retrieve details about the authenticated user account. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.get_account_by_username_with_http_info(username, async_req=True) >>> result = thread.get() :param async_req bool :param str username: Username of the account to get details for, or the special username `me` to get details about your authenticated user. (required) :return: User If the method is called asynchronously, returns the request thread. 
""" all_params = ['username'] # noqa: E501 all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in six.iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method get_account_by_username" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'username' is set if ('username' not in params or params['username'] is None): raise ValueError("Missing the required parameter `username` when calling `get_account_by_username`") # noqa: E501 collection_formats = {} path_params = {} if 'username' in params: path_params['username'] = params['username'] # noqa: E501 query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501 ['application/json']) # noqa: E501 # Authentication setting auth_settings = ['mailmojo_auth'] # noqa: E501 return self.api_client.call_api( '/v1/accounts/{username}/', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='User', # noqa: E501 auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def get_domain(self, domain, **kwargs): # noqa: E501 """Retrieve domain details and authentication status. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.get_domain(domain, async_req=True) >>> result = thread.get() :param async_req bool :param str domain: (required) :return: Domain If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.get_domain_with_http_info(domain, **kwargs) # noqa: E501 else: (data) = self.get_domain_with_http_info(domain, **kwargs) # noqa: E501 return data def get_domain_with_http_info(self, domain, **kwargs): # noqa: E501 """Retrieve domain details and authentication status. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.get_domain_with_http_info(domain, async_req=True) >>> result = thread.get() :param async_req bool :param str domain: (required) :return: Domain If the method is called asynchronously, returns the request thread. 
""" all_params = ['domain'] # noqa: E501 all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in six.iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method get_domain" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'domain' is set if ('domain' not in params or params['domain'] is None): raise ValueError("Missing the required parameter `domain` when calling `get_domain`") # noqa: E501 collection_formats = {} path_params = {} if 'domain' in params: path_params['domain'] = params['domain'] # noqa: E501 query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501 ['application/json']) # noqa: E501 # Authentication setting auth_settings = ['mailmojo_auth'] # noqa: E501 return self.api_client.call_api( '/v1/domains/{domain}/', 'GET', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='Domain', # noqa: E501 auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) def update_account(self, username, **kwargs): # noqa: E501 """Update account details. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.update_account(username, async_req=True) >>> result = thread.get() :param async_req bool :param str username: Username of the user to update. (required) :return: User If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.update_account_with_http_info(username, **kwargs) # noqa: E501 else: (data) = self.update_account_with_http_info(username, **kwargs) # noqa: E501 return data def update_account_with_http_info(self, username, **kwargs): # noqa: E501 """Update account details. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.update_account_with_http_info(username, async_req=True) >>> result = thread.get() :param async_req bool :param str username: Username of the user to update. (required) :return: User If the method is called asynchronously, returns the request thread. 
""" all_params = ['username'] # noqa: E501 all_params.append('async_req') all_params.append('_return_http_data_only') all_params.append('_preload_content') all_params.append('_request_timeout') params = locals() for key, val in six.iteritems(params['kwargs']): if key not in all_params: raise TypeError( "Got an unexpected keyword argument '%s'" " to method update_account" % key ) params[key] = val del params['kwargs'] # verify the required parameter 'username' is set if ('username' not in params or params['username'] is None): raise ValueError("Missing the required parameter `username` when calling `update_account`") # noqa: E501 collection_formats = {} path_params = {} if 'username' in params: path_params['username'] = params['username'] # noqa: E501 query_params = [] header_params = {} form_params = [] local_var_files = {} body_params = None # HTTP header `Accept` header_params['Accept'] = self.api_client.select_header_accept( ['application/json']) # noqa: E501 # HTTP header `Content-Type` header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501 ['application/json']) # noqa: E501 # Authentication setting auth_settings = ['mailmojo_auth'] # noqa: E501 return self.api_client.call_api( '/v1/accounts/{username}/', 'POST', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type='User', # noqa: E501 auth_settings=auth_settings, async_req=params.get('async_req'), _return_http_data_only=params.get('_return_http_data_only'), _preload_content=params.get('_preload_content', True), _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats)
apache-2.0
-4,093,272,954,201,039,400
37.46934
292
0.5839
false
hoettges/QKan
qkan/ganglinienhe8/application.py
1
39367
# -*- coding: utf-8 -*- """ /*************************************************************************** Laengsschnitt A QGIS plugin Plugin für einen animierten Laengsschnitt ------------------- begin : 2017-02-16 git sha : $Format:%H$ copyright : (C) 2017 by Leon Ochsenfeld email : [email protected] ***************************************************************************/ /*************************************************************************** * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * * (at your option) any later version. * * * ***************************************************************************/ """ import copy import logging import os.path from qgis.core import Qgis, QgsProject from qgis.PyQt.QtCore import Qt from qgis.PyQt.QtGui import QIcon from qgis.PyQt.QtWidgets import QFileDialog, QGridLayout, QLabel, QMessageBox from qkan.database.sbfunc import SBConnection from qkan.database.navigation import Navigator from qkan import QKan # noinspection PyUnresolvedReferences from . import plotter, resources from . import slider as s from .application_dialog import LaengsschnittDialog from .Enums import LayerType, SliderMode, Type from .ganglinie8 import Ganglinie8 # Initialize Qt resources from file resources.py main_logger = logging.getLogger("QKan.ganglinienhe8.application.main") main_logger.info("Application-Modul gestartet") class GanglinienHE8: """QGIS Plugin Implementation.""" def __init__(self, iface): """Constructor. :param iface: An interface instance that will be passed to this class which provides the hook by which you can manipulate the QGIS application at run time. :type iface: QgsInterface """ self.__log = logging.getLogger("QKan.application.Laengsschnitt") self.__t = 2 # Save reference to the QGIS interface self.__iface = iface # Create the dialog (after translation) and keep reference self.__dlg = LaengsschnittDialog() self.__result_db = "" self.__spatialite = "" self.__maximizer = None self.__animator = None self.__speed_controller = None self.__speed_label = None self.__default_function = None self.__ganglinie = Ganglinie8(1) self.__dlg2 = self.__ganglinie.get_dialog() self.__workspace = "" def initGui(self): """ Längsschnitt- und Ganglinie-Tool werden als unabhängige Werkzeuge dargestellt. Hier werden die GUI-Elemente mit bestimmten Event-Listenern verbunden. 
""" icon_path_laengs = ":/plugins/qkan/ganglinienhe8/icon_laengs.png" icon_path_gangl = ":/plugins/qkan/ganglinienhe8/icon_gangl.png" icon_forward = ":/plugins/qkan/ganglinienhe8/icon_forward.png" icon_backward = ":/plugins/qkan/ganglinienhe8/icon_backward.png" QKan.instance.add_action( icon_path_laengs, text="Längsschnitt-Tool für HE8", callback=self.__run, parent=self.__iface.mainWindow(), ) QKan.instance.add_action( icon_path_gangl, text="Ganglinien-Tool für HE8", callback=self.__run_ganglinie, parent=self.__iface.mainWindow(), ) self.__dlg.setWindowFlags(Qt.Window) self.__dlg2.setWindowFlags(Qt.Window) self.__dlg.btn_forward.setText("") self.__dlg.btn_forward.setIcon(QIcon(icon_forward)) self.__dlg.btn_backward.setText("") self.__dlg.btn_backward.setIcon(QIcon(icon_backward)) self.__dlg.finished.connect(self.__finished) self.__dlg.btn_path.clicked.connect(self.__select_db) self.__dlg.checkbox_maximum.stateChanged.connect(self.__switch_max_values) self.__dlg.btn_forward.clicked.connect(self.__step_forward) self.__dlg.btn_backward.clicked.connect(self.__step_backward) self.__dlg.btn_ganglinie.clicked.connect(self.__ganglinie.show) def __finished(self): """ Schließt den Ganglinien-Dialog, falls geöffnet """ self.__log.info("Ganglinien-Dialog wird geschlossen") self.__dlg2.close() def unload(self): pass def __step_forward(self): """ Geht einen Datensatz nach rechts im Zeitstrahl, falls der Entpunkt nicht erreicht ist. Pausiert die Animation, falls diese läuft. """ if self.__speed_controller.get_mode() != SliderMode.Pause: self.__log.info("Animation ist noch nicht pausiert") self.__speed_controller.set_paused() self.__animator.pause() value = self.__dlg.slider.value() maximum = self.__dlg.slider.maximum() if value < maximum: self.__log.info("Zeitstrahl-Slider wird ein Schritt weiter gesetzt") self.__log.debug( "Zeitstrahl-Slider hat jetzt den Wert {}".format(value + 1) ) self.__dlg.slider.setValue(value + 1) def __step_backward(self): """ Geht einen Datensatz nach links im Zeitstrahl, falls der Anfangspunkt nicht erreicht ist. Pausiert die Animation, falls diese läuft. """ if self.__speed_controller.get_mode() != SliderMode.Pause: self.__log.info("Animation ist noch nicht pausiert") self.__speed_controller.set_paused() self.__animator.pause() value = self.__dlg.slider.value() minimum = self.__dlg.slider.minimum() if value > minimum: self.__log.info("Zeitstrahl-Slider wird ein Schritt zurueck gesetzt") self.__log.debug( "Zeitstrahl-Slider hat jetzt den Wert {}".format(value - 1) ) self.__dlg.slider.setValue(value - 1) def __switch_max_values(self, activate): """ Macht die Maximal-Linie sichtbar bzw unsichtbar, abhängig vom Zustand der Checkbox. Plottet die Maximal-Linie falls nicht vorhanden. :param activate: Zustand der Checkbox, nach dem anklicken. :type activate: int """ if activate == 2: self.__maximizer.show() else: self.__maximizer.hide() @staticmethod def __show_message_box(title, _string, _type): """ Generiert eine Messagebox. Abhängig vom _type werden unterschiedliche Optionen in den Dialog eingebunden. Es wird False zurückgegeben, wenn der User auf "Abbrechen" drückt. :param title: Der Titel der MessageBox :type title: str :param _string: Der Inhalt der MessageBox :type _string: str :param _type: Welche Buttons generiert werden sollen. Bzw die Art der MessageBox. 
:type _type: Type :return: Ob der User auf "Abbrechen" gedrückt hat :rtype: bool """ if _type == Type.Error: standard_buttons = QMessageBox.Ok default_button = QMessageBox.Ok else: standard_buttons = QMessageBox.Cancel | QMessageBox.Open default_button = QMessageBox.Open msg = QMessageBox() msg.setStandardButtons(standard_buttons) msg.setDefaultButton(default_button) msg.setText(_string) msg.setWindowTitle(title) if default_button == QMessageBox.Open: return msg.exec_() != QMessageBox.Open else: msg.exec_() def __speed_control(self, value): """ * Übergibt der Animation die neue Geschwindigkeit. * Updatet den Stil des Sliders Ist die Geschwindigkeit 0, wird die Animation pausiert. :param value: Geänderte Geschwindigkeit. :type value: int """ if self.__speed_controller.get_mode() == SliderMode.Pause: if self.__speed_controller.get_last_mode() == SliderMode.Forward: self.__speed_label.setText("Geschwindigkeit: {}x".format(value)) self.__speed_label.setStyleSheet( "QLabel {color:qlineargradient(x1:0, y1:0, x2:0, y2:1, stop:0 #050DFF, stop:1 #757AFF);}" ) else: self.__speed_label.setText("Geschwindigkeit: -{}x".format(value)) self.__speed_label.setStyleSheet( "QLabel {color:qlineargradient(x1:0, y1:0, x2:1, y2:1, stop:0 #000000, stop:1 #8f8f8f);}" ) elif self.__speed_controller.get_mode() == SliderMode.Forward: self.__speed_label.setText("Geschwindigkeit: {}x".format(value)) self.__speed_label.setStyleSheet( "QLabel {color:qlineargradient(x1:0, y1:0, x2:0, y2:1, stop:0 #050DFF, stop:1 #757AFF);}" ) else: self.__speed_label.setText("Geschwindigkeit: -{}x".format(value)) self.__speed_label.setStyleSheet( "QLabel {color:qlineargradient(x1:0, y1:0, x2:1, y2:1, stop:0 #000000, stop:1 #8f8f8f);}" ) if self.__speed_controller.get_mode() == SliderMode.Pause: self.__log.info("Speed-Controller ist pausiert") self.__animator.pause() return if value == 0: self.__log.info("Neue Geschwindigkeit ist 0") self.__animator.pause() else: self.__log.info("Animation wird in gewünschte Konfiguration abgespielt") self.__animator.play(value, self.__speed_controller.get_mode()) def __slider_click(self, event): """ * Ist der Eventlistener des Zeitstrahl-Sliders bei einem Mausklick. * Definiert das Verhalten je nach gedrückter Taste. :param event: Entspricht dem Mausevent, wenn der Slider angeklickt wird :type event: QMouseEvent """ self.__log.info("Der Zeitstrahl-Slider wurde angeklickt") ctrl = event.modifiers() == Qt.ControlModifier if event.button() == Qt.RightButton: if ctrl: self.__log.debug("STRG+RMT wurde gedrückt") self.__speed_controller.ctrl_click() else: self.__log.debug("RMT wurde gedrückt") self.__speed_controller.set_paused() if self.__speed_controller.get_mode() == SliderMode.Pause: self.__animator.pause() else: self.__log.info("Animation wird fortgesetzt") else: if self.__speed_controller.get_mode() != SliderMode.Pause: self.__speed_controller.set_paused() self.__animator.pause() self.__log.info( "Zeitstrahl-Slider bekommt seinen Default-EventListener zugewiesen" ) self.__default_function(event) def __select_db(self, ganglinie=False): """ Diese Funktion öffnet einen Datei-Dialog, welcher den User auffordert eine Ergebnis-Datenbank auszuwählen. 
:param ganglinie: Ob Ganglinie oder Längsschnitt gestartet werden soll :type ganglinie: bool """ try: self.__animator.pause() except AttributeError: pass filename, __ = QFileDialog.getOpenFileName( self.__dlg, "Wählen Sie eine Ergebnis-Datenbank", filter="IDBR (*.idbr);; Alle Dateien (*.*)", ) if filename != "": self.__result_db = filename self.__dlg.label_dbname.setText(filename) if ganglinie: self.__run_ganglinie() else: self.__run() def __layer_to_type(self, layer): """ Wandelt layer in einen LayerType um. So wird unabhängig von der User-spezifischen Benennung der richtige Layer gewählt. :param layer: Ist der übergebene Layer, welcher in einen Typen geparst werden soll :type layer: QgsVectorLayer :return: Gibt einen LayerType zurück, der dem übergebenen QgsVectorLayer entspricht :rtype: LayerType """ layer_source = layer.source() kvp = layer_source.split(" ") name = "" for kv in kvp: if kv.startswith("table"): name = kv.split("=")[1][1:-1] elif kv.startswith("dbname") and self.__spatialite == "": self.__spatialite = kv.split("=")[1][1:-1] self.__log.info("SpatiaLite-Datenbank wurde gesetzt") self.__log.debug( u'SpatiaLite-Datenbank liegt in "{}"'.format(self.__spatialite) ) self.__workspace = os.path.dirname(self.__spatialite) self.__log.debug( u'Workspace wurde auf "{}" gesetzt'.format(self.__workspace) ) types = dict( wehre=LayerType.Wehr, haltungen=LayerType.Haltung, schaechte=LayerType.Schacht, pumpen=LayerType.Pumpe, ) try: return types[name] except KeyError: return -1 def __check_resultDB(self, route): """ Prüft, ob alle übergebenen Elemente in der Ergebnisdatenbank liegen. :param route: Routen-Objekt :type route: dict :return: Gibt zurück, ob alle übergebenen Elemente in der Ergebnisdatenbank liegen :rtype: bool """ haltungen = route.get("haltungen", []) schaechte = route.get("schaechte", []) db = SBConnection(self.__result_db) if db is None: main_logger.error( "QKan.Ganglinie8.__check_resultDB:\nDatenbank konnte nicht geöffnet werden:\n{}".format( self.__result_db ) ) statement = u'SELECT kante FROM lau_max_el WHERE "KANTE"={}' for haltung in haltungen: db.sql(statement.format("'{}'".format(haltung))) if db.fetchone() is None: return False statement = u'SELECT knoten FROM lau_max_s WHERE "KNOTEN"={}' for schacht in schaechte: db.sql(statement.format("'{}'".format(schacht))) if db.fetchone() is None: return False del db return True def __run(self): """ Wird aufgerufen, wenn der Längsschnitt angeklickt wird. """ self.__log.info("Längsschnitt-Tool gestartet!") def init_application(): """ Initialisiert den Längsschnitt und liest die gewählten Layer aus. Prüft außerdem auf Kompatibilität und Anzahl der Layer. Bricht ggf. die Funktion ab, wenn der Datensatz fehlerhaft ist. :return: Gibt eine Liste der selektierten Layer zurück und einen LayerType :rtype: (list,LayerType) """ if self.__animator is not None: self.__log.info("Animator bereits vorhanden!") self.__animator.pause() if self.__speed_controller is not None: self.__log.info("Speed-Controller bereits vorhanden!") self.__speed_controller.reset() if self.__ganglinie is not None: self.__dlg2.close() self.__log.info("Ganglinie8 wurde geschlossen.") self.__dlg.close() selected_layers = self.__iface.layerTreeView().selectedLayers() if len(selected_layers) == 0: self.__log.critical("Es wurde kein Layer ausgewählt!") self.__iface.messageBar().pushCritical( "Fehler", "Wählen Sie zunächst ein Layer!" 
) return False layer_types = [] for layer in selected_layers: layer_types.append(self.__layer_to_type(layer)) layer_types = list(set(layer_types)) if len(layer_types) != 1: for _l in layer_types: if _l not in [LayerType.Haltung, LayerType.Wehr, LayerType.Pumpe]: self.__log.critical( "Gewählte Layer sind inkompatibel zueinander!" ) self.__iface.messageBar().pushCritical( "Fehler", "Inkompatible Layer-Kombination!" ) return False _layer_type = LayerType.Haltung else: _layer_type = layer_types[0] if _layer_type in [LayerType.Wehr, LayerType.Pumpe]: _layer_type = LayerType.Haltung if _layer_type not in [LayerType.Haltung, LayerType.Schacht]: self.__log.critical("Ausgewählter Layer wird nicht unterstützt.") self.__iface.messageBar().pushCritical( "Fehler", "Ausgewählter Layer wird nicht unterstützt!" ) return False self.__log.info("Layer wurde ausgewählt") self.__log.debug( "Gewählter Layer ist {}".format( "Schacht" if _layer_type == LayerType.Schacht else "Haltung" ) ) while self.__result_db == "": stop = self.__show_message_box( "Ergebnis-Datenbank", "Bitte wählen Sie eine Ergebnis-Datenbank aus!", Type.Selection, ) if stop: self.__log.info("Ergebnis-Datenbank-Auswahl wurde abgebrochen.") return False self.__result_db, _ = QFileDialog.getOpenFileName( self.__dlg, "Wählen Sie eine Simulations-Datenbank", self.__workspace, filter="IDBR (*.idbr);; Alle Dateien (*.*)", ) self.__dlg.label_dbname.setText(self.__result_db) self.__log.info("Ergebnis-Datenbank wurde ausgewählt") self.__log.debug("Ergebnis-Datenbank liegt in {}".format(self.__result_db)) self.__log.info("Navigator wurde initiiert.") return selected_layers, _layer_type initialized = init_application() if initialized: self.__log.info("Längsschnitt wurde erfolgreich initiiert!") layers, layer_type = initialized else: self.__log.warning( "Initiierung abgebrochen. Längsschnitt-Tool wird beendet." ) return speed_controller_initialized = self.__speed_controller is None layout = QGridLayout() if speed_controller_initialized: self.__speed_controller = s.Slider() self.__speed_controller.setMaximumWidth(500) self.__speed_controller.setMinimumWidth(300) layout.addWidget(self.__speed_controller, 0, 0, 1, 1, Qt.AlignRight) self.__speed_label = QLabel("Geschwindigkeit: 0x") self.__speed_label.setStyleSheet( "QLabel {color:qlineargradient(x1:0, y1:0, x2:0, y2:1, stop:0 #050DFF, stop:1 #757AFF);}" ) self.__speed_controller.setToolTip( "Links: Geschwindigkeit einstellen\nRechts: Pause/Start\nStrg+Rechts: Geschwindigkeit umkehren" ) layout.addWidget(self.__speed_label, 1, 0, 1, 1, Qt.AlignCenter) self.__dlg.widget.setLayout(layout) self.__log.info( "Speed-Controller wurde erfolgreich initiiert und in den Dialog eingebettet." ) feature_count = 0 for l in layers: feature_count += l.selectedFeatureCount() self.__log.debug("Es wurden {} Elemente selektiert.".format(feature_count)) if feature_count < 2 and layer_type == LayerType.Schacht: self.__log.critical( "Es wurde eine unzureichende Menge an Elementen selektiert!" 
) self.__iface.messageBar().pushCritical( "Fehler", "Bitte wählen Sie mindestens einen Start- und" " Endpunkt Ihrer gewünschten Route!", ) return elif feature_count < 1: self.__log.critical("Es wurde kein Element selektiert!") self.__iface.messageBar().pushCritical( "Fehler", "Bitte wählen Sie mindestens einen Start- und Endpunkt" " Ihrer gewünschten Route!", ) return # run application features = [] for l in layers: features += [f[1] for f in l.selectedFeatures()] features = list(set(features)) self.__log.debug("{} wurde ausgewählt.".format(features)) self.__iface.messageBar().pushMessage( "Navigation", "Route wird berechnet...", level=Qgis.Info, duration=60 ) navigator = MyNavigator(self.__spatialite) if layer_type == LayerType.Haltung: route = navigator.calculate_route_haltung(features) else: route = navigator.calculate_route_schacht(features) self.__iface.messageBar().clearWidgets() if route: self.__log.info("Navigation wurde erfolgreich durchgeführt!") valid_db = self.__check_resultDB(route) if not valid_db: self.__log.critical( "Die übergebene Ergebnisdatenbank ist nicht vollständig." ) self.__iface.messageBar().pushCritical( "Fehler", "Unvollständige oder fehlerhafte Ergebnisdatenbank übergeben!", ) self.__select_db() return self.__log.debug("Route:\t{}".format(route)) else: error_msg = navigator.get_error_msg() self.__log.critical( u'Es trat ein Fehler in der Navigation auf:\t"{}"'.format(error_msg) ) self.__iface.messageBar().pushCritical("Fehler", error_msg) return laengsschnitt = plotter.Laengsschnitt(copy.deepcopy(route)) plotter.set_ax_labels("m", "m") widget, _toolbar = laengsschnitt.get_widget() for i in reversed(list(range(self.__dlg.verticalLayout.count()))): self.__dlg.verticalLayout.itemAt(i).widget().setParent(None) self.__dlg.verticalLayout.addWidget(_toolbar) self.__dlg.stackedWidget.insertWidget(0, widget) self.__dlg.stackedWidget.setCurrentIndex(0) self.__log.info("Toolbar wurde eingebettet.") # init methods self.__dlg.checkbox_maximum.setChecked(True) self.__animator = None self.__animator = plotter.Animator( copy.deepcopy(route), self.__result_db, self.__dlg.slider, self.__dlg.btn_forward, self.__dlg.btn_backward, self.__dlg.label_timestamp, ) self.__ganglinie.refresh( haltungen=route.get("haltungen"), schaechte=route.get("schaechte"), dbname=self.__result_db, laengsschnitt=laengsschnitt, ) self.__ganglinie.draw_at( self.__animator.get_timestamps()[self.__animator.get_last_index()] ) self.__maximizer = None self.__maximizer = plotter.Maximizer(copy.deepcopy(route), self.__result_db) self.__switch_max_values(2) self.__animator.set_ganglinie(self.__ganglinie) self.__dlg2.auto_update.hide() self.__log.info("Auto-Update-Checkbox wurde versteckt") self.__speed_controller.valueChanged.connect(self.__speed_control) self.__dlg.slider.valueChanged.connect(self.__animator.go_step) self.__dlg.slider.setToolTip( "Links: Zeitpunkt einstellen\nRechts: Pause/Start\nStrg+Rechts: Geschwindigkeit umkehren" ) if self.__default_function is None: self.__default_function = self.__dlg.slider.mousePressEvent self.__log.info("MousePressEvent des Sliders wurde gespeichert") self.__dlg.slider.mousePressEvent = lambda event: self.__slider_click(event) plotter.set_legend() self.__dlg.show() self.__log.info("Dialog wird angezeigt") # Längsschnitt starten self.__speed_controller.setValue(5) self.__animator.play(5, SliderMode.Forward) self.__speed_controller.set_paused() # Run the dialog event loop result = self.__dlg.exec_() # See if OK was pressed if result: # neustart # Do something useful 
here - delete the line containing pass and # substitute with your code. pass # else: # beenden self.__animator.pause() self.__speed_controller.reset() self.__log.info("Längsschnitt wurde geschlossen!") def __run_ganglinie(self): """ Wird aufgerufen, wenn das Ganglinien-Tool angeklickt wird. """ tmp = Ganglinie8(self.__t) self.__t += 1 self.__log.info("Ganglinie8 hinzugefügt") def init_application(): """ Initialisiert die Ganglinie mit den nötigen Parametern. Fragt unter anderem die Datenbanken ab und prüft auf Kompatibilität und Anzahl der Layer. Bricht ggf. die Funktion ab, wenn fehlerhafte Daten vorliegen. :return: Gibt eine Liste von den selektierten Layern und dem vorliegenden LayerType zurück. :rtype: (list,LayerType) """ self.__log.info("Ganglinien-Tool wurde gestartet!") while self.__result_db == "": stop = self.__show_message_box( "Ergebnis-Datenbank", "Bitte wählen Sie eine Ergebnis-Datenbank aus!", Type.Selection, ) if stop: self.__log.info("Ergebnis-Datenbank-Auswahl wurde abgebrochen.") return False self.__result_db, _ = QFileDialog.getOpenFileName( self.__dlg, "Wählen Sie eine Simulations-Datenbank", filter="IDBR (*.idbr);; Alle Dateien (*.*)", ) self.__log.info("Ergebnis-Datenbank wurde ausgewählt") self.__log.debug("Ergebnis-Datenbank liegt in {}".format(self.__result_db)) selected_layers = self.__iface.layerTreeView().selectedLayers() if len(selected_layers) == 0: self.__log.critical("Es wurde kein Layer ausgewählt!") self.__iface.messageBar().pushCritical( "Fehler", "Wählen Sie zunächst ein Layer" ) return False layer_types = [] for layer in selected_layers: layer_types.append(self.__layer_to_type(layer)) layer_types = list(set(layer_types)) if len(layer_types) != 1: _layer_type = LayerType.Haltung else: _layer_type = layer_types[0] if _layer_type in [LayerType.Wehr, LayerType.Pumpe]: _layer_type = LayerType.Haltung if _layer_type not in [LayerType.Haltung, LayerType.Schacht]: self.__log.critical("Ausgewählter Layer wird nicht unterstützt.") self.__iface.messageBar().pushCritical( "Fehler", "Ausgewählter Layer wird nicht unterstützt" ) return False self.__log.info("Layer wurde ausgewählt") self.__log.debug( "Gewählter Layer ist {}".format( "Schacht" if _layer_type == LayerType.Schacht else "Haltung" ) ) return True def auto_update_changed(state): """ Ist der Event-Listener der "Automatische Updates"-Checkbox. :param state: Ist der Zustand der Checkbox, nach dem Klicken :type state: int """ self.__log.info( "Auto-Update wurde {}.".format( "aktiviert" if state == 2 else "deaktiviert" ) ) if state == 2: subscribe_auto_update() selection_changed([0]) else: subscribe_auto_update(False) def subscribe_auto_update(subscribing=True): """ Fügt die entsprechenden Event-Listener hinzu, falls subscribing True ist. Es werden ausschließlich die wichtigen Layer subscribed, da nicht alle relevant sind. :param subscribing: Gibt an, ob dem automatischen Updates subscribed/unsubscribed werden soll. :type subscribing: bool """ for layer in important_layers: if subscribing: layer.selectionChanged.connect(selection_changed) self.__log.info("Event-Listener gesetzt") else: try: layer.selectionChanged.disconnect(selection_changed) self.__log.info("Event-Listener entfernt") except TypeError as e: self.__log.warning( "Beim Entfernen eines Layers trat folgender Fehler auf: {}".format( e ) ) pass def selection_changed(selection): """ Wird aufgerufen, wenn ein subscribter Layer eine Veränderung in seinen selektierten Elementen registriert. 
:param selection: Bekommt die geänderte Auswahl eines Layers übergeben :type selection: list """ if len(selection) == 0: return _schaechte = [] _haltungen = [] for _l in important_layers: _layer_type = self.__layer_to_type(_l) if _layer_type == LayerType.Schacht: _schaechte += [_f[1] for _f in _l.selectedFeatures()] elif _layer_type in [ LayerType.Haltung, LayerType.Pumpe, LayerType.Wehr, ]: _haltungen += [_f[1] for _f in _l.selectedFeatures()] _schaechte = list(set(_schaechte)) _haltungen = list(set(_haltungen)) _route = dict(haltungen=_haltungen, schaechte=_schaechte) self.__log.info("Selektierung wurde geändert") self.__log.debug("Selektierung:\t{}".format(_route)) tmp.refresh( haltungen=_route.get("haltungen"), schaechte=_route.get("schaechte"), dbname=self.__result_db, ) tmp.show() initialized = init_application() if initialized: self.__log.info("Ganglinien-Tool wurde erfolgreich initiiert!") else: self.__log.warning("Initiierung abgebrochen. Ganglinien-Tool wird beendet.") return _layers = [layer for layer in QgsProject.instance().mapLayers().values()] important_layers = [] for l in _layers: if self.__layer_to_type(l) != -1: important_layers.append(l) feature_count = 0 for l in important_layers: feature_count += l.selectedFeatureCount() self.__log.debug("Es wurden {} Elemente selektiert.".format(feature_count)) if feature_count < 1: self.__log.critical("Es wurde kein Element selektiert!") self.__iface.messageBar().pushCritical( "Fehler", "Bitte wählen Sie mindestens ein Element aus!" ) return schaechte = [] haltungen = [] for l in important_layers: layer_type = self.__layer_to_type(l) if layer_type == LayerType.Schacht: schaechte += [f[1] for f in l.selectedFeatures()] elif layer_type in [LayerType.Haltung, LayerType.Pumpe, LayerType.Wehr]: haltungen += [f[1] for f in l.selectedFeatures()] schaechte = list(set(schaechte)) haltungen = list(set(haltungen)) route = dict(haltungen=haltungen, schaechte=schaechte) self.__log.info("Route wurde erstellt") self.__log.debug("Route:\t{}".format(route)) valid_db = self.__check_resultDB(route) if not valid_db: self.__log.critical( "Die übergebene Ergebnisdatenbank ist nicht vollständig." ) self.__iface.messageBar().pushCritical( "Fehler", "Unvollständige oder fehlerhafte Ergebnisdatenbank übergeben!", ) self.__select_db(ganglinie=True) return tmp.get_dialog().auto_update.show() self.__log.info("Auto-Update-Checkbox wird jetzt angezeigt.") subscribe_auto_update() tmp.get_dialog().auto_update.stateChanged.connect(auto_update_changed) tmp.get_dialog().setWindowFlags(Qt.Window) tmp.refresh( haltungen=route.get("haltungen"), schaechte=route.get("schaechte"), dbname=self.__result_db, ) tmp.draw() tmp.show() self.__log.info("Ganglinie8 wurde initiiert und geplottet.") subscribe_auto_update(False) self.__log.info("Event-Listener auf Layer wurden entfernt.") class MyNavigator(Navigator): def get_info(self, route): """ * Erstellt Dictionarys, welche folgende Informationen beinhalten. * Es wird je ein Dictionary für die Schächte und die Haltungen gemacht. * Schacht- bzw. Haltungs-Name entspricht dem Key. - Schacht: +sohlhoehe:float +deckelhoehe:float - Haltung: +laenge:float +schachtoben:str (Schacht-Name aus QGis) +schachtunten:str (Schacht-Name aus QGis) +sohlhoeheunten:float +sohlhoeheoben:float +querschnitt:float :param route: Beinhaltet getrennt von einander die Haltungs- und Schacht-Namen aus QGis. 
:type route: dict :return: Gibt ein Tuple von zwei Dictionaries zurück mit allen Haltungs- und Schacht-Namen und den nötigen Informationen zu diesen :rtype: dict, dict """ haltung_info = {} schacht_info = {} statement = """ SELECT * FROM (SELECT haltnam AS name, schoben, schunten, laenge, COALESCE(sohleoben, SO.sohlhoehe) AS sohleoben, COALESCE(sohleunten, SU.sohlhoehe) AS sohleunten, hoehe FROM haltungen LEFT JOIN (SELECT sohlhoehe, schnam FROM schaechte) AS SO ON haltungen.schoben = SO.schnam LEFT JOIN (SELECT sohlhoehe, schnam FROM schaechte) AS SU ON haltungen.schunten = SU.schnam UNION SELECT wnam AS name, schoben, schunten, laenge, SO.sohlhoehe AS sohleoben, SU.sohlhoehe AS sohleunten, 0.5 AS hoehe FROM wehre LEFT JOIN (SELECT sohlhoehe, schnam FROM schaechte) AS SO ON wehre.schoben = SO.schnam LEFT JOIN (SELECT sohlhoehe, schnam FROM schaechte) AS SU ON wehre.schunten = SU.schnam UNION SELECT pnam AS name, schoben, schunten, 5 AS laenge, SO.sohlhoehe AS sohleoben, SU.sohlhoehe AS sohleunten, 0.5 AS hoehe FROM pumpen LEFT JOIN (SELECT sohlhoehe, schnam FROM schaechte) AS SO ON pumpen.schoben = SO.schnam LEFT JOIN (SELECT sohlhoehe, schnam FROM schaechte) AS SU ON pumpen.schunten = SU.schnam ) WHERE name="{}" """ for haltung in route.get("haltungen"): self.db.sql(statement.format(haltung)) name, schachtoben, schachtunten, laenge, sohlhoeheoben, sohlhoeheunten, querschnitt = ( self.db.fetchone() ) haltung_info[haltung] = dict( schachtoben=schachtoben, schachtunten=schachtunten, laenge=laenge, sohlhoeheoben=sohlhoeheoben, sohlhoeheunten=sohlhoeheunten, querschnitt=querschnitt, ) self.log.info("Haltunginfo wurde erstellt") statement = """ SELECT sohlhoehe,deckelhoehe FROM schaechte WHERE schnam="{}" """ for schacht in route.get("schaechte"): self.db.sql(statement.format(schacht)) res = self.db.fetchone() schacht_info[schacht] = dict(deckelhoehe=res[1], sohlhoehe=res[0]) self.log.info("Schachtinfo wurde erstellt") return schacht_info, haltung_info
gpl-3.0
5,700,301,946,520,769,000
41.540628
131
0.528359
false
GoogleCloudPlatform/cloud-spanner-emulator
tests/gcloud/database_admin_test.py
1
7936
# # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """Tests for Cloud Spanner DatabaseAdmin api.""" from tests.gcloud import emulator class GCloudDatabaseAdminTest(emulator.TestCase): def testCreateDatabase(self): # Create an instance. self.RunGCloud('spanner', 'instances', 'create', 'test-instance', '--config=emulator-config', '--description=Test Instance', '--nodes', '3') # Create the database. self.assertEqual( self.RunGCloud('spanner', 'databases', 'create', 'test-database', '--instance=test-instance'), self.JoinLines('')) # Describe the database. self.assertEqual( self.RunGCloud('spanner', 'databases', 'describe', 'test-database', '--instance=test-instance'), self.JoinLines( 'name: projects/test-project/instances/test-instance/' 'databases/test-database', 'state: READY')) def testListsEmptyDatabases(self): # Create an instance. self.RunGCloud('spanner', 'instances', 'create', 'test-instance', '--config=emulator-config', '--description=Test Instance', '--nodes', '3') # List the databases. self.assertEqual( self.RunGCloud('spanner', 'databases', 'list', '--instance=test-instance'), self.JoinLines('')) def testListDatabases(self): # Create an instance. self.RunGCloud('spanner', 'instances', 'create', 'test-instance', '--config=emulator-config', '--description=Test Instance', '--nodes', '3') # Create the database. self.assertEqual( self.RunGCloud('spanner', 'databases', 'create', 'test-database', '--instance=test-instance'), self.JoinLines('')) # List the databases. # TODO : Remove version check after GCloud version is updated. if self.GCloudVersion() < 328: self.assertEqual( self.RunGCloud('spanner', 'databases', 'list', '--instance=test-instance'), self.JoinLines( 'NAME STATE', 'test-database READY')) else: self.assertEqual( self.RunGCloud('spanner', 'databases', 'list', '--instance=test-instance'), self.JoinLines( 'NAME STATE VERSION_RETENTION_PERIOD EARLIEST_VERSION_TIME', 'test-database READY')) def testDeleteDatabase(self): # Create an instance. self.RunGCloud('spanner', 'instances', 'create', 'test-instance', '--config=emulator-config', '--description=Test Instance', '--nodes', '3') # Create the database. self.assertEqual( self.RunGCloud('spanner', 'databases', 'create', 'test-database', '--instance=test-instance'), self.JoinLines('')) # Delete the database. # use --quiet to disable the interactive command prompt. self.assertEqual( self.RunGCloud('spanner', 'databases', 'delete', 'test-database', '--instance=test-instance', '--quiet'), self.JoinLines('')) def testCreateDatabaseWithDDL(self): # Create an instance. self.RunGCloud('spanner', 'instances', 'create', 'test-instance', '--config=emulator-config', '--description=Test Instance', '--nodes', '3') # Create the database. self.assertEqual( self.RunGCloud( 'spanner', 'databases', 'create', 'test-database', '--instance=test-instance', '--ddl=CREATE TABLE mytable (a INT64, b INT64) PRIMARY KEY(a)'), self.JoinLines('')) # Describe the database. 
self.assertEqual( self.RunGCloud('spanner', 'databases', 'describe', 'test-database', '--instance=test-instance'), self.JoinLines( 'name: projects/test-project/instances/test-instance/' 'databases/test-database', 'state: READY')) # TODO : Remove version check after GCloud version is updated. if self.GCloudVersion() <= 287: self.assertEqual( self.RunGCloud('spanner', 'databases', 'ddl', 'describe', 'test-database', '--instance=test-instance'), self.JoinLines( # pyformat: disable '--- |-', ' CREATE TABLE mytable (', ' a INT64,', ' b INT64,', ' ) PRIMARY KEY(a)' # pyformat: enable )) else: self.assertEqual( self.RunGCloud('spanner', 'databases', 'ddl', 'describe', 'test-database', '--instance=test-instance'), self.JoinLines( # pyformat: disable 'CREATE TABLE mytable (', ' a INT64,', ' b INT64,', ') PRIMARY KEY(a);' # pyformat: enable )) def testCreateDatabaseAndGetDatabaseDDL(self): # Create an instance. self.RunGCloud('spanner', 'instances', 'create', 'test-instance', '--config=emulator-config', '--description=Test Instance', '--nodes', '3') # Create the database. self.assertEqual( self.RunGCloud( 'spanner', 'databases', 'create', 'test-database', '--instance=test-instance', '--ddl=CREATE TABLE mytable (a INT64, b INT64, c STRING(256), d TIMESTAMP OPTIONS(allow_commit_timestamp=true)) PRIMARY KEY(a, b)' ), self.JoinLines('')) # Describe the database. self.assertEqual( self.RunGCloud('spanner', 'databases', 'describe', 'test-database', '--instance=test-instance'), self.JoinLines( 'name: projects/test-project/instances/test-instance/' 'databases/test-database', 'state: READY')) # TODO : Remove version check after GCloud version is updated. if self.GCloudVersion() <= 287: self.assertEqual( self.RunGCloud('spanner', 'databases', 'ddl', 'describe', 'test-database', '--instance=test-instance'), self.JoinLines( # pyformat: disable '--- |-', ' CREATE TABLE mytable (', ' a INT64,', ' b INT64,', ' c STRING(256),', ' d TIMESTAMP OPTIONS (', ' allow_commit_timestamp = true', ' ),', ' ) PRIMARY KEY(a, b)' # pyformat: enable )) else: self.assertEqual( self.RunGCloud('spanner', 'databases', 'ddl', 'describe', 'test-database', '--instance=test-instance'), self.JoinLines( # pyformat: disable 'CREATE TABLE mytable (', ' a INT64,', ' b INT64,', ' c STRING(256),', ' d TIMESTAMP OPTIONS (', ' allow_commit_timestamp = true', ' ),', ') PRIMARY KEY(a, b);' # pyformat: enable )) # TODO: Add a test that creates an index. # TODO: create tests for 'spanner databases ddl update'. # Note: there are no tests for IAM because it is unsupported in the emulator. if __name__ == '__main__': emulator.RunTests()
apache-2.0
7,036,179,360,892,971,000
38.287129
142
0.550277
false
rerpy/rerpy
rerpy/io/erpss.py
1
34069
# This file is part of rERPy # Copyright (C) 2012-2013 Nathaniel Smith <[email protected]> # See file LICENSE.txt for license information. import os.path import struct import os import string import bisect import sys import numpy as np import pandas from rerpy.data import DataFormat, Dataset from rerpy.util import maybe_open from rerpy.io._erpss import _decompress_crw_chunk PAUSE_CODE = 49152 DELETE_CODE = 57344 # There are also read_avg and write_erp_as_avg functions in here, but their # API probably needs another look before anyone should use them. __all__ = ["load_erpss"] # Derived from erp/include/64header.h: _header_dtype = np.dtype([ ("magic", "<u2"), ("epoch_len", "<i2"), # epoch length in msec ("nchans", "<i2"), ("sums", "<i2"), # 0 = ERP, 1 = single trial # -- 8 bytes -- ("tpfuncs", "<i2"), # number of processing funcs ("pp10uv", "<i2"), # points / 10 uV ("verpos", "<i2"), # 1 normally, -1 for sign inversion (I think?) ("odelay", "<i2"), # ms from trigger to stim (usually 8) # -- 16 bytes -- ("totevnt", "<i2"), # "total log events" (0 in mima217.avg) ("10usec_per_tick", "<i2"), ("time", "<i4"), # "time in sample clock ticks" (0 in mima217.avg) # -- 24 bytes -- ("cond_code", "<i2"), # (0 in mima217.avg) ("presam", "<i2"), # pre-event time in epoch in msec ("trfuncs", "<i2"), # "number of rejection functions" ("totrr", "<i2"), # "total raw records including rejects" (0 in mima217.avg) # -- 32 bytes -- ("totrej", "<i2"), # "total raw rejects" (0 in mima217.avg) (0 in mima217.avg) ("sbcode", "<i2"), # "subcondition number ( bin number )" (0 in mima217.avg) ("cprecis", "<i2"), # Our average contains cprecis * 256 samples ("dummy1", "<i2"), # -- 40 bytes -- ("decfact", "<i2"), # "decimation factor used in processing" ("dh_flag", "<i2"), # "see defines - sets time resolution" (0 in mima217.avg) ("dh_item", "<i4"), # "sequential item #" (0 in mima217.avg) # -- 48 bytes -- ("rfcnts", "<i2", (8,)), # "individual rejection counts 8 poss. rfs" ("rftypes", "S8", (8,)), # "8 char. descs for 8 poss. rfs" ("chndes", "S128"), ("subdes", "S40"), ("sbcdes", "S40"), ("condes", "S40"), ("expdes", "S40"), ("pftypes", "S24"), ("chndes2", "S40"), ("flags", "<u2"), # "see flag values below" (0 in mima217.avg) ("nrawrecs", "<u2"), # "# raw records if this is a raw file header" # (0 in mima217.avg) ("idxofflow", "<u2"), # (0 in mima217.avg) ("idxoffhi", "<u2"), # (0 in mima217.avg) ("chndes3", "S24"), # -- 512 bytes -- ]) # If, say, chndes has trailing null bytes, then rec["chndes"] will give us a # less-than-128-byte string back. But this function always gives us the full # 128 byte string, trailing nuls and all. 
def _get_full_string(record, key): val = record[key] desired_len = record.dtype.fields[key][0].itemsize return val + (desired_len - len(val)) * "\x00" # Translation tables for the ad hoc 6-bit character encoding used to encode # electrode names in the 64-channel format: _char2code = {} for i, char in enumerate(string.lowercase): _char2code[char] = i + 1 for i, char in enumerate(string.uppercase): _char2code[char] = i + 27 for i, char in enumerate(string.digits): _char2code[char] = i + 53 _code2char = dict([(v, k) for (k, v) in _char2code.iteritems()]) def _read_header(stream): header_str = stream.read(512) header = np.fromstring(header_str, dtype=_header_dtype)[0] if header["magic"] == 0x17a5: # Raw file magic number: fetcher = RawChunkFetcher(stream, header["nchans"]) elif header["magic"] == 0x97a5: # Compressed file magic number: fetcher = CrwChunkFetcher(stream, header["nchans"]) else: # pragma: no cover assert False, "Unrecognized file type" hz = 1 / (header["10usec_per_tick"] / 100000.0) if abs(hz - int(round(hz))) > 1e-6: raise ValueError("file claims weird non-integer sample rate %shz" % hz) hz = int(round(hz)) channel_names = _channel_names_from_header(header) # Also read out the various general informational bits: info = {} info["subject"] = header["subdes"] info["experiment"] = header["expdes"] info["odelay"] = header["odelay"] # And save the raw header in case anyone wants it later (you never know) info["erpss_raw_header"] = header_str return (fetcher, header["nchans"], hz, channel_names, info, header) def _channel_names_from_header(header): if header["nchans"] <= 16: # For small montages, each channel gets 8 bytes of ascii, smushed # together into a single array: return np.fromstring(_get_full_string(header, "chndes"), dtype="S8")[:header["nchans"]] elif header["nchans"] <= 32: # For mid-size montages, each channel gets 4 bytes: return np.fromstring(_get_full_string(header, "chndes"), dtype="S4")[:header["nchans"]] else: # And for large montages, a complicated scheme is used. 
# First, pull out and combine all the relevant buffers: chan_buf = (_get_full_string(header, "chndes") + _get_full_string(header, "chndes2") + _get_full_string(header, "chndes3")) # Then, each 3 byte chunk represents 4 characters, each coded in 6 # bits and packed together: channel_names_l = [] for i in xrange(header["nchans"]): chunk = np.fromstring(chan_buf[3*i : 3*i+3], dtype=np.uint8) codes = [ (chunk[0] >> 2) & 0x3f, (chunk[0] & 0x03) << 4 | (chunk[1] >> 4) & 0x0f, (chunk[1] & 0x0f) << 2 | (chunk[2] >> 6) & 0x03, (chunk[2] & 0x3f), ] chars = [_code2char[code] for code in codes if code != 0] channel_names_l.append("".join(chars)) return np.array(channel_names_l[:header["nchans"]]) def _channel_names_to_header(channel_names, header): header["nchans"] = len(channel_names) if len(channel_names) <= 16: header["chndes"] = np.asarray(channel_names, dtype="S8").tostring() elif len(channel_names) <= 32: header["chndes"] = np.asarray(channel_names, dtype="S4").tostring() else: encoded_names = [] for channel_name in channel_names: codes = [_char2code[char] for char in channel_name] codes += [0] * (4 - len(codes)) char0 = ((codes[0] << 2) | (codes[1] >> 4)) & 0xff char1 = ((codes[1] << 4) | (codes[2] >> 2)) & 0xff char2 = ((codes[2] << 6) | codes[3]) & 0xff encoded_names += [chr(char0), chr(char1), chr(char2)] concat_buf = "".join(encoded_names) header["chndes"] = concat_buf[:128] header["chndes2"] = concat_buf[128:128 + 40] header["chndes3"] = concat_buf[128 + 40:] if not np.all(_channel_names_from_header(header) == channel_names): raise ValueError("failed to encode channel names in header -- maybe " "some names are too long?") def test_channel_names_roundtrip(): # Try 1 char, 2 char, 3 char, 4 char names # Try all letters in 6-bit character set (digits, lowercase, uppercase) names = ["A", "a", "1", "Aa", "Aa1", "Aa1A"] import itertools for char, digit in itertools.izip(itertools.cycle(string.uppercase), itertools.cycle(string.digits)): names.append(char + char.lower() + digit) if len(names) == 64: break def t(test_names): header = np.zeros(1, dtype=_header_dtype)[0] _channel_names_to_header(test_names, header) got_names = _channel_names_from_header(header) assert np.all(got_names == test_names) # skip names == [], b/c we hit https://github.com/numpy/numpy/issues/3764 # and anyway, who cares about the nchans=0 case for i in xrange(1, len(names)): # Try all lengths t(names[:i]) # Also try some long names for small headers where they're allowed long_names = ["a" * i for i in xrange(8)] * 2 t(long_names) from nose.tools import assert_raises header = np.zeros(1, dtype=_header_dtype)[0] # But even for small headers, only 8 chars are allowed assert_raises(ValueError, _channel_names_to_header, ["a" * 9], header) # And for larger headers, only 4 chars are allowed for i in xrange(17, 64): assert_raises(ValueError, _channel_names_to_header, ["a" * 5] * i, header) def read_raw(stream, dtype, lazy): (fetcher, nchans, hz, channel_names, info, header) = _read_header(stream) # Data is stored in a series of "chunks" -- each chunk contains 256 s16 # samples from each channel (the 32/64/whatever analog channels, plus 1 # channel for codes -- that channel being first.). The code channel # contains a "record number" as its first entry in each chunk, which # simply increments by 1 each time. 
chunkno = 0 code_chunks = [] data_chunks = [] while True: read = fetcher.read_next_chunk(lazy) if read is None: break (code_chunk, data_chunk) = read assert len(code_chunk) == 256 assert code_chunk[0] == chunkno code_chunk[0] = 0 code_chunks.append(code_chunk) if not lazy: assert data_chunk.shape == (256 * nchans,) data_chunk.resize((256, nchans)) data_chunk = np.asarray(data_chunk, dtype=dtype) data_chunks.append(data_chunk) chunkno += 1 codes = np.concatenate(code_chunks) if lazy: data = None else: data = np.row_stack(data_chunks) return (fetcher, hz, channel_names, codes, data, info) # These two classes have slightly weird invariants. They have one of two life # cycles: # Option 1: # - __init__ # - read_next_chunk(False) called repeatedly to load all codes and data # Option 2: # - __init__ # - read_next_chunk(True) called repeatedly to load all codes # - get_chunk(chunk_number) called repeatedly to load random pieces of data # The key thing is that get_chunk is not guaranteed to work until after # read_next_chunk has been called to scan the whole file. # # read_next_chunk has the invariants that at entry, the stream will always be # pointing to the beginning of the wanted chunk, and then on exit, the stream # will always be pointing to beginning of the next chunk. class RawChunkFetcher(object): def __init__(self, stream, nchans): self._stream = stream self._nchans = nchans self._chunk_size_bytes = (nchans + 1) * 256 * 2 def read_next_chunk(self, lazy): buf = self._stream.read(self._chunk_size_bytes) # Check for EOF: if not buf: return None codes = np.fromstring(buf[:512], dtype="<u2") if lazy: data_chunk = None else: data_chunk = np.fromstring(buf[512:], dtype="<i2") return codes, data_chunk def get_chunk(self, chunk_number): offset = 512 + chunk_number * self._chunk_size_bytes self._stream.seek(offset) chunk_bytes = self._stream.read(self._chunk_size_bytes) data = np.fromstring(chunk_bytes[512:], dtype="<i2") return data class CrwChunkFetcher(object): def __init__(self, stream, nchans): self._stream = stream self._nchans = nchans self._offsets = [] def read_next_chunk(self, lazy): # Check for EOF: ncode_records_minus_one_buf = self._stream.read(1) if not ncode_records_minus_one_buf: return None # Code track (run length encoded): (ncode_records_minus_one,) = struct.unpack("<B", ncode_records_minus_one_buf) ncode_records = ncode_records_minus_one + 1 codes = np.empty(256, np.uint16) cursor = 0 for i in xrange(ncode_records): repeat_minus_one, code = struct.unpack("<BH", self._stream.read(3)) codes[cursor:cursor + repeat_minus_one + 1] = code cursor += repeat_minus_one + 1 assert cursor == 256 # Data bytes (delta encoded and packed into variable-length integers): # Record where these start so we can find it again in get_chunk(). self._offsets.append(self._stream.tell()) (ncompressed_words,) = struct.unpack("<H", self._stream.read(2)) compressed_data = self._stream.read(ncompressed_words * 2) if lazy: data_chunk = None else: # This is the slow part of loading data: data_chunk = _decompress_crw_chunk(compressed_data, ncompressed_words, self._nchans) return codes, data_chunk def get_chunk(self, chunk_number): self._stream.seek(self._offsets[chunk_number]) (ncompressed_words,) = struct.unpack("<H", self._stream.read(2)) return _decompress_crw_chunk(self._stream.read(ncompressed_words * 2), ncompressed_words, self._nchans) # XX FIXME: should perhaps wrap an LRU around this at some point. 
class LazyRecspan(object): def __init__(self, fetcher, dtype, nchans, recspan_start, recspan_stop): self._fetcher = fetcher self._dtype = dtype self._nchans = nchans self._recspan_start = recspan_start self._recspan_stop = recspan_stop def get_slice(self, local_start_tick, local_stop_tick): start_tick = self._recspan_start + local_start_tick stop_tick = self._recspan_start + local_stop_tick if stop_tick > self._recspan_stop: raise IndexError("attempt to index beyond end of recspan") output = np.empty((stop_tick - start_tick, self._nchans), dtype=self._dtype) cursor = 0 chunk_number = start_tick // 256 while True: tick = chunk_number * 256 if tick >= stop_tick: break data = self._fetcher.get_chunk(chunk_number) data.resize((256, self._nchans)) low = max(tick, start_tick) high = min(tick + 256, stop_tick) next_cursor = cursor + (high - low) output[cursor:next_cursor, :] = data[low - tick:high - tick] cursor = next_cursor chunk_number += 1 return output def test_LazyRecspan(): from nose.tools import assert_raises from rerpy.test import test_data_path for suffix in ["crw", "raw"]: (fetcher, hz, channames, codes, data, info) = read_raw( open(test_data_path("erpss/tiny-complete.%s" % (suffix,)), "rb"), "u2", False) # This fake recspan is chosen to cover part of the first and last # chunks, plus the entire middle chunk. It's exactly 512 samples long. lr = LazyRecspan(fetcher, "u2", len(channames), 128, 640) assert_raises(IndexError, lr.get_slice, 0, 513) for (start, stop) in [(0, 512), (10, 20), (256, 266), (500, 510), (120, 130)]: assert np.all(lr.get_slice(start, stop) == data[128 + start:128 + stop]) def assert_files_match(p1, p2): (_, hz1, channames1, codes1, data1, info1) = read_raw(open(p1, "rb"), "u2", False) for (p, lazy) in [(p1, True), (p2, False), (p2, True)]: (fetcher2, hz2, channames2, codes2, data2, info2 ) = read_raw(open(p, "rb"), "u2", lazy) assert hz1 == hz2 assert (channames1 == channames2).all() assert (codes1 == codes2).all() if lazy: assert data2 is None # Slight abuse, pretend that there's one recspan that has the whole # file loader2 = LazyRecspan(fetcher2, "u2", len(channames2), 0, len(codes2)) data2 = loader2.get_slice(0, len(codes2)) assert (data1 == data2).all() for k in set(info1.keys() + info2.keys()): if k != "erpss_raw_header": assert info1[k] == info2[k] def test_read_raw_on_test_data(): import glob from rerpy.test import test_data_path tested = 0 for rawp in glob.glob(test_data_path("erpss/*.raw")): crwp = rawp[:-3] + "crw" print rawp, crwp assert_files_match(rawp, crwp) tested += 1 # Cross-check, to make sure is actually finding the files... 
(bump up this # number if you add more test files): assert tested == 5 def test_64bit_channel_names(): from rerpy.test import test_data_path stream = open(test_data_path("erpss/two-chunks-64chan.raw"), "rb") (_, hz, channel_names, codes, data, info) = read_raw(stream, int, True) # "Correct" channel names as listed by headinfo(1): assert (channel_names == ["LOPf", "ROPf", "LMPf", "RMPf", "LTPf", "RTPf", "LLPf", "RLPf", "LPrA", "RPrA", "LTFr", "RTFr", "LLFr", "RLFr", "LDPf", "RDPf", "LTOc", "RTOc", "LTCe", "RTCe", "LLCe", "RLCe", "LDFr", "RDFr", "LMFr", "RMFr", "MiFo", "MiPf", "MiFr", "A2", "LHEy", "RHEy", "LIOc", "RIOc", "LLOc", "RLOc", "LLPP", "RLPP", "LLPa", "RLPa", "LDCe", "RDCe", "LMCe", "RMCe", "LDOc", "RDOc", "LDPP", "RDPP", "LDPa", "RDPa", "LCer", "RCer", "LMOc", "RMOc", "LMPP", "RMPP", "LMPa", "RMPa", "MiCe", "MiPa", "MiPP", "MiOc", "LLEy", "RLEy"] ).all() def read_log(file_like): fo = maybe_open(file_like) ticks = [] events = [] while True: event = fo.read(8) if not event: break (code, tick_hi, tick_lo, condition, flag) \ = struct.unpack("<HHHBB", event) ticks.append(tick_hi << 16 | tick_lo) events.append((code, condition, flag)) df = pandas.DataFrame(events, columns=["code", "condition", "flag"], index=ticks) df["flag_data_error"] = np.asarray(df["flag"] & 0o100, dtype=bool) df["flag_rejected"] = np.asarray(df["flag"] & 0o40, dtype=bool) df["flag_polinv"] = np.asarray(df["flag"] & 0o20, dtype=bool) return df # Little hack useful for testing. AFAIK this is identical to the erpss # 'makelog' program, except that: # - 'makelog' throws away some events from the end of the file, including the # very helpful final "pause" marker # - 'makelog' "cooks" the log file, i.e., toggles the high bit of all events # that occur in a span ended by a "delete mark" (see logfile.5). We don't # bother. (Though could, I guess.) 
def make_log(raw, condition=64): # pragma: no cover import warnings; warnings.warn("This code is not tested!") codes = read_raw(maybe_open(raw), np.float64, True)[3] log = [] for i in codes.nonzero()[0]: log.append(struct.pack("<HHHBB", codes[i], (i & 0xffff0000) >> 16, i & 0xffff, condition, 0)) if codes[i] in (PAUSE_CODE, DELETE_CODE): condition += 1 return "".join(log) def test_read_log(): def t(data, expected): from cStringIO import StringIO got = read_log(StringIO(data)) # .sort() is a trick to make sure columns line up from pandas.util.testing import assert_frame_equal assert_frame_equal(expected.sort(axis=1), got.sort(axis=1)) # The first 80 bytes of arquan25.log (from Delong, Urbach & Kutas 2005) data = "01000000ec01010001000000e103010001000000f50601004b00000044070100010000007b0701004b000000ca07010001000000010801004b0000004f08010001000000860801004b000000d5080100".decode("hex") # From 'logexam arquan25.log 1' (1 means, measure time in ticks) # then 'l 0 9' expected = pandas.DataFrame( {"code": [1, 1, 1, 75, 1, 75, 1, 75, 1, 75], "condition": [1] * 10, "flag": [0] * 10, "flag_data_error": [False] * 10, "flag_rejected": [False] * 10, "flag_polinv": [False] * 10, }, index=[492, 993, 1781, 1860, 1915, 1994, 2049, 2127, 2182, 2261], ) t(data, expected) # 80 bytes from arquan25.log, starting at 8080*8 bytes into the file data = "01000e00d39b010000c00e00ff9e010023010e005a9f000023010e00dc9f000023010e005da0000023010e00dea0000023010e005fa1000023010e00e1a1000023010e0062a2000023010e00e3a20000".decode("hex") # from logexam, 'l 8080 8089' expected = pandas.DataFrame( {"code": [1, 49152, 291, 291, 291, 291, 291, 291, 291, 291], "condition": [1, 1, 0, 0, 0, 0, 0, 0, 0, 0], "flag": [0] * 10, "flag_data_error": [False] * 10, "flag_rejected": [False] * 10, "flag_polinv": [False] * 10, }, index=[957395, 958207, 958298, 958428, 958557, 958686, 958815, 958945, 959074, 959203], ) t(data, expected) def load_erpss(raw, log, calibration_events="condition == 0", lazy=True, calibrate=False, calibrate_half_width_ticks=5, calibrate_low_cursor_time=None, calibrate_high_cursor_time=None, calibrate_pulse_size=None, calibrate_polarity=1): dtype = np.float64 metadata = {} if isinstance(raw, basestring): metadata["raw_file"] = os.path.abspath(raw) if isinstance(log, basestring): metadata["log_file"] = os.path.abspath(log) metadata["calibration_events"] = str(calibration_events) raw = maybe_open(raw) log = maybe_open(log) (fetcher, hz, channel_names, raw_codes, data, header_metadata ) = read_raw(raw, dtype, lazy) metadata.update(header_metadata) if calibrate: units = "uV" else: units = "RAW" data_format = DataFormat(hz, units, channel_names) total_ticks = raw_codes.shape[0] raw_log_events = read_log(log) expanded_log_codes = np.zeros(raw_codes.shape, dtype=int) try: expanded_log_codes[raw_log_events.index] = raw_log_events["code"] except IndexError as e: raise ValueError("log file claims event at position where there is " "no data: %s" % (e,)) # Sometimes people "delete" events by setting the high (sign) bit of the # code in the log file (e.g. with 'logpoke'). So we ignore this bit when # comparing log codes to raw codes -- mismatches here do not indicate an # error -- and then are careful to use the log codes, rather than the # raw codes, below. 
if np.any((expanded_log_codes & ~0x8000) != (raw_codes & ~0x8000)): raise ValueError("raw and log files have mismatched codes") del raw_codes del expanded_log_codes pause_events = (raw_log_events["code"] == PAUSE_CODE) delete_events = (raw_log_events["code"] == DELETE_CODE) break_events = pause_events | delete_events break_ticks = raw_log_events.index[break_events] # The pause/delete code appears at the last sample of the old era, so if # used directly, adjacent pause ticks give contiguous spans of recording # as (pause1, pause2]. (Confirmed by checking by hand in a real recording # that the data associated with the sample that has the pause code is # contiguous with the sample before, but not the sample after.) Adding +1 # to each of them then converts this to Python style [pause1, pause2) # intervals. There is a pause code at the last record of the file, but not # one at the first, so we add that in explicitly. break_ticks += 1 span_edges = np.concatenate(([0], break_ticks)) assert span_edges[0] == 0 assert span_edges[-1] == total_ticks span_slices = [slice(span_edges[i], span_edges[i + 1]) for i in xrange(len(span_edges) - 1)] dataset = Dataset(data_format) for span_slice in span_slices: if lazy: lr = LazyRecspan(fetcher, dtype, len(channel_names), span_slice.start, span_slice.stop) dataset.add_lazy_recspan(lr, span_slice.stop - span_slice.start, metadata) else: dataset.add_recspan(data[span_slice, :], metadata) span_starts = [s.start for s in span_slices] recspan_ids = [] start_ticks = [] for tick in raw_log_events.index: recspan_id = bisect.bisect(span_starts, tick) - 1 span_slice = span_slices[recspan_id] span_start = span_slice.start span_stop = span_slice.stop assert span_start <= tick < span_stop recspan_ids.append(recspan_id) start_ticks.append(tick - span_start) stop_ticks = [tick + 1 for tick in start_ticks] dataset.add_events(recspan_ids, start_ticks, stop_ticks, raw_log_events) for delete_event in dataset.events_query({"code": DELETE_CODE}): delete_event.recspan_info["deleted"] = True for cal_event in dataset.events_query(calibration_events): for key in list(cal_event): del cal_event[key] cal_event["calibration_pulse"] = True if calibrate: for kwarg in ["calibrate_low_cursor_time", "calibrate_high_cursor_time", "calibrate_pulse_size"]: if locals()[kwarg] is None: raise ValueError("when calibrating, %s= argument must be " "specified" % (kwarg,)) half_width = dataset.data_format.ticks_to_ms(calibrate_half_width_ticks) cal_vals = {} for which, cursor_time in [("low", calibrate_low_cursor_time), ("high", calibrate_high_cursor_time)]: # Round cursor to nearest tick cursor_tick = dataset.data_format.ms_to_ticks(cursor_time) cursor_time = dataset.data_format.ticks_to_ms(cursor_tick) erp = dataset.rerp("calibration_pulse", cursor_time - half_width, cursor_time + half_width, "1", all_or_nothing=True, overlap_correction=False, verbose=False) cal_vals[which] = erp.betas["Intercept"].mean() cal_diffs = cal_vals["high"] - cal_vals["low"] calibrate_pulse_size *= calibrate_polarity # For each channel, we want to multiply by a factor with units uV/raw # We have calibrate_pulse_size uV = cal_diffs raw cal_scaler = calibrate_pulse_size / cal_diffs dataset.transform(np.diagflat(np.asarray(cal_scaler))) return dataset def test_load_erpss(): from rerpy.test import test_data_path # This crw/log file is constructed to have a few features: # - it only has 3 records, so it's tiny # - the first two records are in one recspan, the last is in a second, so # we test the recspan splitting code # - the 
first recspan ends in a PAUSE event, the second ends in a DELETE # event, so we test the deleted event handling. # There are some weird things about it too: # - several events in the first recspan have condition 0, to test # calibration pulse stuff. In a normal ERPSS file all events within a # single recspan would have the same condition number. # - most of the event codes are >32767. In a normal ERPSS file such events # are supposed to be reserved for special stuff and deleted events, but # it happens the file I was using as a basis violated this rule. Oh # well. for lazy in [False, True]: dataset = load_erpss(test_data_path("erpss/tiny-complete.crw"), test_data_path("erpss/tiny-complete.log"), lazy=lazy) assert len(dataset) == 2 assert dataset[0].shape == (512, 32) assert dataset[1].shape == (256, 32) assert dataset.data_format.exact_sample_rate_hz == 250 assert dataset.data_format.units == "RAW" assert list(dataset.data_format.channel_names) == [ "lle", "lhz", "MiPf", "LLPf", "RLPf", "LMPf", "RMPf", "LDFr", "RDFr", "LLFr", "RLFr", "LMFr", "RMFr", "LMCe", "RMCe", "MiCe", "MiPa", "LDCe", "RDCe", "LDPa", "RDPa", "LMOc", "RMOc", "LLTe", "RLTe", "LLOc", "RLOc", "MiOc", "A2", "HEOG", "rle", "rhz", ] for recspan_info in dataset.recspan_infos: assert recspan_info["raw_file"].endswith("tiny-complete.crw") assert recspan_info["log_file"].endswith("tiny-complete.log") assert recspan_info["experiment"] == "brown-1" assert recspan_info["subject"] == "Subject p3 2008-08-20" assert recspan_info["odelay"] == 8 assert len(recspan_info["erpss_raw_header"]) == 512 assert dataset.recspan_infos[0].ticks == 512 assert dataset.recspan_infos[1].ticks == 256 assert dataset.recspan_infos[1]["deleted"] assert len(dataset.events()) == 14 # 2 are calibration events assert len(dataset.events("has code")) == 12 for ev in dataset.events("has code"): assert ev["condition"] in (64, 65) assert ev["flag"] == 0 assert not ev["flag_data_error"] assert not ev["flag_polinv"] assert not ev["flag_rejected"] for ev in dataset.events("calibration_pulse"): assert dict(ev) == {"calibration_pulse": True} def check_ticks(query, recspan_ids, start_ticks): events = dataset.events(query) assert len(events) == len(recspan_ids) == len(start_ticks) for ev, recspan_id, start_tick in zip(events, recspan_ids, start_ticks): assert ev.recspan_id == recspan_id assert ev.start_tick == start_tick assert ev.stop_tick == start_tick + 1 check_ticks("condition == 64", [0] * 8, [21, 221, 304, 329, 379, 458, 483, 511]) check_ticks("condition == 65", [1] * 4, [533 - 512, 733 - 512, 762 - 512, 767 - 512]) check_ticks("calibration_pulse", [0, 0], [250, 408]) # check calibration_events option dataset2 = load_erpss(test_data_path("erpss/tiny-complete.crw"), test_data_path("erpss/tiny-complete.log"), lazy=lazy, calibration_events="condition == 65") assert len(dataset2.events("condition == 65")) == 0 assert len(dataset2.events("condition == 0")) == 2 assert len(dataset2.events("calibration_pulse")) == 4 # check calibration # idea: if calibration works, then the "calibration erp" will have been # set to be the same size as whatever we told it to be. 
dataset_cal = load_erpss(test_data_path("erpss/tiny-complete.crw"), test_data_path("erpss/tiny-complete.log"), lazy=lazy, calibration_events="condition == 65", calibrate=True, calibrate_half_width_ticks=2, calibrate_low_cursor_time=-16, calibrate_high_cursor_time=21, calibrate_pulse_size=12.34, calibrate_polarity=-1) assert dataset_cal.data_format.units == "uV" # -16 ms +/-2 ticks = -24 to -8 ms low_cal = dataset_cal.rerp("calibration_pulse", -24, -8, "1", all_or_nothing=True, overlap_correction=False) # 21 ms rounds to 20 ms, +/-2 ticks for the window = 12 to 28 ms high_cal = dataset_cal.rerp("calibration_pulse", 12, 28, "1", all_or_nothing=True, overlap_correction=False) low = low_cal.betas["Intercept"].mean(axis=0) high = high_cal.betas["Intercept"].mean(axis=0) assert np.allclose(high - low, -1 * 12.34) # check that we can load from file handles (not sure if anyone cares but # hey you never know...) crw = open(test_data_path("erpss/tiny-complete.crw"), "rb") log = open(test_data_path("erpss/tiny-complete.log"), "rb") assert len(load_erpss(crw, log, lazy=lazy)) == 2 # check that code/raw mismatch is detected from nose.tools import assert_raises for bad in ["bad-code", "bad-tick", "bad-tick2"]: assert_raises(ValueError, load_erpss, test_data_path("erpss/tiny-complete.crw"), test_data_path("erpss/tiny-complete.%s.log" % (bad,)), lazy=lazy) # But if the only mismatch is an event that is "deleted" (sign bit # set) in the log file, but not in the raw file, then that is okay: load_erpss(test_data_path("erpss/tiny-complete.crw"), test_data_path("erpss/tiny-complete.code-deleted.log"), lazy=lazy) # Compare lazy to eager directly eager = load_erpss(test_data_path("erpss/tiny-complete.crw"), test_data_path("erpss/tiny-complete.log"), lazy=False) lazy = load_erpss(test_data_path("erpss/tiny-complete.crw"), test_data_path("erpss/tiny-complete.log"), lazy=True) from pandas.util.testing import assert_frame_equal assert len(eager) == len(lazy) for eager_recspan, lazy_recspan in zip(eager, lazy): assert_frame_equal(eager_recspan, lazy_recspan)
gpl-2.0
7,584,911,027,314,555,000
43.418514
187
0.576859
false
jpardobl/naman
naman/core/models.py
1
21503
import logging import re from django.db import models from django.db import transaction from django.core.exceptions import ObjectDoesNotExist import ipaddr import simplejson from django.db.models.signals import pre_save from django.dispatch import receiver from django.db.models.signals import post_save # Get an instance of a logger from naman.core.pypelib import RuleTable logger = logging.getLogger(__name__) class MoreThanOneIfacePerVlanError(Exception): pass class IfaceSequence(models.Model): machine = models.ForeignKey("Machine") vlan = models.ForeignKey("VLan") last_number = models.IntegerField(default=0) @property def incr(self, ): self.last_number += 1 self.save() return self.last_number class HostnameSequence(models.Model): prefix = models.CharField(max_length=11) last_number = models.IntegerField(default=0) @property def next(self, ): return self.last_number + 1 @property def incr(self, ): self.last_number += 1 self.save() return self.last_number def __unicode__(self, ): return u"%s(%s)" % (self.prefix, self.last_number) class CICaracteristic(models.Model): code = models.CharField(max_length=4) description = models.TextField() def __unicode__(self, ): return u"%s" % self.code class Meta: abstract = True class DNSZone(models.Model): name = models.CharField(max_length=100) def __unicode__(self, ): return u"%s" % self.name def save(self, *args, **kwargs): new = False if self.pk is None: new = True super(DNSZone, self).save(*args, **kwargs) if new: logger.info("New DNSZone created: %s" % self) class MType(models.Model): name = models.CharField(max_length=20) auto_name = models.BooleanField(default=True) has_serial = models.BooleanField(default=True) def __unicode__(self, ): return u"%s" % self.name class Environment(models.Model): code = models.CharField(max_length=4, blank=True) description = models.TextField() backup_vlans = models.ManyToManyField('VLan', null=True, blank=True) service_vlans = models.ManyToManyField( 'VLan', related_name='environments', null=True, blank=True) def __unicode__(self, ): return u"%s" % self.description def to_pypelib(self): return str(self.code) class Role(CICaracteristic): needs_backup_vlan = models.BooleanField(default=False) def to_pypelib(self): return u"%s" % self.code class OperatingSystem(CICaracteristic): def __unicode__(self, ): return u"%s" % self.description class ConflictingIP(models.Model): ip = models.GenericIPAddressField(unique=True) comments = models.TextField(null=True, blank=True) created_at = models.DateTimeField(auto_now_add=True) def __unicode__(self, ): return u"%s" % self.ip class ExcludedIPRange(models.Model): first = models.IPAddressField() last = models.IPAddressField() vlan = models.ForeignKey("VLan", related_name="excluded_ranges") class ExcludedIPError(Exception): pass def __unicode__(self, ): return "vlan: %s (%s - %s)" % (self.vlan, self.first, self.last) @property def as_tuple(self, ): return (self.first, self.last) def in_range(self, ip): if not isinstance(ip, ipaddr.IPv4Address): ip = ipaddr.IPv4Address(ip) if not isinstance(self.first, ipaddr.IPv4Address): self.first = ipaddr.IPv4Address(self.first) if not isinstance(self.last, ipaddr.IPv4Address): self.last = ipaddr.IPv4Address(self.last) if ip in self.as_tuple: return True if ip > self.first and ip < self.last: return True return False @receiver(pre_save, sender=ExcludedIPRange) def pre_save_excludediprange(sender, instance, **kwargs): if not instance.vlan.is_ip_valid(instance.first): raise ipaddr.AddressValueError("First IP is not correct for vlan") if not 
instance.vlan.is_ip_valid(instance.last): raise ipaddr.AddressValueError("Last IP is not correct for vlan") class VLanManager(models.Manager): def get_from_ip(self, ip): for vlan in self.all(): if vlan.is_ip_valid(ip): return vlan return None class VLan(models.Model): name = models.CharField(max_length=20) tag = models.IntegerField() ip = models.IPAddressField() gw = models.IPAddressField() mask = models.IntegerField() management_purpose = models.BooleanField(default=False) provisioning_purpose = models.BooleanField(default=False) dhcp = models.BooleanField(default=True) #general_purpose_service = models.BooleanField(default=False) class Meta: ordering = ("name", ) class NoFreeIPError(Exception): pass def __unicode__(self, ): return u"%s(%s)" % (self.name, self.tag) @property def info(self, ): hosts = [x for x in self.network.iterhosts()] return u"%s; first_ip: %s; last_ip: %s; num_hosts: %s" % ( self, hosts[0], hosts[-1], len(hosts), ) def is_ip_valid(self, ip): """ Calculates if an IP belongs to the vlan addressing scope""" if not isinstance(ip, ipaddr.IPv4Address): ip = ipaddr.IPv4Address(ip) for nip in self.network.iterhosts(): if nip == ip: return True return False @property def network(self, ): try: return ipaddr.IPv4Network("%s/%s" % (self.ip, self.mask)) except Exception: raise ValueError( "VLan.network: Can't calculate vlan network due to vlan %s misconfiguration" % self.name) @property def has_free_ip(self, ): return True if self.get_ip() else False @property def free_ips(self, ): eranges = self.excluded_ranges ips = [] for ip in self.network.iterhosts(): print "trying with: %s" % ip try: for erange in eranges.all(): if erange.in_range(ip): raise ExcludedIPRange.ExcludedIPError #print "query: %s" % ConflictingIP.objects.filter(ip=ip).query if ConflictingIP.objects.filter(ip=str(ip)).exists(): continue except ExcludedIPRange.ExcludedIPError: continue try: Iface.objects.get(ip=str(ip)) except ObjectDoesNotExist: print "adding this one: %s" % ip ips.append("\"%s\"" % ip) return ips def get_ip(self, ): """ searches and returns a free IP, respects excluded ranges and conflicting ips""" eranges = self.excluded_ranges #print "looking for an IP in scope: %s" % self.network for ip in self.network.iterhosts(): #print "trying with: %s" % ip try: for erange in eranges.all(): if erange.in_range(ip): raise ExcludedIPRange.ExcludedIPError #print "query: %s" % ConflictingIP.objects.filter(ip=ip).query if ConflictingIP.objects.filter(ip=str(ip)).exists(): continue except ExcludedIPRange.ExcludedIPError: continue try: Iface.objects.get(ip=str(ip)) except ObjectDoesNotExist: return str(ip) return None def save(self, *args, **kwargs): new = False if self.pk is None: new = True super(VLan, self).save(*args, **kwargs) if new: logger.info("New VLAN created: %s" % self) # Create your models here.
class Project(models.Model): name = models.CharField(max_length=200) code = models.CharField(max_length=20) dmz = models.ForeignKey('VLan', null=True, blank=True) service_vlans = models.ManyToManyField('VLan', related_name='projects') #machines = models.ManyToManyField("Machine") def __unicode__(self, ): return u"%s(%s)" % (self.name, self.code) def save(self, *args, **kwargs): new = False if self.pk is None: new = True super(Project, self).save(*args, **kwargs) if new: logger.info("New Project created: %s" % self) def to_pypelib(self): return str(self.code) class Machine(models.Model): hostname = models.CharField(max_length=25, blank=True, null=True) dns_zone = models.ForeignKey(DNSZone, blank=True, null=True) environment = models.ForeignKey(Environment, blank=True, null=True) role = models.ForeignKey(Role, blank=True, null=True) operating_system = models.ForeignKey( OperatingSystem, blank=True, null=True) virtual = models.BooleanField(default=False) project = models.ForeignKey( Project, related_name="machines", null=True, blank=True) mtype = models.ForeignKey(MType, null=True, blank=True) location = models.CharField(max_length=50, null=True, blank=True) dmz_located = models.BooleanField(default=False) #close_to = models.ForeignKey(Machine) class Meta: ordering = ("hostname",) def initialize_hostname(self, ): if self.role is None: raise AttributeError("If no hostname is specified, role must be initialized") if self.operating_system is None: raise AttributeError("If no hostname is specified, operating system must be initialized") if self.environment is None: raise AttributeError("If no hostname is specified, environment must be initialized") hn = u"%s%s%s%s" % ( self.role.code, self.project.code if self.project is not None else "", self.operating_system.code, self.environment.code if self.environment.code != "PRO" else "", ) hn = hn.lower() if self.mtype.has_serial: hn = "%s%s" % ( hn, HostnameSequence.objects.get_or_create(prefix=hn)[0].incr) self.hostname = hn def initialize_dnszone(self): try: if self.operating_system.code == "w": self.dns_zone = DNSZone.objects.get(name=".iberia.ib") else: self.dns_zone = DNSZone.objects.get(name=".ib") if self.environment.code == "lab": self.dns_zone = DNSZone.objects.get(name=".lab") except AttributeError: pass def save(self, *args, **kwargs): new = False print "save hostname: %s" % self.hostname with transaction.commit_on_success(): if self.pk is None: new = True if(self.mtype is not None and self.mtype.auto_name and self.hostname in (None, "") ): self.initialize_hostname() elif self.hostname is None or self.hostname == "": raise AttributeError( "Hostname missing for a non automatic one") self.initialize_dnszone() super(Machine, self).save(*args, **kwargs) if new: logger.info("New machine created: %s" % self) @property def fqdn(self, ): return u"%s%s" % (self.hostname, self.dns_zone) def __unicode__(self, ): return u"%s" % self.fqdn if not self.dns_zone is None else self.hostname def has_iface_on_vlan(self, vlan): return self.interfaces.filter(vlan=vlan).exists() def get_vlanconfig(self): if not self.vlan_configs.count(): self.vlan_configs.append(VLanConfig()) return self.vlan_configs.get() class Service(models.Model): name = models.CharField(max_length=255) description = models.TextField(null=True, blank=True) iface = models.ForeignKey("Iface", related_name="services", verbose_name='Ip') # This allows referencing an iface's services from an Iface object def __unicode__(self, ): return u"%s" % self.name class IfaceManager(models.Manager):
def query_cmd(self, query_string): m = re.match("^(.+)=(.+)$", query_string) if not m: #print "no match found" return None try: print "Match found: %s" % m.group(2) queriable = { "name": self.filter(name__iregex=m.group(2)), "vlan": self.filter(vlan__name__iregex=m.group(2)), "machines": self.filter(machines__hostname__iregex=m.group(2)), "ip": self.filter(ip__iregex=m.group(2)), "gw": self.filter(gw__iregex=m.group(2)), "mask": self.filter(mask__iregex=m.group(2)), "comments": self.filter(comments__iregex=m.group(2)), "mac": self.filter(mac__iregex=m.group(2)), "nat": self.filter(nat__iregex=m.group(2)), } return queriable[m.group(1)] except KeyError: pass return None class Iface(models.Model): objects = IfaceManager() PREFIX = "eth" name = models.CharField(max_length=11, blank=True) vlan = models.ForeignKey(VLan) ip = models.GenericIPAddressField(blank=True, unique=True, null=True) gw = models.GenericIPAddressField(blank=True, default="0.0.0.0") mask = models.IntegerField(blank=True) machines = models.ManyToManyField( Machine, related_name="interfaces", null=True, blank=True) comments = models.TextField(null=True, blank=True) mac = models.CharField(max_length=17, null=True, blank=True) nat = models.GenericIPAddressField(null=True, blank=True) virtual = models.BooleanField(default=False) dhcp = models.BooleanField(default=False) def to_json(self, ): return simplejson.dumps({ "id": self.pk, "ip": self.ip, "vlan": self.vlan.pk, "dhcp": self.dhcp, }) def __unicode__(self, ): return u"%s" % self.ip if not self.ip is None else self.name if self.name != "" else "IFACE" @staticmethod def excluded_in_ranges(ip, vlan=None): if vlan is None: vlan = Iface.find_vlan(ip) if vlan is None: return [] exclusions = [] for eir in ExcludedIPRange.objects.filter(vlan=vlan): if eir.in_range(ip): exclusions.append(eir) return exclusions @staticmethod def find_vlan(ip): """ Looks up a valid vlan for the passed ip.
if no vlan found, return None """ for vlan in VLan.objects.all(): try: if vlan.is_ip_valid(ip): return vlan except ValueError: logger.error("VLan %s might be misconfigured, thus can't check if ip %s belongs to it" % (vlan, ip)) continue return None def save(self, manual=None): new = False with transaction.commit_on_success(): """ The 'manual' parameter exists so that the automatic configuration assignment represented by the following 'if' only runs when saving from the API; the view views_iface.edit_by_machine passes manual=True so that the algorithm, which the API does need, is not executed """ if self.pk is None and not manual: new = True self.dhcp = self.vlan.dhcp self.ip = self.vlan.get_ip() if self.ip in (None, "") and not self.dhcp else self.ip #If IP comes from user, check it's valid for the vlan if not self.dhcp and not self.vlan.is_ip_valid(self.ip): raise AttributeError("Ip %s is not valid for vlan %s" % (self.ip, self.vlan)) self.gw = self.vlan.gw self.mask = self.vlan.mask super(Iface, self).save() if new: logger.info("New Iface created: %s" % self) COND_CHOICES = ( ('environment', 'Machine environment'), ('project', 'Machine project'), ('role_needs_backup', 'Machine role needs backup'), ('dmz_located', 'Machine located in DMZ'), ("needs_backup", "Machine needs backup"), ('needs_management', "Machine needs management"), ) class Conditional(models.Model): statement1 = models.CharField(max_length=60,choices=COND_CHOICES, default="environment") statement2 = models.CharField(max_length=60, null=True, blank=True) #ocurred = models.BooleanField(default=False) def __unicode__(self): return u"%s is %s" % (self.statement1, self.statement2) def to_pypelib(self): return u"(%s = %s)" % (str(self.statement1), str(self.statement2)) ACTION_CHOICES = ( ("assign_backup_vlan", "Assign backup vlan"), ("assign_management_vlan", "Assign management vlan"), ("assign_provisioning_vlan", "Assign provisioning vlan"), ("assign_dmz_based_on_project", "Assign DMZ based on project"), ("assign_service_vlan_based_on_project", "Assign service vlan based on project"), ("assign_general_purpose_service_vlan", "Assign general purpose vlan"), ) class Rule(models.Model): table = models.CharField(max_length=30) conditionals = models.ManyToManyField(Conditional, null=True, blank=True) #conditionals = models.CharField(max_length=65, choices=COND_CHOICES) action = models.CharField(max_length=65, choices=ACTION_CHOICES, null=True, blank=True) active = models.BooleanField(default=True) terminal = models.BooleanField(default=False) def __unicode__(self, ): return self.to_pypelib() return "[%s] conditionals: %s; action: %s" % ( self.active, self.conditionals.all(), self.action, ) def to_pypelib(self, ): conds = self.conditionals.all() out = "if " if conds.count(): out = "%s (" % out for c in conds: out = "%s %s && " % (out, c.to_pypelib()) out = re.sub("&&\s$", ")", out) #print out #out = re.sub("&&$", "", out) if conds.count() == 0: out = "%s 1 = 1 " % out return "%s then accept %s do %s" % (out, "" if self.terminal else "nonterminal", self.action) class VLanConfig(models.Model): machine = models.ForeignKey(Machine, related_name="vlan_configs") vlans = models.ManyToManyField(VLan, blank=True, null=True) needs_backup = models.BooleanField(default=True) needs_management = models.BooleanField(default=False) class Meta: verbose_name = "Configuracion de vlanes para maquina" verbose_name_plural = "Configuraciones de vlanes para maquinas" def __unicode__(self, ): out = u"VLan config for %s: [" % self.machine for vlan in
self.vlans.all().order_by('name'): out = "%s %s," % (out, vlan) return u"%s]" % out def append_vlan(self, vlan): """ Adds a new vlan checking if it has free IPs """ if not vlan.has_free_ip: raise VLan.NoFreeIPError(vlan) self.vlans.add(vlan) print("Vlan %s added" % vlan) def save(self, *args, **kwargs): new = (self.pk is None) super(VLanConfig, self).save(*args, **kwargs) @receiver(post_save, sender=VLanConfig) def post_save_vlanconfig(sender, instance, **kwargs): from naman.core.mappings import get_mappings from naman.core.pypelib.RuleTable import RuleTable if instance.machine.role is None: raise AttributeError("Machine %s has no role assigned" % instance.machine) if instance.machine.environment is None: raise AttributeError("Machine %s has no environment assigned" % instance.machine) table = RuleTable( "Backup, service and management", get_mappings(), "RegexParser", #rawfile, "RAWFile", None) table.setPolicy(False) for rule in Rule.objects.filter(active=True).order_by("pk"): table.addRule(rule.to_pypelib()) logging.debug(table.dump()) try: table.evaluate(instance.machine) logging.debug("Table evaluated True") except Exception, ex: import traceback print "Table evaluated False: %s" % traceback.format_exc() logging.debug("Table evaluated False") pass logging.debug("Vlan config saved, vlans: %s" % instance.vlans.all()) for vlan in instance.vlans.all(): iface = Iface(vlan=vlan) iface.save() iface.machines.add(instance.machine) @receiver(pre_save, sender=VLanConfig) def pre_save_vlanconfig(sender, instance, **kwargs): if instance.pk is None: #deleting possible previous vlanconfigs, only if it's new VLanConfig.objects.filter(machine=instance.machine).delete()
bsd-3-clause
-3,268,713,329,246,801,000
30.715339
156
0.58829
false
Pexego/PXGO_00049_2013_PCG
project-addons/purchase_oferts/__openerp__.py
1
1398
# -*- coding: utf-8 -*- ############################################################################## # # Copyright (C) 2004-2014 Pexego Sistemas Informáticos All Rights Reserved # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as published # by the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## { "name": "Ofertas de proveedores", "version": "1.0", "depends": ["purchase", "hr", "purchase_requisition"], "author": "Pexego", "category": "purchase", "description": """ This module provides: """, "init_xml": [], 'update_xml': ["wizard/generate_purchases_wizard_view.xml","purchase_oferts_view.xml"], 'demo_xml': [], 'installable': True, 'active': False, # 'certificate': 'certificate', }
agpl-3.0
6,808,435,858,211,107,000
38.914286
91
0.592699
false
JasonKessler/scattertext
scattertext/test/test_HTMLVisualizationAssembly.py
1
28860
import sys from unittest import TestCase from scattertext import HTMLSemioticSquareViz from scattertext.Common import DEFAULT_D3_URL, DEFAULT_D3_SCALE_CHROMATIC, DEFAULT_DIV_ID, DEFAULT_D3_AXIS_VALUE_FORMAT from scattertext.test.test_semioticSquare import get_test_semiotic_square from scattertext.viz.BasicHTMLFromScatterplotStructure import BasicHTMLFromScatterplotStructure from scattertext.viz.ScatterplotStructure import ScatterplotStructure from scattertext.viz.VizDataAdapter import VizDataAdapter class TestHTMLVisualizationAssembly(TestCase): def get_params(self, param_dict={}): params = ['1000', '600', 'null', 'null', 'true', 'false', 'false', 'false', 'false', 'true', 'false', 'false', 'true', '0.1', 'false', 'undefined', 'undefined', 'getDataAndInfo()', 'true', 'false', 'null', 'null', 'null', 'null', 'true', 'false', 'true', 'false', 'null', 'null', '10', 'null', 'null', 'null', 'false', 'true', 'true', '"' + DEFAULT_DIV_ID + '"', 'null', 'false', 'false', '"' + DEFAULT_D3_AXIS_VALUE_FORMAT + '"', '"' + DEFAULT_D3_AXIS_VALUE_FORMAT + '"', 'false', '-1', 'true', 'false', 'true', 'false', 'false', 'false', 'true', 'null', 'null', 'null', 'false', 'null', 'undefined', 'undefined', 'undefined', 'undefined', 'undefined', 'undefined', 'undefined', '14', '0'] for i, val in param_dict.items(): params[i] = val return 'buildViz(' + ',\n'.join(params) + ');\n' def make_assembler(self): scatterplot_structure = ScatterplotStructure(self.make_adapter()) return BasicHTMLFromScatterplotStructure(scatterplot_structure) def make_adapter(self): words_dict = {"info": {"not_category_name": "Republican", "category_name": "Democratic"}, "data": [{"y": 0.33763837638376387, "term": "crises", "ncat25k": 0, "cat25k": 1, "x": 0.0, "s": 0.878755930416447}, {"y": 0.5, "term": "something else", "ncat25k": 0, "cat25k": 1, "x": 0.0, "s": 0.5}]} visualization_data = VizDataAdapter(words_dict) return visualization_data def test_main(self): assembler = self.make_assembler() html = assembler.to_html() if sys.version_info.major == 2: self.assertEqual(type(html), unicode) else: self.assertEqual(type(html), str) self.assertFalse('<!-- EXTRA LIBS -->' in html) self.assertFalse('<!-- INSERT SCRIPT -->' in html) self.assertTrue('<!-- INSERT SEMIOTIC SQUARE -->' in html) self.assertTrue('Republican' in html) def test_semiotic_square(self): semsq = get_test_semiotic_square() assembler = self.make_assembler() html = assembler.to_html( html_base=HTMLSemioticSquareViz(semsq).get_html(num_terms=6)) if sys.version_info.major == 2: self.assertEqual(type(html), unicode) else: self.assertEqual(type(html), str) self.assertFalse('<!-- EXTRA LIBS -->' in html) # self.assertFalse('<!-- INSERT SEMIOTIC SQUARE -->' in html) self.assertFalse('<!-- INSERT SCRIPT -->' in html) self.assertTrue('Republican' in html) def test_save_svg_button(self): scatterplot_structure = ScatterplotStructure(self.make_adapter(), save_svg_button=True) assembly = BasicHTMLFromScatterplotStructure(scatterplot_structure) html = assembly.to_html() self.assertEqual(scatterplot_structure.call_build_visualization_in_javascript(), self.get_params({11: 'true'})) self.assertFalse('<!-- INSERT SCRIPT -->' in html) # self.assertTrue('d3-save-svg.min.js' in html) def test_protocol_is_https(self): html = self.make_assembler().to_html(protocol='https') self.assertTrue(self._https_script_is_present(html)) self.assertFalse(self._http_script_is_present(html)) def test_protocol_is_http(self): html = self.make_assembler().to_html(protocol='http') 
self.assertFalse(self._https_script_is_present(html)) self.assertTrue(self._http_script_is_present(html)) def _http_script_is_present(self, html): return 'src="http://' in html def _https_script_is_present(self, html): return 'src="https://' in html def test_protocol_default_d3_url(self): html = self.make_assembler().to_html() self.assertTrue(DEFAULT_D3_URL in html) html = self.make_assembler().to_html(d3_url='d3.js') self.assertTrue(DEFAULT_D3_URL not in html) self.assertTrue('d3.js' in html) def test_protocol_default_d3_chromatic_url(self): html = self.make_assembler().to_html() self.assertTrue(DEFAULT_D3_SCALE_CHROMATIC in html) html = self.make_assembler().to_html(d3_scale_chromatic_url='d3-scale-chromatic.v1.min.js') self.assertTrue(DEFAULT_D3_SCALE_CHROMATIC not in html) self.assertTrue('d3-scale-chromatic.v1.min.js' in html) def test_protocol_defaults_to_http(self): self.assertEqual(self.make_assembler().to_html(protocol='http'), self.make_assembler().to_html()) def test_raise_invalid_protocol_exception(self): with self.assertRaisesRegexp(BaseException, "Invalid protocol: ftp. Protocol must be either http or https."): self.make_assembler().to_html(protocol='ftp') def test_height_width_default(self): # assembler = self.make_assembler() scatterplot_structure = ScatterplotStructure(self.make_adapter()) self.assertEqual(scatterplot_structure.call_build_visualization_in_javascript(), self.get_params()) def test_color(self): visualization_data = self.make_adapter() self.assertEqual((ScatterplotStructure(visualization_data, color='d3.interpolatePurples') .call_build_visualization_in_javascript()), self.get_params({3: 'd3.interpolatePurples'})) def test_full_doc(self): visualization_data = self.make_adapter() self.assertEqual((ScatterplotStructure(visualization_data, use_full_doc=True) .call_build_visualization_in_javascript()), self.get_params({5: 'true'})) def test_grey_zero_scores(self): visualization_data = self.make_adapter() self.assertEqual((ScatterplotStructure(visualization_data, grey_zero_scores=True) .call_build_visualization_in_javascript()), self.get_params({6: 'true'})) def test_chinese_mode(self): visualization_data = self.make_adapter() self.assertEqual((ScatterplotStructure(visualization_data, asian_mode=True) .call_build_visualization_in_javascript()), self.get_params({7: 'true'})) def test_reverse_sort_scores_for_not_category(self): visualization_data = self.make_adapter() self.assertEqual((ScatterplotStructure(visualization_data, reverse_sort_scores_for_not_category=False) .call_build_visualization_in_javascript()), self.get_params({12: 'false'})) def test_height_width_nondefault(self): visualization_data = self.make_adapter() self.assertEqual((ScatterplotStructure(visualization_data, width_in_pixels=1000) .call_build_visualization_in_javascript()), self.get_params({0: '1000'})) self.assertEqual((ScatterplotStructure(visualization_data, height_in_pixels=60) .call_build_visualization_in_javascript()), self.get_params({1: '60'})) self.assertEqual((ScatterplotStructure(visualization_data, width_in_pixels=1000, height_in_pixels=60) .call_build_visualization_in_javascript()), self.get_params({0: '1000', 1: '60'})) def test_use_non_text_features(self): visualization_data = self.make_adapter() self.assertEqual((ScatterplotStructure(visualization_data, width_in_pixels=1000, height_in_pixels=60, use_non_text_features=True) .call_build_visualization_in_javascript()), self.get_params({0: '1000', 1: '60', 8: 'true'})) def test_show_characteristic(self): visualization_data = 
self.make_adapter() self.assertEqual((ScatterplotStructure(visualization_data, width_in_pixels=1000, height_in_pixels=60, show_characteristic=False) .call_build_visualization_in_javascript()), self.get_params({0: '1000', 1: '60', 9: 'false'})) def test_max_snippets(self): visualization_data = self.make_adapter() self.assertEqual((ScatterplotStructure(visualization_data, width_in_pixels=1000, height_in_pixels=60, max_snippets=None) .call_build_visualization_in_javascript()), self.get_params({0: '1000', 1: '60'})) self.assertEqual((ScatterplotStructure(visualization_data, width_in_pixels=1000, height_in_pixels=60, max_snippets=100) .call_build_visualization_in_javascript()), self.get_params({0: '1000', 1: '60', 2: '100'})) def test_word_vec_use_p_vals(self): visualization_data = self.make_adapter() self.assertEqual((ScatterplotStructure(visualization_data, width_in_pixels=1000, height_in_pixels=60, word_vec_use_p_vals=True) .call_build_visualization_in_javascript()), self.get_params({0: '1000', 1: '60', 10: 'true'})) def test_max_p_val(self): visualization_data = self.make_adapter() self.assertEqual((ScatterplotStructure(visualization_data, width_in_pixels=1000, height_in_pixels=60, word_vec_use_p_vals=True, max_p_val=0.01) .call_build_visualization_in_javascript()), self.get_params({0: '1000', 1: '60', 10: 'true', 13: '0.01'})) def test_p_value_colors(self): visualization_data = self.make_adapter() self.assertEqual((ScatterplotStructure(visualization_data, width_in_pixels=1000, height_in_pixels=60, word_vec_use_p_vals=True, p_value_colors=True) .call_build_visualization_in_javascript()), self.get_params({0: '1000', 1: '60', 10: 'true', 14: 'true'})) def test_x_label(self): visualization_data = self.make_adapter() self.assertEqual((ScatterplotStructure(visualization_data, width_in_pixels=1000, height_in_pixels=60, x_label='x label') .call_build_visualization_in_javascript()), self.get_params({0: '1000', 1: '60', 15: '"x label"'})) def test_y_label(self): visualization_data = self.make_adapter() self.assertEqual((ScatterplotStructure(visualization_data, width_in_pixels=1000, height_in_pixels=60, y_label='y label') .call_build_visualization_in_javascript()), self.get_params({0: '1000', 1: '60', 16: '"y label"'})) def test_full_data(self): visualization_data = self.make_adapter() full_data = "customFullDataFunction()" self.assertEqual((ScatterplotStructure(visualization_data, full_data=full_data) .call_build_visualization_in_javascript()), self.get_params({17: full_data})) def test_show_top_terms(self): visualization_data = self.make_adapter() self.assertEqual((ScatterplotStructure(visualization_data, show_top_terms=False) .call_build_visualization_in_javascript()), self.get_params({18: 'false'})) visualization_data = self.make_adapter() self.assertEqual((ScatterplotStructure(visualization_data, show_top_terms=True) .call_build_visualization_in_javascript()), self.get_params({18: 'true'})) self.assertEqual((ScatterplotStructure(visualization_data) .call_build_visualization_in_javascript()), self.get_params({18: 'true'})) def test_show_neutral(self): visualization_data = self.make_adapter() self.assertEqual((ScatterplotStructure(visualization_data) .call_build_visualization_in_javascript()), self.get_params({19: 'false'})) self.assertEqual((ScatterplotStructure(visualization_data, show_neutral=True) .call_build_visualization_in_javascript()), self.get_params({19: 'true'})) def test_get_tooltip_content(self): visualization_data = self.make_adapter() f = '''(function(x) {return 'Original X: ' + 
x.ox;})''' self.assertEqual((ScatterplotStructure(visualization_data, get_tooltip_content=f) .call_build_visualization_in_javascript()), self.get_params({20: f})) def test_x_axis_labels(self): visualization_data = self.make_adapter() self.assertEqual((ScatterplotStructure(visualization_data, x_axis_values=[1, 2, 3]) .call_build_visualization_in_javascript()), self.get_params({21: "[1, 2, 3]"})) def test_y_axis_labels(self): visualization_data = self.make_adapter() self.assertEqual((ScatterplotStructure(visualization_data, y_axis_values=[4, 5, 6]) .call_build_visualization_in_javascript()), self.get_params({22: "[4, 5, 6]"})) def test_color_func(self): visualization_data = self.make_adapter() color_func = 'function colorFunc(d) {var c = d3.hsl(d3.interpolateRdYlBu(d.x)); c.s *= d.y; return c;}' self.assertEqual((ScatterplotStructure(visualization_data, color_func=color_func) .call_build_visualization_in_javascript()), self.get_params({23: color_func})) def test_show_axes(self): visualization_data = self.make_adapter() self.assertEqual((ScatterplotStructure(visualization_data, show_axes=False) .call_build_visualization_in_javascript()), self.get_params({24: 'false'})) def test_show_extra(self): visualization_data = self.make_adapter() self.assertEqual((ScatterplotStructure(visualization_data, show_extra=True) .call_build_visualization_in_javascript()), self.get_params({25: 'true'})) def test_do_censor_points(self): visualization_data = self.make_adapter() self.assertEqual((ScatterplotStructure(visualization_data, do_censor_points=False) .call_build_visualization_in_javascript()), self.get_params({26: 'false'})) def test_center_label_over_points(self): visualization_data = self.make_adapter() self.assertEqual((ScatterplotStructure(visualization_data, center_label_over_points=True) .call_build_visualization_in_javascript()), self.get_params({27: 'true'})) def test_x_axis_labels_over_points(self): visualization_data = self.make_adapter() self.assertEqual((ScatterplotStructure(visualization_data, x_axis_labels=['Lo', 'Hi']) .call_build_visualization_in_javascript()), self.get_params({28: '["Lo", "Hi"]'})) def test_y_axis_labels_over_points(self): visualization_data = self.make_adapter() self.assertEqual((ScatterplotStructure(visualization_data, y_axis_labels=['Lo', 'Hi']) .call_build_visualization_in_javascript()), self.get_params({29: '["Lo", "Hi"]'})) def test_topic_model_preview_size(self): visualization_data = self.make_adapter() self.assertEqual((ScatterplotStructure(visualization_data, topic_model_preview_size=20) .call_build_visualization_in_javascript()), self.get_params({30: '20'})) def test_vertical_lines(self): visualization_data = self.make_adapter() params = (ScatterplotStructure(visualization_data, vertical_lines=[20, 31]) .call_build_visualization_in_javascript()) self.assertEqual(params, self.get_params({31: '[20, 31]'})) def test_horizontal_line_y_position(self): visualization_data = self.make_adapter() params = (ScatterplotStructure(visualization_data, horizontal_line_y_position=0) .call_build_visualization_in_javascript()) self.assertEqual(params, self.get_params({32: '0'})) def test_vertical_line_x_position(self): visualization_data = self.make_adapter() params = (ScatterplotStructure(visualization_data, vertical_line_x_position=3) .call_build_visualization_in_javascript()) self.assertEqual(params, self.get_params({33: '3'})) def test_unifed_context(self): visualization_data = self.make_adapter() params = (ScatterplotStructure(visualization_data, unified_context=True) 
.call_build_visualization_in_javascript()) self.assertEqual(params, self.get_params({34: 'true'})) def test_show_category_headings(self): visualization_data = self.make_adapter() params = (ScatterplotStructure(visualization_data, show_category_headings=False) .call_build_visualization_in_javascript()) self.assertEqual(params, self.get_params({35: 'false'})) def test_show_cross_axes(self): visualization_data = self.make_adapter() params = (ScatterplotStructure(visualization_data, show_cross_axes=False) .call_build_visualization_in_javascript()) self.assertEqual(params, self.get_params({36: 'false'})) def test_div_name(self): visualization_data = self.make_adapter() params = (ScatterplotStructure(visualization_data, div_name='divvydivvy') .call_build_visualization_in_javascript()) self.assertEqual(params, self.get_params({37: '"divvydivvy"'})) def test_alternative_term_func(self): visualization_data = self.make_adapter() params = (ScatterplotStructure(visualization_data, alternative_term_func='(function(termDict) {return true;})') .call_build_visualization_in_javascript()) self.assertEqual(params, self.get_params({38: '(function(termDict) {return true;})'})) def test_include_all_contexts(self): visualization_data = self.make_adapter() params = (ScatterplotStructure(visualization_data, include_all_contexts=True) .call_build_visualization_in_javascript()) self.assertEqual(params, self.get_params({39: 'true'})) def test_show_axes_and_cross_hairs(self): visualization_data = self.make_adapter() params = (ScatterplotStructure(visualization_data, show_axes_and_cross_hairs=True) .call_build_visualization_in_javascript()) self.assertEqual(params, self.get_params({40: 'true'})) def test_x_axis_values_format(self): visualization_data = self.make_adapter() params = (ScatterplotStructure(visualization_data, x_axis_values_format=".4f") .call_build_visualization_in_javascript()) self.assertEqual(params, self.get_params({41: '".4f"'})) def test_y_axis_values_format(self): visualization_data = self.make_adapter() params = (ScatterplotStructure(visualization_data, y_axis_values_format=".5f") .call_build_visualization_in_javascript()) self.assertEqual(params, self.get_params({42: '".5f"'})) def test_match_full_line(self): visualization_data = self.make_adapter() params = (ScatterplotStructure(visualization_data, match_full_line=True) .call_build_visualization_in_javascript()) self.assertEqual(params, self.get_params({43: 'true'})) def test_max_overlapping(self): visualization_data = self.make_adapter() params = (ScatterplotStructure(visualization_data, max_overlapping=10) .call_build_visualization_in_javascript()) self.assertEqual(params, self.get_params({44: '10'})) def test_show_corpus_stats(self): visualization_data = self.make_adapter() params = (ScatterplotStructure(visualization_data, show_corpus_stats=False) .call_build_visualization_in_javascript()) self.assertEqual(params, self.get_params({45: 'false'})) def test_sort_doc_labels_by_name(self): visualization_data = self.make_adapter() params = (ScatterplotStructure(visualization_data, sort_doc_labels_by_name=True) .call_build_visualization_in_javascript()) self.assertEqual(params, self.get_params({46: 'true'})) def test_always_jump(self): visualization_data = self.make_adapter() params = (ScatterplotStructure(visualization_data, always_jump=False) .call_build_visualization_in_javascript()) self.assertEqual(params, self.get_params({47: 'false'})) def test_highlight_selected_category(self): visualization_data = self.make_adapter() params = 
(ScatterplotStructure(visualization_data, highlight_selected_category=True) .call_build_visualization_in_javascript()) self.assertEqual(params, self.get_params({48: 'true'})) def test_show_diagonal(self): visualization_data = self.make_adapter() params = (ScatterplotStructure(visualization_data, show_diagonal=True) .call_build_visualization_in_javascript()) self.assertEqual(params, self.get_params({49: 'true'})) def test_use_global_scale(self): visualization_data = self.make_adapter() params = (ScatterplotStructure(visualization_data, use_global_scale=True) .call_build_visualization_in_javascript()) self.assertEqual(params, self.get_params({50: 'true'})) def test_enable_term_category_description(self): visualization_data = self.make_adapter() params = (ScatterplotStructure(visualization_data, enable_term_category_description=False) .call_build_visualization_in_javascript()) self.assertEqual(params, self.get_params({51: 'false'})) def test_get_custom_term_html(self): visualization_data = self.make_adapter() html = '(function(x) {return "Term: " + x.term})' params = (ScatterplotStructure( visualization_data, get_custom_term_html=html ).call_build_visualization_in_javascript()) self.assertEqual(params, self.get_params({52: html})) def test_header_names(self): visualization_data = self.make_adapter() header_names = {'upper': 'Upper Header Name', 'lower': 'Lower Header Name'} params = (ScatterplotStructure( visualization_data, header_names=header_names ).call_build_visualization_in_javascript()) self.assertEqual(params, self.get_params( {53: '''{"upper": "Upper Header Name", "lower": "Lower Header Name"}'''} )) def test_header_sorting_algos(self): visualization_data = self.make_adapter() header_sorting_algos = {'upper': '(function(a, b) {return b.s - a.s})', 'lower': '(function(a, b) {return a.s - b.s})'} params = (ScatterplotStructure( visualization_data, header_sorting_algos=header_sorting_algos ).call_build_visualization_in_javascript()) self.assertEqual(params, self.get_params( {54: '''{"lower": (function(a, b) {return a.s - b.s}), "upper": (function(a, b) {return b.s - a.s})}'''} )) def test_ignore_categories(self): visualization_data = self.make_adapter() params = (ScatterplotStructure(visualization_data, ignore_categories=True) .call_build_visualization_in_javascript()) self.assertEqual(params, self.get_params({55: 'true'})) def test_background_labels(self): visualization_data = self.make_adapter() params = (ScatterplotStructure(visualization_data, background_labels=[ {'Text': 'Topic 0', 'X': 0.5242579971278757, 'Y': 0.8272937510221724}, {'Text': 'Topic 1', 'X': 0.7107755717675702, 'Y': 0.5034326824672314}, {'Text': 'Topic 2', 'X': 0.09014690078982, 'Y': 0.6261596586530888}]) .call_build_visualization_in_javascript()) self.assertEqual(params, self.get_params( {56: '[{"Text": "Topic 0", "X": 0.5242579971278757, "Y": 0.8272937510221724}, ' '{"Text": "Topic 1", "X": 0.7107755717675702, "Y": 0.5034326824672314}, ' '{"Text": "Topic 2", "X": 0.09014690078982, "Y": 0.6261596586530888}]'})) def test_label_priority_column(self): visualization_data = self.make_adapter() params = (ScatterplotStructure(visualization_data, label_priority_column='LabelPriority') .call_build_visualization_in_javascript()) self.assertEqual(params, self.get_params({57: '"LabelPriority"'})) def test_text_color_column(self): visualization_data = self.make_adapter() params = (ScatterplotStructure(visualization_data, text_color_column='TextColor') .call_build_visualization_in_javascript()) self.assertEqual(params, 
self.get_params({58: '"TextColor"'})) def test_suppress_label_column(self): visualization_data = self.make_adapter() params = (ScatterplotStructure(visualization_data, suppress_text_column='Suppress') .call_build_visualization_in_javascript()) self.assertEqual(params, self.get_params({59: '"Suppress"'})) def test_background_color(self): visualization_data = self.make_adapter() params = (ScatterplotStructure(visualization_data, background_color='#444444') .call_build_visualization_in_javascript()) self.assertEqual(params, self.get_params({60: '"#444444"'})) def test_censor_point_column(self): visualization_data = self.make_adapter() params = (ScatterplotStructure(visualization_data, censor_point_column='CensorPoint') .call_build_visualization_in_javascript()) self.assertEqual(params, self.get_params({61: '"CensorPoint"'})) def test_right_order_column(self): visualization_data = self.make_adapter() params = (ScatterplotStructure(visualization_data, right_order_column='Priority') .call_build_visualization_in_javascript()) self.assertEqual(params, self.get_params({62: '"Priority"'})) def test_sentence_piece(self): visualization_data = self.make_adapter() params = (ScatterplotStructure(visualization_data, subword_encoding='RoBERTa') .call_build_visualization_in_javascript()) self.assertEqual(params, self.get_params({63: '"RoBERTa"'})) def test_top_terms_length(self): visualization_data = self.make_adapter() params = (ScatterplotStructure(visualization_data, top_terms_length=5) .call_build_visualization_in_javascript()) self.assertEqual(params, self.get_params({64: '5'})) def test_top_terms_left_buffer(self): visualization_data = self.make_adapter() params = (ScatterplotStructure(visualization_data, top_terms_left_buffer=10) .call_build_visualization_in_javascript()) self.assertEqual(params, self.get_params({65: '10'}))
apache-2.0
3,404,248,226,562,957,300
51.001802
119
0.603812
false
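Usage sketch for the ScatterplotStructure API exercised by the test record above. The import path and the shape of the words_dict payload are assumptions (the real payload is produced elsewhere in the library); the dict below is a hypothetical placeholder.

from scattertext.viz import ScatterplotStructure, VizDataAdapter

words_dict = {'data': [], 'info': {}}  # hypothetical stand-in for the real term payload
adapter = VizDataAdapter(words_dict)
structure = ScatterplotStructure(adapter,
                                 width_in_pixels=1000,
                                 height_in_pixels=600,
                                 color='d3.interpolatePurples')
js_call = structure.call_build_visualization_in_javascript()  # JS call string embedded in the HTML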
opencord/voltha
voltha/adapters/microsemi_olt/PAS5211.py
1
38285
# # Copyright 2017 the original author or authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ PAS5211 scapy structs used for interaction with Ruby """ import struct from scapy.fields import LEShortField, Field, LEIntField, LESignedIntField, FieldLenField, FieldListField, PacketField, \ ByteField, StrFixedLenField, ConditionalField, StrField, MACField, LELongField, LenField, StrLenField from scapy.layers.l2 import DestMACField, ETHER_ANY, Ether from scapy.packet import Packet, bind_layers from scapy.utils import lhex from scapy.volatile import RandSInt from scapy.layers.ntp import XLEShortField from voltha.adapters.microsemi_olt.PAS5211_constants import PON_ENABLE, PON_PORT_PON, PON_FALSE, PON_TRUE from voltha.extensions.omci.omci_frame import OmciFrame """ PAS5211 Constants """ # TODO get range from olt_version message CHANNELS = range(0, 4) PORTS = range(1, 129) class XLESignedIntField(Field): def __init__(self, name, default): Field.__init__(self, name, default, "<i") def randval(self): return RandSInt() def i2repr(self, pkt, x): return lhex(self.i2h(pkt, x)) class LESignedShortField(Field): def __init__(self, name, default): Field.__init__(self, name, default, "<h") class PAS5211FrameHeader(Packet): name = "PAS5211FrameHeader" fields_desc = [ LEShortField("part", 1), LEShortField("total_parts", 1), LEShortField("size", 0), XLESignedIntField("magic_number", 0x1234ABCD) ] class PAS5211MsgHeader(Packet): name = "PAS5211MsgHeader" fields_desc = [ LEIntField("sequence_number", 0), XLEShortField("opcode", 0), LEShortField("event_type", 0), LESignedShortField("channel_id", -1), LESignedShortField("onu_id", -1), LESignedIntField("onu_session_id", -1) ] class PAS5211Msg(Packet): opcode = "Must be filled by subclass" pass class PAS5211MsgGetProtocolVersion(PAS5211Msg): opcode = 2 name = "PAS5211MsgGetProtocolVersion" fields_desc = [] class PAS5211MsgGetProtocolVersionResponse(PAS5211Msg): name = "PAS5211MsgGetProtocolVersionResponse" fields_desc = [ LEShortField("major_hardware_version", 0), LEShortField("minor_hardware_version", 0), LEShortField("major_pfi_version", 0), LEShortField("minor_pfi_version", 0) ] class PAS5211MsgGetOltVersion(PAS5211Msg): opcode = 3 name = "PAS5211MsgGetOltVersion" fields_desc = [] class PAS5211MsgGetOltVersionResponse(PAS5211Msg): name = "PAS5211MsgGetOltVersionResponse" fields_desc = [ LEShortField("major_firmware_version", 0), LEShortField("minor_firmware_version", 0), LEShortField("build_firmware_version", 0), LEShortField("maintenance_firmware_version", 0), LEShortField("major_hardware_version", 0), LEShortField("minor_hardware_version", 0), LEIntField("system_port_mac_type", 0), FieldLenField("channels_supported", 0, fmt="<H"), LEShortField("onus_supported_per_channel", 0), LEShortField("ports_supported_per_channel", 0), LEShortField("alloc_ids_supported_per_channel", 0), FieldListField("critical_events_counter", [0, 0, 0, 0], LEIntField("entry", 0), count_from=lambda pkt: pkt.channels_supported), FieldListField("non_critical_events_counter", [0, 0, 0, 0], 
LEIntField("entry", 0), count_from=lambda pkt: pkt.channels_supported) ] class SnrBurstDelay(Packet): name = "SnrBurstDelay" fields_desc = [ LEShortField("timer_delay", None), LEShortField("preamble_delay", None), LEShortField("delimiter_delay", None), LEShortField("burst_delay", None) ] def extract_padding(self, p): return "", p class RngBurstDelay(Packet): name = "SnrBurstDelay" fields_desc = [ LEShortField("timer_delay", None), LEShortField("preamble_delay", None), LEShortField("delimiter_delay", None) ] def extract_padding(self, p): return "", p class BurstTimingCtrl(Packet): name = "BurstTimingCtrl" fields_desc = [ PacketField("snr_burst_delay", None, SnrBurstDelay), PacketField("rng_burst_delay", None, RngBurstDelay), LEShortField("burst_delay_single", None), LEShortField("burst_delay_double", None) ] def extract_padding(self, p): return "", p class GeneralOpticsParams(Packet): name = "GeneralOpticsParams" fields_desc = [ ByteField("laser_reset_polarity", None), ByteField("laser_sd_polarity", None), ByteField("sd_source", None), ByteField("sd_hold_snr_ranging", None), ByteField("sd_hold_normal", None), ByteField("reset_type_snr_ranging", None), ByteField("reset_type_normal", None), ByteField("laser_reset_enable", None), ] def extract_padding(self, p): return "", p class ResetValues(Packet): name = "ResetDataBurst" fields_desc = [ ByteField("bcdr_reset_d2", None), ByteField("bcdr_reset_d1", None), ByteField("laser_reset_d2", None), ByteField("laser_reset_d1", None) ] def extract_padding(self, p): return "", p class DoubleResetValues(Packet): name = "ResetDataBurst" fields_desc = [ ByteField("bcdr_reset_d4", None), ByteField("bcdr_reset_d3", None), ByteField("laser_reset_d4", None), ByteField("laser_reset_d3", None) ] def extract_padding(self, p): return "", p class ResetTimingCtrl(Packet): name = "ResetTimingCtrl" fields_desc = [ PacketField("reset_data_burst", None, ResetValues), PacketField("reset_snr_burst", None, ResetValues), PacketField("reset_rng_burst", None, ResetValues), PacketField("single_reset", None, ResetValues), PacketField("double_reset", None, DoubleResetValues), ] def extract_padding(self, p): return "", p class PreambleParams(Packet): name = "PreambleParams" fields_desc = [ ByteField("correlation_preamble_length", None), ByteField("preamble_length_snr_rng", None), ByteField("guard_time_data_mode", None), ByteField("type1_size_data", None), ByteField("type2_size_data", None), ByteField("type3_size_data", None), ByteField("type3_pattern", None), ByteField("delimiter_size", None), ByteField("delimiter_byte1", None), ByteField("delimiter_byte2", None), ByteField("delimiter_byte3", None) ] def extract_padding(self, p): return "", p class PAS5211MsgSetOltOptics(PAS5211Msg): opcode = 106 name = "PAS5211MsgSetOltOptics" fields_desc = [ PacketField("burst_timing_ctrl", None, BurstTimingCtrl), PacketField("general_optics_params", None, GeneralOpticsParams), ByteField("reserved1", 0), ByteField("reserved2", 0), ByteField("reserved3", 0), PacketField("reset_timing_ctrl", None, ResetTimingCtrl), ByteField("voltage_if_mode", None), PacketField("preamble_params", None, PreambleParams), ByteField("reserved4", 0), ByteField("reserved5", 0), ByteField("reserved6", 0) ] class PAS5211MsgSetOltOpticsResponse(PAS5211Msg): name = "PAS5211MsgSetOltOpticsResponse" fields_desc = [] class PAS5211MsgSetOpticsIoControl(PAS5211Msg): opcode = 108 name = "PAS5211MsgSetOpticsIoControl" fields_desc = [ ByteField("i2c_clk", None), ByteField("i2c_data", None), ByteField("tx_enable", None), 
ByteField("tx_fault", None), ByteField("tx_enable_polarity", None), ByteField("tx_fault_polarity", None), ] class PAS5211MsgSetOpticsIoControlResponse(PAS5211Msg): name = "PAS5211MsgSetOpticsIoControlResponse" fields_desc = [] def extract_padding(self, p): return "", p class PAS5211MsgStartDbaAlgorithm(PAS5211Msg): opcode = 55 name = "PAS5211MsgStartDbaAlgorithm" fields_desc = [ LEShortField("size", 0), ByteField("initialization_data", None) ] class PAS5211MsgStartDbaAlgorithmResponse(PAS5211Msg): name = "PAS5211MsgStartDbaAlgorithmResponse" opcode = 10295 fields_desc = [] class PAS5211MsgSetGeneralParam(PAS5211Msg): opcode = 164 name = "PAS5211MsgSetGeneralParam" fields_desc = [ LEIntField("parameter", None), LEIntField("reserved", 0), LEIntField("value", None) ] class PAS5211MsgSetGeneralParamResponse(PAS5211Msg): name = "PAS5211MsgSetGeneralParamResponse" fields_desc = [] class PAS5211MsgGetGeneralParam(PAS5211Msg): opcode = 165 name = "PAS5211MsgGetGeneralParam" fields_desc = [ LEIntField("parameter", None), LEIntField("reserved", 0), ] class PAS5211MsgGetGeneralParamResponse(PAS5211Msg): name = "PAS5211MsgGetGeneralParamResponse" fields_desc = [ LEIntField("parameter", None), LEIntField("reserved", 0), LEIntField("value", None) ] class PAS5211MsgGetDbaMode(PAS5211Msg): opcode = 57 name = "PAS5211MsgGetDbaMode" fields_desc = [] class PAS5211MsgGetDbaModeResponse(PAS5211Msg): name = "PAS5211MsgGetDbaModeResponse" fields_desc = [ LEIntField("dba_mode", None), ] class PAS5211MsgAddOltChannel(PAS5211Msg): opcode = 4 name = "PAS5211MsgAddOltChannel" fields_desc = [ ] class PAS5211MsgAddOltChannelResponse(PAS5211Msg): name = "PAS5211MsgAddOltChannelResponse" fields_desc = [ ] class PAS5211MsgSetAlarmConfig(PAS5211Msg): opcode = 48 name = "PAS5211MsgSetAlarmConfig" fields_desc = [ LEShortField("type", None), LEShortField("activate", None), LEIntField("parameter1", None), LEIntField("parameter2", None), LEIntField("parameter3", None), LEIntField("parameter4", None) ] class PAS5211MsgSetOltChannelActivationPeriod(PAS5211Msg): opcode = 11 name = "PAS5211MsgSetOltChannelActivationPeriod" fields_desc = [ LEIntField("activation_period", None) ] class PAS5211MsgSetOltChannelActivationPeriodResponse(PAS5211Msg): name = "PAS5211MsgSetOltChannelActivationPeriodResponse" fields_desc = [] class PAS5211MsgSetAlarmConfigResponse(PAS5211Msg): name = "PAS5211MsgSetAlarmConfigResponse" fields_desc = [] class PAS5211MsgSendCliCommand(PAS5211Msg): opcode = 15 name = "PAS5211MsgSendCliCommand" fields_desc = [ FieldLenField("size", None, fmt="<H", length_of="command"), StrField("command", "") ] class PAS5211MsgSwitchToInboundMode(PAS5211Msg): opcode = 0xec name = "PAS5211MsgSwitchToInboundMode" fields_desc = [ MACField("mac", None), LEShortField("mode", 0) ] class PAS5211MsgGetActivationAuthMode(PAS5211Msg): opcode = 145 name = "PAS5211MsgGetActivationAuthMode" fields_desc = [ LEShortField("nothing", 0) # no idea why this is here ] class PAS5211MsgGetActivationAuthModeResponse(PAS5211Msg): opcode = 10385 name = "PAS5211MsgGetActivationAuthModeResponse" fields_desc = [ LEShortField("mode", 0), LEShortField("reserved", 0), ] class PAS5211MsgSetOnuOmciPortId(PAS5211Msg): opcode = 41 name = "PAS5211MsgSetOnuOmciPortId" fields_desc = [ LEShortField("port_id", 0), LEShortField("activate", PON_ENABLE) ] class PAS5211MsgSetOnuOmciPortIdResponse(PAS5211Msg): opcode = 10281 name = "PAS5211MsgSetOnuOmciPortIdResponse" fields_desc = [] class PAS5211MsgGetLogicalObjectStatus(PAS5211Msg): opcode = 223 name = 
"PAS5211MsgGetLogicalObjectStatus" fields_desc = [ LEIntField("type", None), LEIntField("value", None) ] class PAS5211MsgGetLogicalObjectStatusResponse(PAS5211Msg): opcode = 10463 name = "PAS5211MsgGetLogicalObjectStatusResponse" fields_desc = [ LEIntField("type", None), LEIntField("value", None), FieldLenField("return_length", None, fmt="<H", length_of="return_value"), LEIntField("return_value", "") ] class PAS5211MsgSetOnuAllocId(PAS5211Msg): opcode = 8 name = "PAS5211MsgSetOnuAllocId" fields_desc = [ LEShortField("alloc_id", None), LEShortField("allocate", None) ] class PAS5211MsgSetOnuAllocIdResponse(PAS5211Msg): opcode = 10248 name = "PAS5211MsgSetOnuAllocIdResponse" fields_desc = [] class PAS5211MsgSendDbaAlgorithmMsg(PAS5211Msg): opcode = 47 name = "PAS5211MsgSendDbaAlgorithmMsg" fields_desc = [ # LEShortField("id", None), FieldLenField("size", None, fmt="<H", length_of="data"), StrLenField("data", "", length_from=lambda x: x.size) ] class PAS5211MsgSendDbaAlgorithmMsgResponse(PAS5211Msg): opcode = 10287 name = "PAS5211MsgSendDbaAlgorithmMsgResponse" fields_desc = [] class PAS5211MsgSetPortIdConfig(PAS5211Msg): opcode = 18 name = "PAS5211MsgSetPortIdConfig" fields_desc = [ LEShortField("port_id", None), LEShortField("activate", PON_ENABLE), LEShortField("alloc_id", None), LEIntField("type", None), LEIntField("destination", None), # Is this the CNI port # if yes then values are 0-11 (for ruby) LEShortField("reserved", None) ] class PAS5211MsgSetPortIdConfigResponse(PAS5211Msg): opcode = 10258 name = "PAS5211MsgSetPortIdConfigResponse" fields_desc = [] class PAS5211MsgGetOnuIdByPortId(PAS5211Msg): opcode = 196 name = "PAS5211MsgGetOnuIdByPortId" fields_desc = [ LEShortField("port_id", None), LEShortField("reserved", 0) ] class PAS5211MsgGetOnuIdByPortIdResponse(PAS5211Msg): opcode = 196 name = "PAS5211MsgGetOnuIdByPortIdResponse" fields_desc = [ LEShortField("valid", None), LEShortField("onu_id", None) ] class PAS5211SetVlanUplinkConfiguration(PAS5211Msg): opcode = 39 name = "PAS5211SetVlanUplinkConfiguration" fields_desc = [ LEShortField("port_id", None), LEShortField("pvid_config_enabled", None), LEShortField("min_cos", None), LEShortField("max_cos", None), LEIntField("de_bit", None), LEShortField("reserved", 0) ] class PAS5211SetVlanUplinkConfigurationResponse(PAS5211Msg): opcode = 10279 name = "PAS5211SetVlanUplinkConfigurationResponse" fields_desc = [] class PAS5211GetOnuAllocs(PAS5211Msg): opcode = 9 name = "PAS5211GetOnuAllocs" fields_desc = [ LEShortField("nothing", None) # It's in the PMC code... so yeah. 
] class PAS5211GetOnuAllocsResponse(PAS5211Msg): opcode = 9 name = "PAS5211GetOnuAllocsResponse" fields_desc = [ LEShortField("allocs_number", None), FieldListField("alloc_ids", None, LEShortField("alloc_id", None)) ] class PAS5211GetSnInfo(PAS5211Msg): opcode = 7 name = "PAS5211GetSnInfo" fields_desc = [ StrFixedLenField("serial_number", None, 8) ] class PAS5211GetSnInfoResponse(PAS5211Msg): opcode = 7 name = "PAS5211GetSnInfoResponse" fields_desc = [ StrFixedLenField("serial_number", None, 8), LEShortField("found", None), LEShortField("type", None), LEShortField("onu_state", None), LELongField("equalization_delay", None), LEShortField("reserved", None) ] class PAS5211GetOnusRange(PAS5211Msg): opcode = 116 name = "PAS5211GetOnusRange" fields_desc = [ LEShortField("nothing", None) ] class PAS5211GetOnusRangeResponse(PAS5211Msg): opcode = 116 name = "PAS5211GetOnusRangeResponse" fields_desc = [ LEIntField("min_distance", None), LEIntField("max_distance", None), LEIntField("actual_min_distance", None), LEIntField("actual_max_distance", None) ] class PAS5211GetPortIdConfig(PAS5211Msg): opcode = 19 name = "PAS5211GetPortIdConfig" fields_desc = [ LEShortField("port_id", None), LEShortField("reserved", None) ] class PAS5211GetPortIdConfigResponse(PAS5211Msg): opcode = 19 name = "PAS5211GetPortIdConfigResponse" fields_desc = [ LEShortField("activate", None), LEShortField("encryption_state", None), LEShortField("alloc_id", None), LEShortField("type", None), LEShortField("destination", None), LEShortField("reserved", None), ] class PAS5211SetSVlanAtConfig(PAS5211Msg): opcode = 63 name = "PAS5211SetSVlanAtConfig" fields_desc = [ LEShortField("svlan_id", None), LEShortField("forwarding_mode", None), LEShortField("use_svlan", None), LEShortField("use_cvlan", None), LEShortField("use_pbits", None), LEShortField("discard_unknown", None), ] class PAS5211SetSVlanAtConfigResponse(PAS5211Msg): opcode = 63 name = "PAS5211SetSVlanAtConfigResponse" fields_desc = [] class PAS5211SetUplinkVlanHandl(PAS5211Msg): opcode = 34 name = "PAS5211SetUplinkVlanHandl" fields_desc = [ LEShortField("source_port_id", None), LEShortField("primary_vid", None), LEShortField("pvid_config_enabled", None), LEShortField("svlan_tag_operation", None), LEShortField("cvlan_tag_operation", None), LEShortField("new_svlan_tag", None), LEShortField("new_cvlan_tag", None), LEShortField("destination", None) ] class PAS5211SetUplinkVlanHandlResponse(PAS5211Msg): opcode = 34 name = "PAS5211SetUplinkVlanHandlResponse" fields_desc = [] class PAS5211SetVlanGenConfig(PAS5211Msg): opcode = 43 name = "PAS5211SetVlanGenConfig" fields_desc = [ LEShortField("direction", None), LEShortField("extended_svlan_type", None), LEShortField("insertion_svlan_ethertype", None), LEShortField("extended_cvlan_type", None), LEShortField("insertion_cvlan_ethertype", None), LEShortField("pon_pcp_code", None), LEShortField("cni_pcp_code", None), LEShortField("reserved", None) ] class PAS5211SetVlanGenConfigResponse(PAS5211Msg): opcode = 43 name = "PAS5211SetVlanGenConfigResponse" fields_desc = [] class PAS5211SetVlanDownConfig(PAS5211Msg): opcode = 32 name = "PAS5211SetVlanDownConfig" fields_desc = [ LEShortField("svlan_id", None), LEShortField("double_tag_handling", None), LEShortField("vlan_priority_handling", None) ] class PAS5211SetVlanDownConfigResponse(PAS5211Msg): opcode = 32 name = "PAS5211SetVlanDownConfigResponse" fields_desc = [] class PAS5211SetDownVlanHandl(PAS5211Msg): opcode = 27 name = "PAS5211SetDownVlanHandl" fields_desc = [ LEShortField("svlan_tag", 
None), LEShortField("cvlan_tag", None), LEShortField("double_tag_handling", None), LEShortField("priority_handling", None), LEShortField("input_priority", None), LEShortField("svlan_tag_operation", None), LEShortField("cvlan_tag_operation", None), LEShortField("port_id", None), LEShortField("new_cvlan_tag", None), LEShortField("destination", None), LEShortField("output_vlan_prio_handle", None), LEShortField("output_priority", None) ] class PAS5211SetDownVlanHandlResponse(PAS5211Msg): opcode = 27 name = "PAS5211SetDownVlanHandlResponse" fields_desc = [] class Frame(Packet): pass class PAS5211MsgSendFrame(PAS5211Msg): opcode = 42 name = "PAS5211MsgSendFrame" fields_desc = [ FieldLenField("length", None, fmt="<H", length_of="frame"), LEShortField("port_type", PON_PORT_PON), LEShortField("port_id", 0), LEShortField("management_frame", PON_FALSE), ConditionalField(PacketField("frame", None, Packet), lambda pkt: pkt.management_frame == PON_FALSE), ConditionalField(PacketField("frame", None, OmciFrame), lambda pkt: pkt.management_frame == PON_TRUE) ] def extract_padding(self, p): return "", p class PAS5211MsgSendFrameResponse(PAS5211Msg): name = "PAS5211MsgSendFrameResponse" fields_desc = [] class PAS5211Event(PAS5211Msg): opcode = 12 class PAS5211EventFrameReceived(PAS5211Event): name = "PAS5211EventFrameReceived" fields_desc = [ FieldLenField("length", None, length_of="frame", fmt="<H"), LEShortField("port_type", PON_PORT_PON), LEShortField("port_id", 0), LEShortField("management_frame", PON_FALSE), LEShortField("classification_entity", None), LEShortField("l3_offset", None), LEShortField("l4_offset", None), LEShortField("ignored", 0), # TODO these do receive values, but there is no code in PMC using it ConditionalField(PacketField("frame", None, Packet), lambda pkt: pkt.management_frame == PON_FALSE), ConditionalField(PacketField("frame", None, OmciFrame), lambda pkt: pkt.management_frame == PON_TRUE) ] class PAS5211EventDbaAlgorithm(PAS5211Event): name = "PAS5211EventDbaAlgorithm" fields_desc = [ FieldLenField("size", None, fmt="<H", length_of="data"), StrLenField("data", "", length_from=lambda x: x.size) ] class PAS5211EventOnuActivation(PAS5211Event): name = "PAS5211EventOnuActivation" event_type = 1 fields_desc = [ StrFixedLenField("serial_number", None, length=8), LEIntField("equalization_period", None) ] class PAS5211EventOnuDeactivation(PAS5211Event): name = "PAS5211EventOnuDeactivation" event_type = 2 fields_desc = [ LEShortField("code", None) ] class PAS5211EventLogMsg(PAS5211Event): name = "PAS5211EventLogMsg" event_type = 3 fields_desc = [] class PAS5211EventFWGeneralPrint(PAS5211Event): name = "PAS5211EventFWGeneralPrint" event_type = 4 fields_desc = [] class PAS5211EventFWTracePrint(PAS5211Event): name = "PAS5211EventFWTracePrint" event_type = 5 fields_desc = [] class PAS5211EventStartEncryption(PAS5211Event): name = "PAS5211EventStartEncryption" event_type = 6 fields_desc = [] class PAS5211EventStopEncryption(PAS5211Event): name = "PAS5211EventStopEncryption" event_type = 7 fields_desc = [] class PAS5211EventUpdateEncryption(PAS5211Event): name = "PAS5211EventUpdateEncryption" event_type = 8 fields_desc = [] class PAS5211EventAlarmNotification(PAS5211Event): name = "PAS5211EventAlarmNotification" event_type = 9 fields_desc = [ LEShortField("code", None), LEIntField("parameter1", None), LEIntField("parameter2", None), LEIntField("parameter3", None), LEIntField("parameter4", None) ] class PAS5211EventDBAAlgorithmEvent(PAS5211Event): name = "PAS5211EventDBAAlgorithmEvent" event_type 
= 11 fields_desc = [] class PAS5211EventOLTReset(PAS5211Event): name = "PAS5211EventOLTReset" event_type = 12 fields_desc = [] class PAS5211EventOnuSleepMode(PAS5211Event): name = "PAS5211EventOnuSleepMode" event_type = 13 fields_desc = [] class PAS5211EventAssignAllocId(PAS5211Event): name = "PAS5211EventAssignAllocId" event_type = 14 fields_desc = [] class PAS5211EventConfigOMCIPort(PAS5211Event): name = "PAS5211EventConfigOMCIPort" event_type = 15 fields_desc = [] class PAS5211EventPloamMessageReceived(PAS5211Event): name = "PAS5211EventPloamMessageReceived" event_type = 17 fields_desc = [] class PAS5211EventLoadOLTBinaryCompleted(PAS5211Event): name = "PAS5211EventLoadOLTBinaryCompleted" event_type = 18 fields_desc = [] class PAS5211EventMasterOLTFail(PAS5211Event): name = "PAS5211EventMasterOLTFail" event_type = 19 fields_desc = [] class PAS5211EventRedundantSwitchOverStatus(PAS5211Event): name = "PAS5211EventRedundantSwitchOverStatus" event_type = 20 fields_desc = [] class PAS5211EventSyncOLTData(PAS5211Event): name = "PAS5211EventSyncOLTData" event_type = 21 fields_desc = [] class PAS5211EventEQDChange(PAS5211Event): name = "PAS5211EventEQDChange" event_type = 22 fields_desc = [] class PAS5211EventXAUIStatusNotification(PAS5211Event): name = "PAS5211EventXAUIStatusNotification" event_type = 23 fields_desc = [] class PAS5211EventUnauthenticatedONU(PAS5211Event): name = "PAS5211EventUnauthenticatedONU" event_type = 24 fields_desc = [] class PAS5211EventFalseQFullReported(PAS5211Event): name = "PAS5211EventFalseQFullReported" event_type = 25 fields_desc = [] class PAS5211EventOpticalModuleIndication(PAS5211Event): name = "PAS5211EventOpticalModuleIndication" event_type = 27 fields_desc = [] class PAS5211EventActivationFailure(PAS5211Event): name = "PAS5211EventActivationFailure" event_type = 28 fields_desc = [] class PAS5211EventBipError(PAS5211Event): name = "PAS5211EventBipError" event_type = 29 fields_desc = [] class PAS5211EventREIError(PAS5211Event): name = "PAS5211EventREIError" event_type = 30 fields_desc = [] class PAS5211EventRDNMultiONUFailure(PAS5211Event): name = "PAS5211EventRDNMultiONUFailure" event_type = 31 fields_desc = [] class PAS5211EventUnexpectedSN(PAS5211Event): name = "PAS5211EventUnexpectedSN" event_type = 32 fields_desc = [] class PAS5211EventRDNSwitchOverONUResult(PAS5211Event): name = "PAS5211EventRDNSwitchOverONUResult" event_type = 33 fields_desc = [] class PAS5211EventGMacMalfucntionSuspected(PAS5211Event): name = "PAS5211EventGMacMalfucntionSuspected" event_type = 34 fields_desc = [] class PAS5211GetPortIdDownstreamPolicingConfig(PAS5211Msg): opcode = 82 name = "PAS5211GetPortIdDownstreamPolicingConfig" fields_desc = [ LEShortField("port_id", None), LEShortField("reserved", None)] class PAS5211GetPortIdDownstreamPolicingConfigResponse(PAS5211Msg): opcode = 82 name = "PAS5211GetPortIdDownstreamPolicingConfigResponse" fields_desc = [ LEIntField("committed_bandwidth", None), LEIntField("excessive_bandwidth", None), LEShortField("committed_burst_limit", None), LEShortField("excessive_burst_limit", None), LEShortField("ds_policing_config_id", None), LEShortField("reserved", None)] class PAS5211RemoveDownstreamPolicingConfig(PAS5211Msg): opcode = 76 name = "PAS5211RemoveDownstreamPolicingConfig" fields_desc = [ LEShortField("policing_config_id", None), LEShortField("reserved", None)] class PAS5211RemoveDownstreamPolicingConfigResponse(PAS5211Msg): opcode = 76 name = "PAS5211RemoveDownstreamPolicingConfigResponse" fields_desc = [] class 
PAS5211SetPortIdPolicingConfig(PAS5211Msg): opcode = 80 name = "PAS5211SetPortIdPolicingConfig" fields_desc = [ LEShortField("direction", None), LEShortField("port_id", None), LEShortField("policing_config_id", None), LEShortField("reserved", None)] class PAS5211SetPortIdPolicingConfigResponse(PAS5211Msg): opcode = 80 name = "PAS5211SetPortIdPolicingConfigResponse" fields_desc = [] class PAS5211UnsetPortIdPolicingConfig(PAS5211Msg): opcode = 81 name = "PAS5211UnsetSetPortIdPolicingConfig" fields_desc = [ LEShortField("direction", None), LEShortField("port_id", None)] class PAS5211UnsetPortIdPolicingConfigResponse(PAS5211Msg): opcode = 81 name = "PAS5211UnsetSetPortIdPolicingConfigResponse" fields_desc = [] class PAS5211SetDownstreamPolicingConfig(PAS5211Msg): opcode = 74 name = "PAS5211SetDownstreamPolicingConfig" fields_desc = [ LEIntField("committed_bandwidth", None), LEIntField("excessive_bandwidth", None), LEShortField("committed_burst_limit", None), LEShortField("excessive_burst_limit", None)] class PAS5211SetDownstreamPolicingConfigResponse(PAS5211Msg): opcode = 74 name = "PAS5211SetDownstreamPolicingConfigResponse" fields_desc = [ LEShortField("policing_config_id", None), LEShortField("reserved", None)] class PAS5211SetUpstreamPolicingConfig(PAS5211Msg): opcode = 77 name = "PAS5211SetUpstreamPolicingConfig" fields_desc = [ LEIntField("bandwidth", None), LEShortField("burst_limit", None), LEShortField("reserved", None)] class PAS5211SetUpstreamPolicingConfigResponse(PAS5211Msg): opcode = 77 name = "PAS5211SetDownstreamPolicingResponse" fields_desc = [ LEShortField("policing_config_id", None), LEShortField("reserved", None)] class PAS5211Dot3(Packet): name = "PAS5211Dot3" fields_desc = [DestMACField("dst"), MACField("src", ETHER_ANY), LenField("len", None, "H")] MIN_FRAME_SIZE = 60 def post_build(self, pkt, payload): pkt += payload size = ord(payload[4]) + (ord(payload[5]) << 8) length = size + 6 # this is a idiosyncracy of the PASCOMM protocol pkt = pkt[:12] + chr(length >> 8) + chr(length & 0xff) + pkt[14:] padding = self.MIN_FRAME_SIZE - len(pkt) if padding > 0: pkt = pkt + ("\x00" * padding) return pkt ''' This is needed in order to force scapy to use PAS5211Dot3 instead of the default Dot3 that the Ether class uses. ''' @classmethod def PAS_dispatch_hook(cls, _pkt=None, *args, **kargs): if _pkt and len(_pkt) >= 14: if struct.unpack("!H", _pkt[12:14])[0] <= 1500: return PAS5211Dot3 return cls Ether.dispatch_hook = PAS_dispatch_hook # bindings for messages received # fix for v2 of Microsemi OLT. 
bind_layers(Ether, PAS5211FrameHeader, type=0x0a00) bind_layers(PAS5211Dot3, PAS5211FrameHeader) bind_layers(PAS5211FrameHeader, PAS5211MsgHeader) bind_layers(PAS5211MsgHeader, PAS5211MsgGetProtocolVersion, opcode=0x3000 | 2) bind_layers(PAS5211MsgHeader, PAS5211MsgGetProtocolVersionResponse, opcode=0x2800 | 2) bind_layers(PAS5211MsgHeader, PAS5211MsgGetOltVersion, opcode=0x3000 | 3) bind_layers(PAS5211MsgHeader, PAS5211MsgGetOltVersionResponse, opcode=0x3800 | 3) bind_layers(PAS5211MsgHeader, PAS5211MsgSetOltOptics, opcode=0x3000 | 106) bind_layers(PAS5211MsgHeader, PAS5211MsgSetOltOpticsResponse, opcode=0x2800 | 106) bind_layers(PAS5211MsgHeader, PAS5211MsgSetOpticsIoControl, opcode=0x3000 | 108) bind_layers(PAS5211MsgHeader, PAS5211MsgSetOpticsIoControlResponse, opcode=0x2800 | 108) bind_layers(PAS5211MsgHeader, PAS5211MsgSetGeneralParam, opcode=0x3000 | 164) bind_layers(PAS5211MsgHeader, PAS5211MsgSetGeneralParamResponse, opcode=0x2800 | 164) bind_layers(PAS5211MsgHeader, PAS5211MsgGetGeneralParam, opcode=0x3000 | 165) bind_layers(PAS5211MsgHeader, PAS5211MsgGetGeneralParamResponse, opcode=0x2800 | 165) bind_layers(PAS5211MsgHeader, PAS5211MsgAddOltChannel, opcode=0x3000 | 4) bind_layers(PAS5211MsgHeader, PAS5211MsgAddOltChannelResponse, opcode=0x2800 | 4) bind_layers(PAS5211MsgHeader, PAS5211MsgSetAlarmConfig, opcode=0x3000 | 48) bind_layers(PAS5211MsgHeader, PAS5211MsgSetAlarmConfigResponse, opcode=0x2800 | 48) bind_layers(PAS5211MsgHeader, PAS5211MsgSetOltChannelActivationPeriod, opcode=0x3000 | 11) bind_layers(PAS5211MsgHeader, PAS5211MsgSetOltChannelActivationPeriodResponse, opcode=0x2800 | 11) bind_layers(PAS5211MsgHeader, PAS5211MsgStartDbaAlgorithm, opcode=0x3000 | 55) bind_layers(PAS5211MsgHeader, PAS5211MsgStartDbaAlgorithmResponse, opcode=0x2800 | 55) bind_layers(PAS5211MsgHeader, PAS5211MsgGetDbaMode, opcode=0x3000 | 57) bind_layers(PAS5211MsgHeader, PAS5211MsgGetDbaModeResponse, opcode=0x2800 | 57) bind_layers(PAS5211MsgHeader, PAS5211MsgSendFrame, opcode=0x3000 | 42) bind_layers(PAS5211MsgHeader, PAS5211MsgSendFrameResponse, opcode=0x2800 | 42) bind_layers(PAS5211MsgHeader, PAS5211MsgGetActivationAuthMode, opcode=0x3000 | 145) bind_layers(PAS5211MsgHeader, PAS5211MsgGetActivationAuthModeResponse, opcode=0x2800 | 145) bind_layers(PAS5211MsgHeader, PAS5211MsgSetOnuOmciPortId, opcode=0x3000 | 41) bind_layers(PAS5211MsgHeader, PAS5211MsgSetOnuOmciPortIdResponse, opcode=0x2800 | 41) bind_layers(PAS5211MsgHeader, PAS5211MsgGetLogicalObjectStatus, opcode=0x3000 | 223) bind_layers(PAS5211MsgHeader, PAS5211MsgGetLogicalObjectStatusResponse, opcode=0x2800 | 223) bind_layers(PAS5211MsgHeader, PAS5211MsgSetOnuAllocId, opcode=0x3000 | 8) bind_layers(PAS5211MsgHeader, PAS5211MsgSetOnuAllocIdResponse, opcode=0x2800 | 8) bind_layers(PAS5211MsgHeader, PAS5211MsgSendDbaAlgorithmMsg, opcode=0x3000 | 47) bind_layers(PAS5211MsgHeader, PAS5211MsgSendDbaAlgorithmMsgResponse, opcode=0x2800 | 47) bind_layers(PAS5211MsgHeader, PAS5211MsgSetPortIdConfig, opcode=0x3000 | 18) bind_layers(PAS5211MsgHeader, PAS5211MsgSetPortIdConfigResponse, opcode=0x2800 | 18) bind_layers(PAS5211MsgHeader, PAS5211MsgGetOnuIdByPortId, opcode=0x3000 | 196) bind_layers(PAS5211MsgHeader, PAS5211MsgGetOnuIdByPortIdResponse, opcode=0x2800 | 196) bind_layers(PAS5211MsgHeader, PAS5211SetVlanUplinkConfiguration, opcode=0x3000 | 39) bind_layers(PAS5211MsgHeader, PAS5211SetVlanUplinkConfigurationResponse, opcode=0x2800 | 39) bind_layers(PAS5211MsgHeader, PAS5211GetOnuAllocs, opcode=0x3000 | 9) bind_layers(PAS5211MsgHeader, 
PAS5211GetOnuAllocsResponse, opcode=0x2800 | 9) bind_layers(PAS5211MsgHeader, PAS5211GetSnInfo, opcode=0x3000 | 7) bind_layers(PAS5211MsgHeader, PAS5211GetSnInfoResponse, opcode=0x2800 | 7) bind_layers(PAS5211MsgHeader, PAS5211GetOnusRange, opcode=0x3000 | 116) bind_layers(PAS5211MsgHeader, PAS5211GetOnusRangeResponse, opcode=0x2800 | 116) bind_layers(PAS5211MsgHeader, PAS5211GetPortIdConfig, opcode=0x3000 | 19) bind_layers(PAS5211MsgHeader, PAS5211GetPortIdConfigResponse, opcode=0x2800 | 19) bind_layers(PAS5211MsgHeader, PAS5211SetSVlanAtConfig, opcode=0x3000 | 63) bind_layers(PAS5211MsgHeader, PAS5211SetSVlanAtConfigResponse, opcode=0x2800 | 63) bind_layers(PAS5211MsgHeader, PAS5211SetUplinkVlanHandl, opcode=0x3000 | 34) bind_layers(PAS5211MsgHeader, PAS5211SetUplinkVlanHandlResponse, opcode=0x2800 | 34) bind_layers(PAS5211MsgHeader, PAS5211SetVlanGenConfig, opcode=0x3000 | 43) bind_layers(PAS5211MsgHeader, PAS5211SetVlanGenConfigResponse, opcode=0x2800 | 43) bind_layers(PAS5211MsgHeader, PAS5211SetVlanDownConfig, opcode=0x3000 | 32) bind_layers(PAS5211MsgHeader, PAS5211SetVlanDownConfigResponse, opcode=0x2800 | 32) bind_layers(PAS5211MsgHeader, PAS5211SetDownVlanHandl, opcode=0x3000 | 27) bind_layers(PAS5211MsgHeader, PAS5211SetDownVlanHandlResponse, opcode=0x2800 | 27) bind_layers(PAS5211MsgHeader, PAS5211SetDownstreamPolicingConfig, opcode=0x3000 | 74) bind_layers(PAS5211MsgHeader, PAS5211SetDownstreamPolicingConfigResponse, opcode=0x2800 | 74) bind_layers(PAS5211MsgHeader, PAS5211SetUpstreamPolicingConfig, opcode=0x3000 | 77) bind_layers(PAS5211MsgHeader, PAS5211SetUpstreamPolicingConfigResponse, opcode=0x2800 | 77) bind_layers(PAS5211MsgHeader, PAS5211SetPortIdPolicingConfig, opcode=0x3000 | 80) bind_layers(PAS5211MsgHeader, PAS5211SetPortIdPolicingConfigResponse, opcode=0x2800 | 80) bind_layers(PAS5211MsgHeader, PAS5211UnsetPortIdPolicingConfig, opcode=0x3000 | 81) bind_layers(PAS5211MsgHeader, PAS5211UnsetPortIdPolicingConfigResponse, opcode=0x2800 | 81) bind_layers(PAS5211MsgHeader, PAS5211GetPortIdDownstreamPolicingConfig, opcode=0x3000 | 82) bind_layers(PAS5211MsgHeader, PAS5211GetPortIdDownstreamPolicingConfigResponse, opcode=0x2800 | 82) bind_layers(PAS5211MsgHeader, PAS5211RemoveDownstreamPolicingConfig, opcode=0x3000 | 76) bind_layers(PAS5211MsgHeader, PAS5211RemoveDownstreamPolicingConfigResponse, opcode=0x2800 | 76) # bindings for events received bind_layers(PAS5211MsgHeader, PAS5211EventOnuActivation, opcode=0x2800 | 12, event_type=1) bind_layers(PAS5211MsgHeader, PAS5211EventOnuDeactivation, opcode=0x2800 | 12, event_type=2) bind_layers(PAS5211MsgHeader, PAS5211EventFrameReceived, opcode=0x2800 | 12, event_type=10) bind_layers(PAS5211MsgHeader, PAS5211EventDbaAlgorithm, opcode=0x2800 | 12, event_type=11) bind_layers(PAS5211MsgHeader, PAS5211EventAlarmNotification, opcode=0x2800 | 12, event_type=9) bind_layers(PAS5211MsgHeader, PAS5211Event, opcode=0x2800 | 12) class Display(object): def __init__(self, pkts): self.pkts = pkts def show(self, seq): self.pkts[seq].show() def __getitem__(self, key): self.show(key) def walk(self, index=0): while index < len(self.pkts): self.show(index) try: input("(current packet - %s) Next packet?" 
% index) except Exception as e: pass index += 1 if __name__ == '__main__': from scapy.utils import rdpcap import sys import code packets = rdpcap(sys.argv[1]) p = Display(packets) def walk(index=0, interactive=True, channel=-1): if interactive is not True: for packet in packets: if PAS5211MsgHeader in packet: if PAS5211MsgGetOltVersion not in packet and PAS5211MsgGetOltVersionResponse not in packet: if channel is not -1: if packet[PAS5211MsgHeader].channel_id == channel: packet.show() else: packet.show() else: p.walk(index=index) code.interact(local=locals())
apache-2.0
3,870,633,677,886,018,000
30.984127
121
0.694371
false
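A round-trip sketch for the scapy layers and bind_layers() rules defined in the record above: build a GetOltVersion request, serialize it, and dissect the bytes back. The MAC addresses are made up, the import path simply mirrors the file location, and str() is used for serialization because this adapter code is Python 2.

from scapy.layers.l2 import Ether
from voltha.adapters.microsemi_olt.PAS5211 import (
    PAS5211FrameHeader, PAS5211MsgHeader, PAS5211MsgGetOltVersion)

request = (Ether(src='68:05:ca:00:00:01', dst='00:0c:d5:00:00:01', type=0x0a00) /
           PAS5211FrameHeader() /
           PAS5211MsgHeader(sequence_number=1, opcode=0x3000 | 3, channel_id=-1) /
           PAS5211MsgGetOltVersion())

wire = str(request)      # serialized frame as sent to the OLT
decoded = Ether(wire)    # re-dissected using the bindings declared above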
Jufik/python-fixer
setup.py
1
1411
#!/usr/bin/env python
import os

from setuptools import setup, find_packages

README = os.path.join(os.path.dirname(__file__), 'README.rst')

# when running tests using tox, README.md is not found
try:
    with open(README) as file:
        long_description = file.read()
except Exception:
    long_description = ''

setup(
    name='fixerio',
    version='1.0.0',
    description='A Python client for fixer.io API',
    long_description=long_description,
    url='https://github.com/Jufik/python-fixer',
    author='Julien Kieffer TISSIER',
    author_email='[email protected]',
    maintainer='Fabian Affolter',
    maintainer_email='[email protected]',
    license='MIT',
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.2',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
    ],
    keywords='fixerio api currencies',
    packages=find_packages(),
    install_requires=['requests'],
    # test_suite='tests',
)
mit
-952,621,933,555,990,000
31.068182
71
0.635011
false
atelier-cartographique/static-sectioner
sectioner/parser.py
1
2130
# Copyright (C) 2016 Pierre Marchand <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

from html.parser import HTMLParser
from html.entities import html5


class TemplateParser(HTMLParser):

    def __init__(self):
        HTMLParser.__init__(self, convert_charrefs=True)

    def handle_starttag(self, tag, attrs):
        elem = '<{}'.format(tag)
        attrs_dict = dict()
        for attr in attrs:
            k, v = attr
            val = v
            if v.startswith('$'):
                ph = v[1:]
                if ph in self.template_data:
                    val = self.template_data[ph]
            attrs_dict[k] = val
            elem += ' {}="{}"'.format(k, val)
        elem += '>'
        self.result.append(elem)
        if 'data-role' in attrs_dict:
            role = attrs_dict['data-role']
            if role in self.template_data:
                self.result.append(self.template_data[role])

    def handle_endtag(self, tag):
        self.result.append('</{}>'.format(tag))

    def handle_data(self, data):
        self.result.append(data)

    def handle_comment(self, data):
        self.result.append('<!-- {} -->'.format(data))

    def handle_entityref(self, name):
        self.result.append(html5[name])

    def handle_decl(self, data):
        self.result.append('<!{}>'.format(data))

    def apply_template(self, template, data):
        self.result = []
        self.template_data = data
        self.feed(template)
        return ''.join(self.result)
agpl-3.0
-1,908,686,766,818,658,000
32.28125
74
0.615962
false
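A minimal usage sketch for the TemplateParser in the record above; the import path mirrors the file location, and the template string and data dict are invented for illustration.

from sectioner.parser import TemplateParser

template = '<div data-role="body"><img src="$cover" alt=""></div>'
data = {'cover': '/media/cover.png', 'body': '<p>Hello</p>'}

parser = TemplateParser()
html = parser.apply_template(template, data)
# html == '<div data-role="body"><p>Hello</p><img src="/media/cover.png" alt=""></div>'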
team-vigir/flexbe_behavior_engine
flexbe_mirror/src/flexbe_mirror/mirror_state.py
1
1445
#!/usr/bin/env python
import rospy
from rospy.exceptions import ROSInterruptException

from flexbe_core import EventState
from flexbe_core.proxy import ProxyPublisher, ProxySubscriberCached

from std_msgs.msg import String, UInt8


class MirrorState(EventState):
    '''
    This state will display its possible outcomes as buttons in the GUI
    and is designed in a way to be created dynamically.
    '''

    def __init__(self, target_name, target_path, given_outcomes, outcome_autonomy):
        super(MirrorState, self).__init__(outcomes=given_outcomes)
        self.set_rate(100)
        self._target_name = target_name
        self._target_path = target_path

        self._outcome_topic = 'flexbe/mirror/outcome'

        self._pub = ProxyPublisher()
        self._sub = ProxySubscriberCached({self._outcome_topic: UInt8})

    def execute(self, userdata):
        if self._sub.has_buffered(self._outcome_topic):
            msg = self._sub.get_from_buffer(self._outcome_topic)
            if msg.data < len(self.outcomes):
                rospy.loginfo("State update: %s > %s", self._target_name, self.outcomes[msg.data])
                return self.outcomes[msg.data]
        try:
            self.sleep()
        except ROSInterruptException:
            print('Interrupted mirror sleep.')

    def on_enter(self, userdata):
        self._pub.publish('flexbe/behavior_update', String("/" + "/".join(self._target_path.split("/")[1:])))
bsd-3-clause
5,211,068,860,870,284,000
36.051282
109
0.652595
false
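Illustrative instantiation of the MirrorState above. In practice the FlexBE mirror creates these states dynamically while mirroring a running behavior, and a ROS node plus the proxy infrastructure must already be initialised; the names and outcome list here are hypothetical.

from flexbe_mirror.mirror_state import MirrorState

state = MirrorState(target_name='Example_State',
                    target_path='/Behavior/Example_State',
                    given_outcomes=['done', 'failed'],
                    outcome_autonomy=[0, 0])
# execute() returns whichever outcome index arrives on 'flexbe/mirror/outcome'.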
gregbillock/Spectrum-Access-System
src/prop/ehata/test/ehata_median_loss_test.py
2
1161
import os, sys
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))

import ehata

import csv
import math

distance = range(1, 100)
hb = 50
hmVals = [1.5, 3, 4.5, 6]
region = ['Urban', 'Suburban']

for reg in region:
    loss = []
    abmloss = []
    with open('median_loss_%s.csv' % reg) as loss_file:
        rows = csv.reader(loss_file)
        for row in rows:
            loss.append(row)
    with open('median_abm_%s.csv' % reg) as loss_file:
        rows = csv.reader(loss_file)
        for row in rows:
            abmloss.append(row)

    for hmi in range(len(hmVals)):
        hm = hmVals[hmi]
        for disti in range(len(distance)):
            dist = distance[disti]
            median_loss, above_median_loss = ehata.ExtendedHata_MedianBasicPropLoss(3500, dist, hb, hm, reg)

            if math.fabs(float(loss[hmi][disti]) - median_loss) > .05:
                print('fail median loss at distance %d: %f vs %f' % (dist, float(loss[hmi][disti]), median_loss))
                exit()

            if math.fabs(float(abmloss[hmi][disti]) - above_median_loss) > .05:
                print('fail above-median loss at distance %d: %f vs %f' % (dist, float(abmloss[hmi][disti]), above_median_loss))
                exit()

print('PASS')
apache-2.0
-3,991,473,969,470,144,000
24.8
121
0.621878
false
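Single-point sketch of the call the test above loops over. The argument order follows the test (frequency in MHz, distance, base-station height, mobile height, region string); the specific values are arbitrary, and ehata must be importable as the test's sys.path manipulation arranges.

import ehata

median_db, above_median_db = ehata.ExtendedHata_MedianBasicPropLoss(3500, 10, 50, 1.5, 'Urban')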
better-dem/portal
widgets/forms.py
1
15560
from django import forms from django.core.exceptions import ValidationError import os import sys import json import traceback from django.http import HttpResponse import datetime api_key = os.environ["GOOGLE_MAPS_API_KEY"] JQUERY="https://code.jquery.com/jquery-1.12.4.js" # this is already included in all BDN pages ##### Begin Date Picking widget for use with the DateField # the builtin only works in Chrome... class DatePickerJQueryWidget(forms.Widget): class Media: css = { 'all': ("https://code.jquery.com/ui/1.12.1/themes/base/jquery-ui.css",) } js = ("https://code.jquery.com/ui/1.12.1/jquery-ui.js",) def render(self, name, value, *args, **kwargs): input_name = name input_id = kwargs['attrs']['id'] value_str = "" if not value is None: if isinstance(value, unicode): value_str = value elif isinstance(value, datetime.date): value_str = str(value.month)+"/"+str(value.day)+"/"+str(value.year) render_html = "" render_html += "<input type='text' name='"+input_name+"' id='"+input_id+"' value='"+value_str+"' />" render_html += "<script type=\"text/javascript\">" render_html += "$( function() {" render_html += "$( \"#"+input_id+"\" ).datepicker();" render_html += "});" render_html += "</script>" return render_html def __init__(self, *args, **kwargs): super(DatePickerJQueryWidget, self).__init__(*args, **kwargs) def validate_polygon(point_array): if point_array is None: raise ValidationError("Error parsing point array", code="invalid") for point in point_array: if(len(point) != 2): raise ValidationError("Point must compose of latitude and longitude", code="invalid") for p in point: if not type(p) is float: raise ValidationError("One of the coordinates is not float", code='invalid') if len(point_array) < 3: raise ValidationError("At least three points required for polygon", code='invalid') def is_polygon(point_array_string): try: point_array = json.loads(point_array_string) validate_polygon(point_array) return True except: return False class ShowPointWidget(forms.Widget): """ Widget to display a polygon on a map. The polygon is not editable. The form field is disabled and should not be required. 
""" class Media: css = { 'all' : ('css/poly_style.css',) } js = ("js/maps_utils.js", "js/show_point.js", "https://maps.googleapis.com/maps/api/js?key={}".format(api_key),) def render(self, name, value, *args, **kwargs): div_id = 'poly_map_' + kwargs['attrs']['id'] input_name = name input_id = kwargs['attrs']['id'] if value is None: raise Exception("ShowPointWidget requires the polygon to be defined" + str(value)) render_html = """ <div class='map_widget' id="{}"></div> <input type='hidden' name='{}' id='{}' value='' /> <script type="text/javascript"> google.maps.event.addDomListener(window, 'load', show_point_map('{}', '{}', {}, {})); </script> """ render_html_with_id = render_html.format(div_id, input_name, input_id, div_id, input_id, value, 12) return render_html_with_id def __init__(self, *args, **kwargs): super(ShowPointWidget, self).__init__(*args, **kwargs) class ShowPointField(forms.Field): def __init__(self, required= True, widget=ShowPointWidget, label=None, initial=None, help_text="", validators=[], *args, **kwargs): super(ShowPointField, self).__init__(required=required, widget=widget, label=label, initial=initial, help_text=help_text, validators=validators, *args, **kwargs) self.disabled = True def to_python(self, value): # Convert to expected python value (list of lists of latlngs) value = super(ShowPointField, self).to_python(value) try: json_array = json.loads(value) except: raise ValidationError("Unable to parse input: '{}'".format(value), code="invalid") return json_array def validate(self, value): super(ShowPointField, self).validate(value) def widget_attrs(self, widget): attrs = super(ShowPointField, self).widget_attrs(widget) return attrs ##### Begin ShowPolygon widget and field class ShowPolygonWidget(forms.Widget): """ Widget to display a polygon on a map. The polygon is not editable. The form field is disabled and should not be required. 
""" class Media: css = { 'all' : ('css/poly_style.css',) } js = ("js/maps_utils.js", "js/show_polygon.js", "https://maps.googleapis.com/maps/api/js?key={}".format(api_key),) def render(self, name, value, *args, **kwargs): div_id = 'poly_map_' + kwargs['attrs']['id'] input_name = name input_id = kwargs['attrs']['id'] if value is None or not is_polygon(value): raise Exception("ShowPolygonWidget requires the polygon to be defined" + str(value)) render_html = """ <div class='map_widget' id="{}"></div> <input type='hidden' name='{}' id='{}' value='' /> <script type="text/javascript"> google.maps.event.addDomListener(window, 'load', show_polygon_map('{}', '{}', {})); </script> """ render_html_with_id = render_html.format(div_id, input_name, input_id, div_id, input_id, value) return render_html_with_id def __init__(self, *args, **kwargs): super(ShowPolygonWidget, self).__init__(*args, **kwargs) class ShowPolygonField(forms.Field): def __init__(self, required= True, widget=ShowPolygonWidget, label=None, initial=None, help_text="", validators=[validate_polygon], *args, **kwargs): super(ShowPolygonField, self).__init__(required=required, widget=widget, label=label, initial=initial, help_text=help_text, validators=validators, *args, **kwargs) self.disabled = True def to_python(self, value): # Convert to expected python value (list of lists of latlngs) value = super(ShowPolygonField, self).to_python(value) try: json_array = json.loads(value) except: raise ValidationError("Unable to parse input: '{}'".format(value), code="invalid") return json_array def validate(self, value): super(ShowPolygonField, self).validate(value) def widget_attrs(self, widget): attrs = super(ShowPolygonField, self).widget_attrs(widget) return attrs ##### Begin EditablePolygon widget and field class EditablePolygonWidget(forms.Widget): """ Widget for a user-editable Polygon form field """ class Media: css = { 'all' : ('css/poly_style.css',) } js = ("js/maps_utils.js", "js/demo_poly_draw.js", "https://maps.googleapis.com/maps/api/js?key={}".format(api_key),) def render(self, name, value, *args, **kwargs): div_id = 'poly_map_' + kwargs['attrs']['id'] input_name = name input_id = kwargs['attrs']['id'] if value is None or not is_polygon(value): value = 'null' render_html = """ <div class='map_widget' id="{}"></div> <input type='hidden' name='{}' id='{}' value='' /> <script type="text/javascript"> google.maps.event.addDomListener(window, 'load', show_editable_map('{}', '{}', {})); </script> """ render_html_with_id = render_html.format(div_id, input_name, input_id, div_id, input_id, value) return render_html_with_id def __init__(self, *args, **kwargs): super(EditablePolygonWidget, self).__init__(*args, **kwargs) class EditablePolygonField(forms.Field): def __init__(self, required= True, widget=EditablePolygonWidget, label=None, initial=None, help_text="", validators=[validate_polygon], *args, **kwargs): super(EditablePolygonField, self).__init__(required=required, widget=widget, label=label, initial=initial, help_text=help_text, validators=validators, *args, **kwargs) def to_python(self, value): # Convert to expected python value (list of lists of latlngs) value = super(EditablePolygonField, self).to_python(value) try: json_array = json.loads(value) except: raise ValidationError("Unable to parse input: '{}'".format(value), code="invalid") return json_array def validate(self, value): super(EditablePolygonField, self).validate(value) def widget_attrs(self, widget): attrs = super(EditablePolygonField, self).widget_attrs(widget) return 
attrs class InlineLinkWidget(forms.Widget): """ Widget to display a participation item inline inside a form """ def render(self, name, value, *args, **kwargs): """ value is a link """ input_name = name input_id = kwargs['attrs']['id'] render_html = """ <input type='hidden' name='{}' id='{}' value='' /> <a href="{}">View here</a> """ render_html_with_id = render_html.format(input_name, input_id, value) return render_html_with_id def __init__(self, *args, **kwargs): super(InlineLinkWidget, self).__init__(*args, **kwargs) class InlineLinkField(forms.Field): def __init__(self, required= False, widget=InlineLinkWidget, label=None, initial=None, help_text="", validators=[], *args, **kwargs): super(InlineLinkField, self).__init__(required=required, widget=widget, label=label, initial=initial, help_text=help_text, validators=validators, *args, **kwargs) self.disabled = True def to_python(self, value): return None def validate(self, value): super(InlineLinkField, self).validate(value) def widget_attrs(self, widget): attrs = super(InlineLinkField, self).widget_attrs(widget) return attrs #### Ajax string lookup utitlities class AjaxStringLookupWidget(forms.Widget): """ Widget for a string lookup with suggestions """ class Media: css = { 'all' : ("css/autocomplete.css",) } js = ("js/jquery.autocomplete.min.js", "js/setup_ajax.js", "js/ajax_string_lookup.js",) def render(self, name, value, *args, **kwargs): div_id = 'ajax_text_field_' + kwargs['attrs']['id'] input_name = name input_id = kwargs['attrs']['id'] if value is None: value = '' render_html = "<input type='text' size=\"40\" name='"+str(input_name)+"' id='"+str(input_id)+"' value='"+value+"' />\n" render_html += '<script type="text/javascript">\n' render_html += "attach_ajax_string_listener(\""+self.ajax_url+"\", \""+str(input_id)+"\")\n" render_html += "</script>\n" return render_html def __init__(self, ajax_url, *args, **kwargs): self.ajax_url = ajax_url super(AjaxStringLookupWidget, self).__init__(*args, **kwargs) class AjaxStringLookupField(forms.Field): def __init__(self, ajax_url, required= True, label=None, initial=None, help_text="", validators=[], *args, **kwargs): self.ajax_url = ajax_url # widget needs to be initialized every time, so it can't be done in the signature widget = AjaxStringLookupWidget(self.ajax_url) super(AjaxStringLookupField, self).__init__(required=required, widget=widget, label=label, initial=initial, help_text=help_text, validators=validators, *args, **kwargs) def widget_attrs(self, widget): attrs = super(AjaxStringLookupField, self).widget_attrs(widget) attrs["ajax_url"] = self.ajax_url return attrs class AjaxAutocomplete: def __init__(self, matching_object_query, suggestion_function, ajax_url): # function taking the query string as input and returning a query set self.matching_object_query = matching_object_query # function taking an item and returning the string suggestion to be displayed to the user self.suggestion_function = suggestion_function self.ajax_url = ajax_url def get_url_pattern(self): return "^"+self.ajax_url.lstrip("/")+"$" def ajax_autocomplete_view(self, request): if request.is_ajax(): v = request.body k, v = request.body.split('=') if k.strip() == "query": query_string = v.strip().replace('+',' ').lower() suggestion_set = self.matching_object_query(query_string) suggestions = [self.suggestion_function(x) for x in suggestion_set] ans = json.dumps({"query": query_string, "suggestions": suggestions}) return HttpResponse(ans, content_type="application/json") else: return HttpResponse("I can't handle 
that type of input:"+str(k)) else: return HttpResponse("this should be an ajax post") def get_new_form_field(self, **kwargs): return AjaxStringLookupField(self.ajax_url, **kwargs) # create aac in advance # form is imported by urls, states = {'Alabama', 'Alaska', 'Arizona', 'Arkansas', 'California', 'Colorado', 'Connecticut', 'Deleware', 'Florida', 'Georgia'} matching_object_query = lambda query: [i for i in states if i.lower().startswith(query)] suggestion_function = lambda item: "How about:"+item ajax_url = "autocomplete/" state_aac = AjaxAutocomplete(matching_object_query, suggestion_function, ajax_url) class SimpleTestWidgetForm(forms.Form): widget_a = forms.CharField(max_length=100) widget_b = state_aac.get_new_form_field() # widget_b = forms.CharField(max_length=100) # editable_polygon_field = EditablePolygonField(label="Test Polygon Field") # # editable_polygon_field_2 = EditablePolygonField(label="Test Polygon Field 2") # polygon_field = ShowPolygonField(label="Test Polygon Field", initial="[[1.0,2.0],[4.0,5.0],[7.0,1.0]]") # # polygon_field2 = ShowPolygonField(label="Test Polygon Field", initial="[[11.0,2.0],[41.0,5.0],[-7.0,1.0]]")
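# --- Editor's illustrative sketch (not part of the original module) ---
# AjaxAutocomplete above exposes get_url_pattern() and ajax_autocomplete_view(),
# so a project urls.py would typically wire the pre-built `state_aac` instance
# roughly as below; the import path `myapp.forms` and the Django 1.x-style
# url() import are assumptions made up for the example.
#
#     from django.conf.urls import url
#     from myapp.forms import state_aac, SimpleTestWidgetForm
#
#     urlpatterns = [
#         url(state_aac.get_url_pattern(), state_aac.ajax_autocomplete_view),
#     ]
#
# The matching field is rendered like any other form field, and form.media
# pulls in jquery.autocomplete.min.js plus ajax_string_lookup.js:
#
#     form = SimpleTestWidgetForm()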
agpl-3.0
8,434,505,056,886,371,000
32.247863
128
0.564781
false
laurentb/weboob
modules/humanis/module.py
1
2124
# -*- coding: utf-8 -*- # Copyright(C) 2016 Jean Walrave # # This file is part of a weboob module. # # This weboob module is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This weboob module is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with this weboob module. If not, see <http://www.gnu.org/licenses/>. from __future__ import unicode_literals from weboob.tools.backend import Module, BackendConfig from weboob.tools.value import ValueBackendPassword from weboob.capabilities.bank import CapBankWealth from .browser import HumanisBrowser __all__ = ['HumanisModule'] class HumanisModule(Module, CapBankWealth): NAME = 'humanis' DESCRIPTION = 'Humanis Épargne Salariale' MAINTAINER = 'Quentin Defenouillère' EMAIL = '[email protected]' LICENSE = 'LGPLv3+' VERSION = '2.1' CONFIG = BackendConfig( ValueBackendPassword('login', label='Code d\'accès', masked=False), ValueBackendPassword('password', label='Mot de passe') ) BROWSER = HumanisBrowser def create_default_browser(self): return self.create_browser( self.config['login'].get(), self.config['password'].get(), 'https://www.gestion-epargne-salariale.fr', 'epsens/', weboob=self.weboob ) def iter_accounts(self): return self.browser.iter_accounts() def iter_history(self, account): return self.browser.iter_history(account) def iter_investment(self, account): return self.browser.iter_investment(account) def iter_pocket(self, account): return self.browser.iter_pocket(account)
lgpl-3.0
-2,543,417,539,345,991,000
31.630769
77
0.69967
false
Huai-Xv/CSU_FreeClassroom
fc/parse.py
1
1621
# -*- coding: utf-8 -*- import re from fc import config from fc import db def parse(page, campus_id, room): db.room_insert(room['jsmc'], room['jsid'], campus_id) db_id = db.get_room_db_id(room_id=room['jsid']) # Class table for use table = [[6 * [0] for i in range(8)] for j in range(config.WEEK_NUM + 1)] # Get class info in the page result = re.findall(r'<div.*?class="kbcontent1" ?>(.*?)</div>', page) for i in range(len(result)): if i > 34: break day = i % 7 + 1 cls_id = int(i / 7) + 1 # If there are classes in the cell if not result[i] == "&nbsp;": # Get start week and end week cls = re.findall(r'<font title=\'(.*?)\((.*?)\)\([0-9]*?\)\'>', result[i]) for course in cls: duration_list = course[0].split(",") for duration in duration_list: arr = duration.split("-") if len(arr) == 1: table[int(arr[0])][day][cls_id] = 1 elif len(arr) == 2: for week in range(int(arr[0]), int(arr[1]) + 1): if (course[1] == "" or (course[1] == "单周" and week % 2 == 1) or ( course[1] == "双周" and week % 2 == 0)): table[week][day][cls_id] = 1 # Insert data into db for week in range(1, config.WEEK_NUM + 1): for day in range(1, 8): c = table[week][day] db.class_insert(db_id, week, day, c[1], c[2], c[3], c[4], c[5])
gpl-3.0
4,710,637,285,667,847,000
35.659091
93
0.451333
false
UpSea/midProjects
BasicOperations/00_Python/py2str.py
1
1261
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
-------------------------------------------------------------------------------
Function:
[Summary] Python string encodings compared: Python 2.x str and unicode vs
Python 3.x bytes and str
http://www.crifan.com/summary_python_string_encoding_decoding_difference_and_comparation_python_2_x_str_unicode_vs_python_3_x_bytes_str
Author: Crifan
Version: 2012-11-29
-------------------------------------------------------------------------------
"""

def python2xStrToUnicode():
    strUtf8 = 'w我哦我我我'  # a byte string (type 'str') in this UTF-8 source file
    print strUtf8.decode('UTF-8')
    # Decoding the same UTF-8 bytes as gb2312 does not work: it raises
    # UnicodeDecodeError (or yields mojibake), which is the point of the
    # comparison, so guard it instead of letting the demo crash.
    try:
        print strUtf8.decode('gb2312')
    except UnicodeDecodeError as err:
        print "decoding UTF-8 bytes as gb2312 fails:", err
    print "type(strUtf8)=", type(strUtf8)  # type(strUtf8)= <type 'str'>
    decodedUnicode = strUtf8.decode("UTF-8")
    print "You should see these unicode zh-CN chars in windows cmd normally: decodedUnicode=%s" % (decodedUnicode)

###############################################################################
if __name__ == "__main__":
    python2xStrToUnicode()
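# --- Editor's illustrative addition, not from the original article code ---
# A correct Python 2 round trip: decode the UTF-8 byte string to unicode first,
# then encode for the target codec, instead of decoding UTF-8 bytes as gb2312.
def python2xRoundTripDemo():
    utf8Bytes = 'w我哦我我我'               # type 'str' (bytes) in this UTF-8 file
    asUnicode = utf8Bytes.decode('UTF-8')   # type 'unicode'
    gbkBytes = asUnicode.encode('GBK')      # bytes suitable for a GBK console
    print type(asUnicode), type(gbkBytes)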
mit
4,476,341,651,733,551,000
45.083333
253
0.559276
false
tocisz/oraschemadoc
oraschemadoc/oracleobjects/oraclemview.py
2
1357
# Copyright (C) Petr Vanek <[email protected]>, 2005 # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. # __author__ = 'Petr Vanek, <[email protected]>' from oracleview import OracleView class OracleMView(OracleView): def __init__(self, name, data_dict): # FIXME: real inheritance! OracleView.__init__(self, name, data_dict) self.name = name self.columns = self._get_columns(data_dict) self.constraints = self._get_constraints(data_dict) self.comments = data_dict.all_table_comments.get(name) self.triggers = self._get_triggers(data_dict) self.container, self.query, self.mv_updatable = data_dict.all_mviews[name] self.text = self.query
gpl-2.0
29,271,653,776,693,880
38.911765
82
0.713338
false
mozzwald/Fancy-Beeper-Daemon
daemons/alsabeepd.py
1
3987
#!/usr/bin/env python # written by Lars Immisch <[email protected]> # This is a custom beep daemon for the Fancy Beep Driver from # http://www.carcosa.net/jason/software/beep/ # This beep daemon also needs pyalsaaudio from # http://sourceforge.net/projects/pyalsaaudio import alsaaudio import sys import time import struct import cStringIO BEEP = '/usr/share/sounds/Pop.wav' # this is all a bit simplified, and won't cope with any wav extensions # or multiple data chunks, but we don't need that WAV_FORMAT_PCM = 1 WAV_HEADER_SIZE = struct.calcsize('<4sl4s4slhhllhh4sl') def wav_header_unpack(data): (riff, riffsize, wave, fmt, fmtsize, format, nchannels, framerate, datarate, blockalign, bitspersample, data, datalength) \ = struct.unpack('<4sl4s4slhhllhh4sl', data) if riff != 'RIFF' or fmtsize != 16 or fmt != 'fmt ' or data != 'data': raise ValueError, 'illegal wav header' return (format, nchannels, framerate, datarate, datalength) def daemonize(stdout='/dev/null', stderr=None, stdin='/dev/null', pidfile=None): ''' This forks the current process into a daemon. The stdin, stdout, and stderr arguments are file names that will be opened and used to replace the standard file descriptors in sys.stdin, sys.stdout, and sys.stderr. These arguments are optional and default to /dev/null. Note that stderr is opened unbuffered, so if it shares a file with stdout then interleaved output may not appear in the order that you expect. ''' import os import sys # Do first fork. try: pid = os.fork() if pid > 0: sys.exit(0) # Exit first parent. except OSError, e: sys.stderr.write("fork #1 failed: (%d) %s\n" % (e.errno, e.strerror)) sys.exit(1) # Decouple from parent environment. os.chdir("/") os.umask(0) os.setsid() # Do second fork. try: pid = os.fork() if pid > 0: sys.exit(0) # Exit second parent. except OSError, e: sys.stderr.write("fork #2 failed: (%d) %s\n" % (e.errno, e.strerror)) sys.exit(1) # Open file descriptors and print start message if not stderr: stderr = stdout si = file(stdin, 'r') so = file(stdout, 'a+') se = file(stderr, 'a+', 0) pid = str(os.getpid()) if pidfile: f = file(pidfile,'w+') f.write("%s\n" % pid) f.close() # Redirect standard file descriptors. os.dup2(si.fileno(), sys.stdin.fileno()) os.dup2(so.fileno(), sys.stdout.fileno()) os.dup2(se.fileno(), sys.stderr.fileno()) def play(f): header = f.read(WAV_HEADER_SIZE) format, nchannels, framerate, datarate, datalength \ = wav_header_unpack(header) # Open the device in playback mode. out = alsaaudio.PCM(alsaaudio.PCM_PLAYBACK) # Set attributes: Mono, 8000 Hz, 16 bit little endian frames out.setchannels(2) out.setrate(framerate) out.setformat(alsaaudio.PCM_FORMAT_S16_LE) # The period size controls the internal number of frames per period. # The significance of this parameter is documented in the ALSA api. # rs = framerate / 25 # out.setperiodsize(rs) data = f.read() while data: # Read data from stdin out.write(data) data = f.read() if __name__ == '__main__': if len(sys.argv) > 1: bf = open(sys.argv[1], 'rb') else: bf = open(BEEP, 'rb') beep = cStringIO.StringIO(bf.read()) bf.close() try: beepdev = open("/dev/beep") except IOError: print "Can't open beep device." sys.exit(0) try: daemonize(pidfile = '/var/run/alsabeepd.pid') while True: if beepdev.read(1): play(beep) beep.seek(0) else: continue finally: beepdev.close()
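# --- Editor's illustrative sketch (not part of the original daemon) ---
# A header accepted by wav_header_unpack() can be built with the same struct
# format string; the values below (1000 data bytes, 44.1 kHz stereo 16-bit) are
# made up for the example.
#
#     hdr = struct.pack('<4sl4s4slhhllhh4sl',
#                       'RIFF', 36 + 1000, 'WAVE',
#                       'fmt ', 16, WAV_FORMAT_PCM, 2, 44100,
#                       44100 * 2 * 2, 4, 16,
#                       'data', 1000)
#     print wav_header_unpack(hdr)   # -> (1, 2, 44100, 176400, 1000)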
gpl-2.0
-7,155,436,854,510,651,000
26.6875
77
0.600702
false
Epic0ne/tbg
tbg/interfaces.py
1
1745
#!/usr/bin/env python3 """ Basic I/O for the TBG platform Copyright (C) 2017 Will Dereham This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. """ try: import IPython except ImportError: IPython = None class ConsoleInterface: """A basic I/O system using the console.""" @staticmethod def print(string): """Print a line to the console.""" print(string) @staticmethod def print_no_newline(string): """Print a line to the console without a newline.""" print(string, end='') @classmethod def input(cls, prompt): """Get input from the user.""" cls.print_no_newline(prompt) return input() class IPythonHTMLInterface: """ An I/O system using an IPython notebook to display HTML formatted output. (Note: requires Jupyter and IPython to be installed) """ def __init__(self): if not IPython: raise ImportError("IPython not installed") def print(self, string): IPython.display.display(IPython.display.HTML(string)) def print_no_newline(self, string): self.print(string) def input(self, prompt): return input(prompt)
gpl-3.0
6,568,817,993,779,375,000
26.698413
69
0.685387
false
jmtd/freedoom
scripts/music-duplicates.py
1
3648
#!/usr/bin/env python # # Find duplicated music tracks and create a summary report of music # that the project needs. from glob import glob import os import re import sys PHASE1_MATCH_RE = re.compile(r'(e\dm\d)', re.I) PHASE2_MATCH_RE = re.compile(r'(map\d\d)', re.I) FREEDM_MATCH_RE = re.compile(r'(dm\d\d)', re.I) DOOM2_TRACKS = ( 'runnin', 'stalks', 'countd', 'betwee', 'doom', 'the_da', 'shawn', 'ddtblu', 'in_cit', 'dead', 'stlks2', 'theda2', 'doom2', 'ddtbl2', 'runni2', 'dead2', 'stlks3', 'romero', 'shawn2', 'messag', 'count2', 'ddtbl3', 'ampie', 'theda3', 'adrian', 'messg2', 'romer2', 'tense', 'shawn3', 'openin', 'evil', 'ultima', ) def get_music_tracks(): """Returns a dictionary mapping from MIDI name (subpath of musics/) to a list of game tracks that use that MIDI.""" result = {} musics_path = os.path.join(os.path.dirname(sys.argv[0]), '../musics') for mus in glob('%s/*.mus' % musics_path): try: symlink = os.readlink(mus) if symlink not in result: result[symlink] = [] result[symlink].append(os.path.basename(mus)) except OSError: pass return result def doom2_level_for_file(filename): """Given a filename that may be named like a Doom 2 music name (eg. d_stalks.mid), get the level number it corresponds to or 0 if it doesn't match.""" filename = os.path.basename(filename) for i, doom2_name in enumerate(DOOM2_TRACKS): if filename.startswith('d_%s.' % doom2_name): return i + 1 else: return 0 def get_prime_track(midi, tracks): """Given a list of tracks that all use the same MIDI, find the "prime" one (the one that isn't a reuse/duplicate).""" # We have almost all Phase 2 tracks fulfilled. So if the same # track is used in Phase 1 and Phase 2, or Phase 2 and FreeDM, # the Phase 2 track is probably the leader. phase2_tracks = [x for x in tracks if PHASE2_MATCH_RE.search(x)] if len(phase2_tracks) == 1: return phase2_tracks[0] level = doom2_level_for_file(midi) if level: for track in phase2_tracks: if ('map%02i' % level) in track: return track # If the filename of the MIDI file (symlink target) describes a # level, then that level is the leader. m = PHASE1_MATCH_RE.search(os.path.basename(midi)) if m: level = m.group(1) for track in tracks: if level in track: return track # We're out of options. Pick the first one in the list. #print 'Warning: Don't know which of %s is the leader for %s.' % ( # tracks, midi) return tracks[0] def find_missing_tracks(tracks): """Given a dictionary of tracks, get a list of "missing" tracks.""" result = [] for midi, tracks in tracks.items(): if len(tracks) < 2: continue prime_track = get_prime_track(midi, tracks) result.extend(x for x in tracks if x != prime_track) return result def tracks_matching_regexp(tracks, regexp): return set([x for x in tracks if regexp.search(x)]) def print_report(title, tracks): if len(tracks) == 0: return print(title) for track in sorted(tracks): print('\t%s' % track.replace('.mus', '').upper()) print('') missing_tracks = set(find_missing_tracks(get_music_tracks())) phase1_tracks = tracks_matching_regexp(missing_tracks, PHASE1_MATCH_RE) phase2_tracks = tracks_matching_regexp(missing_tracks, PHASE2_MATCH_RE) freedm_tracks = tracks_matching_regexp(missing_tracks, FREEDM_MATCH_RE) other_tracks = missing_tracks - phase1_tracks - phase2_tracks - freedm_tracks print('=== Missing tracks (tracks currently using duplicates):\n') print_report('Phase 1 tracks:', phase1_tracks) print_report('Phase 2 tracks:', phase2_tracks) print_report('FreeDM tracks:', freedm_tracks) print_report('Other tracks:', other_tracks)
bsd-3-clause
-2,947,169,987,805,317,000
31.864865
77
0.686678
false
lcpt/xc_utils
python_modules/to_translate/loadCombination/sqlite/sqlite_comb.py
1
4256
# -*- coding: utf-8 -*- def SQLTcreaTablaComb(nmbDBase,nmbTabla): '''Creates a table to show the decomposition of the combinations.''' { nmbAccs= "" \pond_acciones { \for_each_accion {nmbAccs= (nmbAccs + " float," + sqlValue(getNombre))} nmbAccs= "("+copiaDesde(",",nmbAccs)+" float)" # Le quitamos la primera coma. } SQLTcreaTabla(nmbDBase,nmbTabla,nmbAccs){} } def SQLTinsertCombs(nmbDBase,nmbTabla): { sqlCmd= "" \for_each { sqlCmd= "insert into "+nmbTabla+ " ("+getDescomp.sqlNames+")"+" values ("+getDescomp.sqlValues+")" \sqlite{\nmbDBase{\execute_sql{sqlCmd}}} } } def SQLTtablaCombs(nmbDBase,nmbTabla,combinationsName,offset): '''Defines a table of combinations of the type being passed as parameter.''' cont= offset SQLTcreaTabla(nmbDBase,nmbTablaComb,"(idComb INT, descomp STRING)"){} \combinations { \combinationsName { sqlQuery= \for_each { sqlQuery= "insert into " + nmbTabla + " values ("+ sqlValue(int(cont)) + "," + sqlValue(getNombreExpandido) +")" \sqlite{\nmbDBase{\execute_sql{sqlQuery}}} cont= cont+1 } } } return cont } def SQLTtablaCombsULS(nmbDBase,nmbTabla,offset): '''Defines a table of combinations of the type being passed as parameter.''' { cont= offset SQLTcreaTabla(nmbDBase,nmbTabla,"(idComb INT, descomp STRING)"){} \combinations { \comb_elu_persistentes { \for_each { \sqlite{\nmbDBase{\execute_sql{"insert into " + nmbTabla + " values ("+ sqlValue(int(cont)) + ", " + sqlValue(getNombreExpandido) +")"}}} cont= cont+1 } } \comb_elu_accidentales { \for_each { \sqlite{\nmbDBase{\execute_sql{"insert into " + nmbTabla + " values ("+ sqlValue(int(cont)) + ", " + sqlValue(getNombreExpandido) +")"}}} cont= cont+1 } } \comb_elu_sismicas { \for_each { \sqlite{\nmbDBase{\execute_sql{"insert into " + nmbTabla + " values ("+ sqlValue(int(cont)) + ", " + sqlValue(getNombreExpandido) +")"}}} cont= cont+1 } } } return cont } def SQLTtablaCombsSLSPF(nmbDBase,nmbTabla,offset): '''Creates a table to show the combinations for serviceability limit state in rare situations.''' { cont= offset SQLTcreaTabla(nmbDBase,nmbTabla,"(idComb INT, descomp STRING)"){} \combinations { \comb_els_poco_frecuentes { \for_each { \sqlite{\nmbDBase{\execute_sql{"insert into " + nmbTabla + " values ("+ sqlValue(int(cont)) + ", " + sqlValue(getNombreExpandido) +")"}}} cont= cont+1 } } } return cont } def SQLTtablaCombsSLSF(nmbDBase,nmbTabla,offset): '''Creates a table to show the combinations for serviceability limit states in frequent situations.''' { cont= offset SQLTcreaTabla(nmbDBase,nmbTabla,"(idComb INT, descomp STRING)"){} \combinations { \comb_els_frecuentes { \for_each { \sqlite{\nmbDBase{\execute_sql{"insert into " + nmbTabla + " values ("+ sqlValue(int(cont)) + ", " + sqlValue(getNombreExpandido) +")"}}} cont= cont+1 } } } return cont } def SQLTtablaCombsSLSCP(nmbDBase,nmbTabla,offset): '''Creates a table to show the combinations for serviceability limit states in quasi-permanent situations.''' { cont= offset SQLTcreaTabla(nmbDBase,nmbTabla,"(idComb INT, descomp STRING)"){} \combinations { \comb_els_cuasi_permanentes { \for_each { \sqlite{\nmbDBase{\execute_sql{"insert into " + nmbTabla + " values ("+ sqlValue(int(cont)) + ", " + sqlValue(getNombreExpandido) +")"}}} cont= cont+1 } } } return cont }
gpl-3.0
1,731,214,221,317,780,500
29.184397
153
0.539709
false
fedora-infra/the-new-hotness
hotness/responses/response_failure.py
1
4805
# -*- coding: utf-8 -*- # # Copyright (C) 2021 Red Hat, Inc. # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. from typing import Any from hotness.requests import Request from hotness.responses import Response class ResponseFailure(Response): """ Class that represents failure response returned from use case. It is send when some exception happen during the use case. Defines constants for error types. Attributes: type: Type of the failure. message: Error message. """ VALIDATOR_ERROR = "ValidatorError" BUILDER_ERROR = "BuilderError" DATABASE_ERROR = "DatabaseError" NOTIFIER_ERROR = "NotifierError" PATCHER_ERROR = "PatherError" INVALID_REQUEST_ERROR = "InvalidRequestError" def __init__(self, type: str, message: Any) -> None: """ Class constructor. """ self.type = type self.message = self._format_message(message) def _format_message(self, message: Any) -> Any: """ Formats the input message if the message inherits from Exception, otherwise just return it back. Params: message: Input message to format Returns: String if exception, otherwise return the same object we received. """ if type(message) is Exception: return "{}: {}".format(message.__class__.__name__, "{}".format(message)) return message @property def value(self): """ Returns the dict representation of the failure response. """ return {"type": self.type, "message": self.message} def __bool__(self) -> bool: """ Boolean representation of response. """ return False @classmethod def validator_error(cls, message: Any) -> "ResponseFailure": """ Creates response for validator failure. Params: message: Message to add to this error Returns: ResponseFailure object """ response = ResponseFailure( type=ResponseFailure.VALIDATOR_ERROR, message=message ) return response @classmethod def builder_error(cls, message: Any) -> "ResponseFailure": """ Creates response for builder failure. Params: message: Message to add to this error Returns: ResponseFailure object """ response = ResponseFailure(type=ResponseFailure.BUILDER_ERROR, message=message) return response @classmethod def database_error(cls, message: Any) -> "ResponseFailure": """ Creates response for database failure. Params: message: Message to add to this error Returns: ResponseFailure object """ response = ResponseFailure(type=ResponseFailure.DATABASE_ERROR, message=message) return response @classmethod def notifier_error(cls, message: Any) -> "ResponseFailure": """ Creates response for notifier failure. Params: message: Message to add to this error Returns: ResponseFailure object """ response = ResponseFailure(type=ResponseFailure.NOTIFIER_ERROR, message=message) return response @classmethod def patcher_error(cls, message: Any) -> "ResponseFailure": """ Creates response for patcher failure. 
Params: message: Message to add to this error Returns: ResponseFailure object """ response = ResponseFailure(type=ResponseFailure.PATCHER_ERROR, message=message) return response @classmethod def invalid_request_error(cls, request: Request) -> "ResponseFailure": """ Creates response for invalid request failure. Params: request: Invalid request to add to this error Returns: ResponseFailure object """ response = ResponseFailure( type=ResponseFailure.INVALID_REQUEST_ERROR, message=str(request.errors) ) return response
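# --- Editor's illustrative sketch (not part of the original module) ---
# Shows how the factory classmethods wrap an exception and how the response
# behaves as a falsy value; the error text is made up for the demonstration.
if __name__ == "__main__":
    failure = ResponseFailure.builder_error(Exception("scratch build failed"))
    assert bool(failure) is False
    print(failure.value)
    # {'type': 'BuilderError', 'message': 'Exception: scratch build failed'}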
lgpl-2.1
-1,658,558,660,278,011,100
27.264706
88
0.628304
false
nuagenetworks/vspk-python
vspk/v6/nutcpconnecttestresult.py
1
8811
# -*- coding: utf-8 -*- # # Copyright (c) 2015, Alcatel-Lucent Inc, 2017 Nokia # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # * Neither the name of the copyright holder nor the names of its contributors # may be used to endorse or promote products derived from this software without # specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from bambou import NURESTObject class NUTCPConnectTestResult(NURESTObject): """ Represents a TCPConnectTestResult in the VSD Notes: TCP Connect Test Result """ __rest_name__ = "None" __resource_name__ = "None" def __init__(self, **kwargs): """ Initializes a TCPConnectTestResult instance Notes: You can specify all parameters while calling this methods. 
A special argument named `data` will enable you to load the object from a Python dictionary Examples: >>> tcpconnecttestresult = NUTCPConnectTestResult(id=u'xxxx-xxx-xxx-xxx', name=u'TCPConnectTestResult') >>> tcpconnecttestresult = NUTCPConnectTestResult(data=my_dict) """ super(NUTCPConnectTestResult, self).__init__() # Read/Write Attributes self._failed_attempts = None self._failed_percent = None self._maximum_round_trip_time = None self._minimum_round_trip_time = None self._connection_attempts = None self._successful_connections = None self._average_round_trip_time = None self.expose_attribute(local_name="failed_attempts", remote_name="failedAttempts", attribute_type=int, is_required=False, is_unique=False) self.expose_attribute(local_name="failed_percent", remote_name="failedPercent", attribute_type=float, is_required=False, is_unique=False) self.expose_attribute(local_name="maximum_round_trip_time", remote_name="maximumRoundTripTime", attribute_type=float, is_required=False, is_unique=False) self.expose_attribute(local_name="minimum_round_trip_time", remote_name="minimumRoundTripTime", attribute_type=float, is_required=False, is_unique=False) self.expose_attribute(local_name="connection_attempts", remote_name="connectionAttempts", attribute_type=int, is_required=False, is_unique=False) self.expose_attribute(local_name="successful_connections", remote_name="successfulConnections", attribute_type=int, is_required=False, is_unique=False) self.expose_attribute(local_name="average_round_trip_time", remote_name="averageRoundTripTime", attribute_type=float, is_required=False, is_unique=False) self._compute_args(**kwargs) # Properties @property def failed_attempts(self): """ Get failed_attempts value. Notes: The number of failed connection attempts This attribute is named `failedAttempts` in VSD API. """ return self._failed_attempts @failed_attempts.setter def failed_attempts(self, value): """ Set failed_attempts value. Notes: The number of failed connection attempts This attribute is named `failedAttempts` in VSD API. """ self._failed_attempts = value @property def failed_percent(self): """ Get failed_percent value. Notes: The percentage of failed connections This attribute is named `failedPercent` in VSD API. """ return self._failed_percent @failed_percent.setter def failed_percent(self, value): """ Set failed_percent value. Notes: The percentage of failed connections This attribute is named `failedPercent` in VSD API. """ self._failed_percent = value @property def maximum_round_trip_time(self): """ Get maximum_round_trip_time value. Notes: The maximum round trip time seen This attribute is named `maximumRoundTripTime` in VSD API. """ return self._maximum_round_trip_time @maximum_round_trip_time.setter def maximum_round_trip_time(self, value): """ Set maximum_round_trip_time value. Notes: The maximum round trip time seen This attribute is named `maximumRoundTripTime` in VSD API. """ self._maximum_round_trip_time = value @property def minimum_round_trip_time(self): """ Get minimum_round_trip_time value. Notes: The lowest round trip time seen This attribute is named `minimumRoundTripTime` in VSD API. """ return self._minimum_round_trip_time @minimum_round_trip_time.setter def minimum_round_trip_time(self, value): """ Set minimum_round_trip_time value. Notes: The lowest round trip time seen This attribute is named `minimumRoundTripTime` in VSD API. """ self._minimum_round_trip_time = value @property def connection_attempts(self): """ Get connection_attempts value. 
Notes: The number of connection attempts This attribute is named `connectionAttempts` in VSD API. """ return self._connection_attempts @connection_attempts.setter def connection_attempts(self, value): """ Set connection_attempts value. Notes: The number of connection attempts This attribute is named `connectionAttempts` in VSD API. """ self._connection_attempts = value @property def successful_connections(self): """ Get successful_connections value. Notes: Total number of successful connections This attribute is named `successfulConnections` in VSD API. """ return self._successful_connections @successful_connections.setter def successful_connections(self, value): """ Set successful_connections value. Notes: Total number of successful connections This attribute is named `successfulConnections` in VSD API. """ self._successful_connections = value @property def average_round_trip_time(self): """ Get average_round_trip_time value. Notes: Average Round Trip Time in milliseconds This attribute is named `averageRoundTripTime` in VSD API. """ return self._average_round_trip_time @average_round_trip_time.setter def average_round_trip_time(self, value): """ Set average_round_trip_time value. Notes: Average Round Trip Time in milliseconds This attribute is named `averageRoundTripTime` in VSD API. """ self._average_round_trip_time = value
bsd-3-clause
-2,218,880,847,134,124,800
31.278388
161
0.604585
false
eghuro/crawlcheck
src/checker/plugin/crawlers/sitemap_scanner.py
1
2559
from common import PluginType
from bs4 import BeautifulSoup
from yapsy.IPlugin import IPlugin
from urllib.parse import urlparse
import logging
import gzip


# Module-level logger so that both check() and __scan_urls() can use it
# (previously the logger was local to check(), leaving log undefined here).
log = logging.getLogger(__name__)


class SitemapScanner(IPlugin):

    category = PluginType.CRAWLER
    id = "sitemapScanner"
    contentTypes = ["application/gzip", "text/xml", "application/xml"]

    # https://www.sitemaps.org/protocol.html
    __limit_size = 50000000  # 50 MB
    __limit_records = 50000

    def __init__(self):
        self.__queue = None
        self.__journal = None

    def setJournal(self, journal):
        self.__journal = journal

    def setQueue(self, queue):
        self.__queue = queue

    def check(self, transaction):
        content, size = self.__load_content(transaction)

        soup = BeautifulSoup(content, 'lxml-xml')
        urls = soup.findAll('url')
        if not urls:
            return  # no urls or not a sitemap.xml

        if len(soup.findAll('sitemap')) == 0 or \
           len(soup.findAll('sitemapindex')) == 0:
            return  # not a sitemap.xml nor Sitemap index

        self.__test_conditions(size, len(urls), transaction.idno)
        self.__scan_urls(urls, transaction)

    def __load_content(self, transaction):
        if transaction.type == 'application/gzip':
            with gzip.open(transaction.file, 'rb') as f:
                content = f.read()
                size = len(content)
        else:
            content = transaction.getContent()
            size = transaction.cache['size']
        return content, size

    def __scan_urls(self, urls, transaction):
        """ Go through soup of urls and record links.
        """
        for u in urls:
            loc = u.find('loc').string
            p = urlparse(loc)
            if p.scheme not in ['http', 'https']:
                continue
            log.debug("Link from sitemap ("+transaction.uri+") to "+loc)
            self.__queue.push_link(loc, transaction)

    def __test_conditions(self, size, url_cnt, idno):
        if size > SitemapScanner.__limit_size:
            self.__journal.foundDefect(idno, "sitemapsize",
                                       "Sitemap.xml size exceeds 50MiB",
                                       str(size), 0.6)

        if url_cnt > SitemapScanner.__limit_records:
            self.__journal.foundDefect(idno, "sitemaprecords",
                                       "Sitemap.xml exceeds 50 000 URLs",
                                       str(url_cnt), 0.6)

# See: https://gist.github.com/chrisguitarguy/1305010
mit
-5,650,757,299,396,455,000
31.392405
73
0.565455
false
open-mmlab/mmdetection
mmdet/models/losses/focal_loss.py
1
7517
import torch import torch.nn as nn import torch.nn.functional as F from mmcv.ops import sigmoid_focal_loss as _sigmoid_focal_loss from ..builder import LOSSES from .utils import weight_reduce_loss # This method is only for debugging def py_sigmoid_focal_loss(pred, target, weight=None, gamma=2.0, alpha=0.25, reduction='mean', avg_factor=None): """PyTorch version of `Focal Loss <https://arxiv.org/abs/1708.02002>`_. Args: pred (torch.Tensor): The prediction with shape (N, C), C is the number of classes target (torch.Tensor): The learning label of the prediction. weight (torch.Tensor, optional): Sample-wise loss weight. gamma (float, optional): The gamma for calculating the modulating factor. Defaults to 2.0. alpha (float, optional): A balanced form for Focal Loss. Defaults to 0.25. reduction (str, optional): The method used to reduce the loss into a scalar. Defaults to 'mean'. avg_factor (int, optional): Average factor that is used to average the loss. Defaults to None. """ pred_sigmoid = pred.sigmoid() target = target.type_as(pred) pt = (1 - pred_sigmoid) * target + pred_sigmoid * (1 - target) focal_weight = (alpha * target + (1 - alpha) * (1 - target)) * pt.pow(gamma) loss = F.binary_cross_entropy_with_logits( pred, target, reduction='none') * focal_weight if weight is not None: if weight.shape != loss.shape: if weight.size(0) == loss.size(0): # For most cases, weight is of shape (num_priors, ), # which means it does not have the second axis num_class weight = weight.view(-1, 1) else: # Sometimes, weight per anchor per class is also needed. e.g. # in FSAF. But it may be flattened of shape # (num_priors x num_class, ), while loss is still of shape # (num_priors, num_class). assert weight.numel() == loss.numel() weight = weight.view(loss.size(0), -1) assert weight.ndim == loss.ndim loss = weight_reduce_loss(loss, weight, reduction, avg_factor) return loss def sigmoid_focal_loss(pred, target, weight=None, gamma=2.0, alpha=0.25, reduction='mean', avg_factor=None): r"""A warpper of cuda version `Focal Loss <https://arxiv.org/abs/1708.02002>`_. Args: pred (torch.Tensor): The prediction with shape (N, C), C is the number of classes. target (torch.Tensor): The learning label of the prediction. weight (torch.Tensor, optional): Sample-wise loss weight. gamma (float, optional): The gamma for calculating the modulating factor. Defaults to 2.0. alpha (float, optional): A balanced form for Focal Loss. Defaults to 0.25. reduction (str, optional): The method used to reduce the loss into a scalar. Defaults to 'mean'. Options are "none", "mean" and "sum". avg_factor (int, optional): Average factor that is used to average the loss. Defaults to None. """ # Function.apply does not accept keyword arguments, so the decorator # "weighted_loss" is not applicable loss = _sigmoid_focal_loss(pred.contiguous(), target, gamma, alpha, None, 'none') if weight is not None: if weight.shape != loss.shape: if weight.size(0) == loss.size(0): # For most cases, weight is of shape (num_priors, ), # which means it does not have the second axis num_class weight = weight.view(-1, 1) else: # Sometimes, weight per anchor per class is also needed. e.g. # in FSAF. But it may be flattened of shape # (num_priors x num_class, ), while loss is still of shape # (num_priors, num_class). 
assert weight.numel() == loss.numel() weight = weight.view(loss.size(0), -1) assert weight.ndim == loss.ndim loss = weight_reduce_loss(loss, weight, reduction, avg_factor) return loss @LOSSES.register_module() class FocalLoss(nn.Module): def __init__(self, use_sigmoid=True, gamma=2.0, alpha=0.25, reduction='mean', loss_weight=1.0): """`Focal Loss <https://arxiv.org/abs/1708.02002>`_ Args: use_sigmoid (bool, optional): Whether to the prediction is used for sigmoid or softmax. Defaults to True. gamma (float, optional): The gamma for calculating the modulating factor. Defaults to 2.0. alpha (float, optional): A balanced form for Focal Loss. Defaults to 0.25. reduction (str, optional): The method used to reduce the loss into a scalar. Defaults to 'mean'. Options are "none", "mean" and "sum". loss_weight (float, optional): Weight of loss. Defaults to 1.0. """ super(FocalLoss, self).__init__() assert use_sigmoid is True, 'Only sigmoid focal loss supported now.' self.use_sigmoid = use_sigmoid self.gamma = gamma self.alpha = alpha self.reduction = reduction self.loss_weight = loss_weight def forward(self, pred, target, weight=None, avg_factor=None, reduction_override=None): """Forward function. Args: pred (torch.Tensor): The prediction. target (torch.Tensor): The learning label of the prediction. weight (torch.Tensor, optional): The weight of loss for each prediction. Defaults to None. avg_factor (int, optional): Average factor that is used to average the loss. Defaults to None. reduction_override (str, optional): The reduction method used to override the original reduction method of the loss. Options are "none", "mean" and "sum". Returns: torch.Tensor: The calculated loss """ assert reduction_override in (None, 'none', 'mean', 'sum') reduction = ( reduction_override if reduction_override else self.reduction) if self.use_sigmoid: if torch.cuda.is_available() and pred.is_cuda: calculate_loss_func = sigmoid_focal_loss else: num_classes = pred.size(1) target = F.one_hot(target, num_classes=num_classes + 1) target = target[:, :num_classes] calculate_loss_func = py_sigmoid_focal_loss loss_cls = self.loss_weight * calculate_loss_func( pred, target, weight, gamma=self.gamma, alpha=self.alpha, reduction=reduction, avg_factor=avg_factor) else: raise NotImplementedError return loss_cls
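# --- Editor's illustrative sketch (not part of mmdetection) ---
# Minimal CPU check of the pure-PyTorch fallback path, assuming torch (and mmcv
# for the import at the top of this file) are installed; shapes and values are
# arbitrary.
if __name__ == '__main__':
    pred = torch.randn(4, 3)             # 4 priors, 3 classes
    target = torch.tensor([0, 1, 2, 1])  # integer class labels
    criterion = FocalLoss(use_sigmoid=True, gamma=2.0, alpha=0.25)
    print(criterion(pred, target).item())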
apache-2.0
-4,598,363,959,446,810,000
40.530387
79
0.550619
false
MoveOnOrg/eventroller
event_store/models.py
1
11924
from __future__ import unicode_literals import hashlib import re from django.db import models from django.contrib.auth.models import User, Group from django.utils.html import format_html, mark_safe class Organization(models.Model): title = models.CharField(max_length=765) slug = models.SlugField(max_length=128, db_index=True) url = models.URLField(blank=True) facebook = models.CharField(max_length=128, null=True, blank=True) twitter = models.CharField(max_length=128, null=True, blank=True, help_text="do not include @") logo_thumb = models.URLField(blank=True) logo_big = models.URLField(blank=True) privacy_policy = models.URLField(blank=True) terms_and_conditions = models.URLField(blank=True) #default source id osdi_source_id = models.CharField(max_length=128) group = models.ForeignKey(Group, db_index=True) # for getting other folks' data; auto/re-generatable api_key = models.CharField(max_length=765, editable=False) def __str__(self): return self.title class Activist(models.Model): hashed_email = models.CharField(max_length=64, null=True, blank=True, help_text="sha256 hash hexdigest of the email address") email = models.CharField(max_length=765, null=True, blank=True) name = models.CharField(max_length=765, null=True, blank=True) member_system_pk = models.CharField(max_length=765, null=True, blank=True) member_system = models.ForeignKey('event_exim.EventSource', blank=True, null=True, db_index=True) phone = models.CharField(max_length=75, null=True, blank=True) def __str__(self): return self.name or 'Activist {}:{}'.format(str(self.member_system), self.member_system_pk) def hash(self_or_email, email=None): """Should work as a class OR instance method""" if email is None: if hasattr(self_or_email, 'email'): email = getattr(self, 'email', None) if email is None: raise Exception("You need to set the email or send it as an argument") else: email = self_or_email return hashlib.sha256(email.encode('utf-8')).hexdigest() def likely_same(self, other): eq_attrs = ('id', 'email', 'hashed_email', 'phone') for attr in eq_attrs: if getattr(self, attr) and getattr(self, attr)==getattr(other,attr,None): return True if self.member_system and self.member_system_pk\ and self.member_system_id == getattr(other,'member_system_id', None)\ and self.member_system_pk == getattr(other,'member_system_pk', None): return True return False EVENT_REVIEW_CHOICES = ((None, 'New'), ('reviewed', 'Claimed'), ('vetted', 'Vetted'), ('questionable', 'Questionable'), ('limbo', 'Limbo')) EVENT_PREP_CHOICES = (('', 'Unclaimed'), ('claimed', 'Claimed'), ('partially_prepped', 'Partially prepped'), ('fully_prepped', 'Fully prepped'), ('nocontact', 'Unable to contact')) CHOICES = { 'unknown': 0, #venues 'private home': 1, 'public space': 2, 'target location (e.g. 
congressional district office)': 3, 'virtual': 4, #ticket types 'open': 1, 'ticketed': 2, #is_private 'public': 0, 'private': 1, #is_searchable 'not searchable': 0, 'searchable': 1, } class Event(models.Model): #starting with ActionKit baseline, out of selfishness created_at = models.DateTimeField(auto_now_add=True) updated_at = models.DateTimeField(auto_now=True) address1 = models.CharField(max_length=765, null=True, blank=True) address2 = models.CharField(max_length=765, null=True, blank=True) city = models.CharField(max_length=765, null=True, blank=True) state = models.CharField(max_length=765, null=True, blank=True) region = models.CharField(max_length=765, null=True, blank=True) postal = models.CharField(max_length=765, null=True, blank=True) zip = models.CharField(max_length=15, null=True, blank=True) plus4 = models.CharField(max_length=12, null=True, blank=True) country = models.CharField(max_length=765, null=True, blank=True) longitude = models.FloatField(null=True, blank=True) latitude = models.FloatField(null=True, blank=True) title = models.CharField(max_length=765) starts_at = models.DateTimeField(null=True, blank=True, db_index=True) ends_at = models.DateTimeField(null=True, blank=True) starts_at_utc = models.DateTimeField(null=True, blank=True) ends_at_utc = models.DateTimeField(null=True, blank=True) status = models.CharField(max_length=96, db_index=True, choices=(('active', 'active'), ('cancelled', 'cancelled'), ('deleted', 'deleted'), )) host_is_confirmed = models.IntegerField() is_private = models.IntegerField(choices=((0, 'public'), (1, 'private')), verbose_name="private or public") is_approved = models.IntegerField() attendee_count = models.IntegerField() max_attendees = models.IntegerField(null=True, blank=True) venue = models.CharField(max_length=765, blank=True) phone = models.CharField(max_length=765, blank=True) public_description = models.TextField(blank=True) directions = models.TextField(blank=True) note_to_attendees = models.TextField(blank=True) internal_notes = models.TextField(blank=True) #from ground-control #eventIdObfuscated: {type: GraphQLString}, organization_official_event = models.NullBooleanField(null=True) event_type = models.CharField(max_length=765, null=True, blank=True) organization_host = models.ForeignKey('Activist', blank=True, null=True, on_delete=models.SET_NULL) organization = models.ForeignKey('Organization', blank=True, null=True, db_index=True) organization_source = models.ForeignKey('event_exim.EventSource', blank=True, null=True, on_delete=models.SET_NULL, db_index=True) organization_campaign = models.CharField(max_length=765, db_index=True) organization_source_pk = models.CharField(max_length=765, blank=True, null=True, db_index=True) #this can be any other data the event source wants/needs to store # in this field to resolve additional information. 
It can be the original data, # but could also be more extended info like social sharing data source_json_data = models.TextField(null=True, blank=True) #hostId: {type: GraphQLString}, = add primary_host #localTimezone: {type: GraphQLString}, #not there, but starts_at + starts_at_utc sorta does that #duration: {type: GraphQLInt}, is_searchable = models.IntegerField(choices=((0, 'not searchable'), (1, 'searchable'))) private_phone = models.CharField(max_length=765, blank=True) #?todo #hostReceiveRsvpEmails: {type: GraphQLBoolean}, #rsvpUseReminderEmail: {type: GraphQLBoolean}, #rsvpEmailReminderHours: {type: GraphQLInt}, #from progressive events url = models.URLField(blank=True) #if present, does not need to be unique -- though probably should be by organization+campaign+eventtype slug = models.SlugField(blank=True, null=True, max_length=255) #someday: https://github.com/django-recurrence/django-recurrence #recurrences = MoneypatchedRecurrenceField(null=True) #osdi osdi_origin_system = models.CharField(max_length=765) #ticket_levels[] ticket_type = models.IntegerField(choices=((0, 'unknown'), (1, 'open'), (2, 'ticketed'))) share_url = models.URLField(blank=True) #share_options[] = facebook_share{title, desc, img}, twitter_share{msg}, email_share{subj,body} # See https://opencivicdata.readthedocs.io/en/latest/proposals/0002.html political_scope = models.CharField(max_length=765, null=True, blank=True) #ocdep, districts, etc maybe dupe = models.ForeignKey('self', null=True, blank=True, on_delete=models.SET_NULL) venue_category = models.IntegerField(choices=((0, 'unknown'), (1, 'private home'), (2, 'public space'), (3, 'target location (e.g. congressional district office)'), (4, 'virtual'), )) needs_organizer_help = models.IntegerField(null=True, blank=True, default=0) #these can be functions of the source: rsvp_url = models.URLField(blank=True, null=True) event_facebook_url = models.URLField(blank=True, null=True) organization_status_review = models.CharField(max_length=32, blank=True, null=True, db_index=True, choices=EVENT_REVIEW_CHOICES) organization_status_prep = models.CharField(max_length=32, blank=True, null=True, db_index=True, choices=EVENT_PREP_CHOICES) def host_edit_url(self, edit_access=False): src = self.organization_source if src and hasattr(src.api, 'get_host_event_link'): return src.api.get_host_event_link(self, edit_access=edit_access) def extra_management_html(self): src = self.organization_source if src and hasattr(src.api, 'get_extra_event_management_html'): return src.api.get_extra_event_management_html(self) return '' def handle_rsvp(self): return None #organization can implement def review_data(self): return { 'review_status': self.organization_status_review, 'prep_status': self.organization_status_prep, } @classmethod def phone_format(cls, phone): return format_html('<span style="white-space: nowrap">{}</span>', re.sub(r'^(\d{3})(\d{3})(\d{4})', '(\\1) \\2-\\3', phone)) @classmethod def political_scope_display(cls, political_scope): if political_scope: m = re.match( r'ocd-division/country:(?P<country>\w+)/\w+:(?P<region>\w+)/(?P<district_type>\w+):(?P<district>\w+)', political_scope) if m: return '{}_{}'.format(m.group('region').upper(), m.group('district').upper()) else: return political_scope return '' def get_political_scope_display(self): return self.political_scope_display(self.political_scope) def on_save_review(self, reviews, log_message=None): """ Upon review saving, it will call this method, which allows us to update through the EventSource. 
I know, I know -- signals are awesome, but this is a little less magical and I don't want events to have to 'know' about the reviewer app. """ src = self.organization_source if src and src.allows_updates: connector = src.api if connector and hasattr(connector, 'update_review'): connector.update_review(self, reviews, log_message) reviewkeys = { 'prep_status': 'organization_status_prep', 'review_status': 'organization_status_review' } changed = False for r in reviews: if r.key in reviewkeys: setattr(self, reviewkeys[r.key], r.decision) changed = True if changed: self.save()
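# --- Editor's illustrative sketch (not part of the original app) ---
# The two classmethod helpers above are plain string transforms, e.g.:
#
#     Event.political_scope_display('ocd-division/country:us/state:ca/cd:12')
#     # -> 'CA_12'
#     Event.phone_format('5551234567')
#     # -> '<span style="white-space: nowrap">(555) 123-4567</span>'
#
# The OCD division identifier and phone number are made-up example values.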
mit
520,099,423,519,824,800
43.327138
118
0.613972
false
scholer/py2cytoscape
py2cytoscape/util/util_igraph.py
1
3207
# -*- coding: utf-8 -*- """ Conversion utilities for igraph """ import igraph as ig DEF_SCALING = 100.0 def from_igraph(igraph_network, layout=None, scale=DEF_SCALING): new_graph = {} network_data = {} elements = {} nodes = [] edges = [] # Convert network attributes network_attr = igraph_network.attributes() for key in network_attr: network_data[key] = igraph_network[key] # get network as a list of edges edges_original = igraph_network.es nodes_original = igraph_network.vs node_attr = igraph_network.vs.attributes() for idx, node in enumerate(nodes_original): new_node = {} data = {} data['id'] = str(node.index) data['name'] = str(node.index) for key in node_attr: data[key] = node[key] new_node['data'] = data if layout is not None: position = {} position['x'] = layout[idx][0] * scale position['y'] = layout[idx][1] * scale new_node['position'] = position nodes.append(new_node) # Add edges to the elements edge_attr = igraph_network.es.attributes() for edge in edges_original: new_edge = {} data = {} data['source'] = str(edge.source) data['target'] = str(edge.target) for key in edge_attr: data[key] = edge[key] new_edge['data'] = data edges.append(new_edge) elements['nodes'] = nodes elements['edges'] = edges new_graph['elements'] = elements new_graph['data'] = network_data return new_graph def to_igraph(network): nodes = network['elements']['nodes'] edges = network['elements']['edges'] network_attr = network['data'] node_count = len(nodes) edge_count = len(edges) g = ig.Graph() # Graph attributes for key in network_attr.keys(): g[key] = network_attr[key] g.add_vertices(nodes) # Add node attributes node_attributes = {} node_id_dict = {} for i, node in enumerate(nodes): data = node['data'] for key in data.keys(): if key not in node_attributes: node_attributes[key] = [None] * node_count # Save index to map if key == 'id': node_id_dict[data[key]] = i node_attributes[key][i] = data[key] for key in node_attributes.keys(): g.vs[key] = node_attributes[key] # Create edges edge_tuples = [] edge_attributes = {} for i, edge in enumerate(edges): data = edge['data'] source = data['source'] target = data['target'] edge_tuple = (node_id_dict[source], node_id_dict[target]) edge_tuples.append(edge_tuple) for key in data.keys(): if key not in edge_attributes: edge_attributes[key] = [None] * edge_count # Save index to map edge_attributes[key][i] = data[key] g.add_edges(edge_tuples) # Assign edge attributes for key in edge_attributes.keys(): if key == 'source' or key == 'target': continue else: g.es[key] = edge_attributes[key] return g
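# --- Editor's illustrative sketch (not part of py2cytoscape) ---
# Converts a tiny igraph graph to the cytoscape.js dict produced by
# from_igraph(), assuming python-igraph is installed; attribute names and the
# 'kk' layout are arbitrary choices for the example.
if __name__ == '__main__':
    g = ig.Graph()
    g['name'] = 'demo'
    g.add_vertices(3)
    g.vs['label'] = ['a', 'b', 'c']
    g.add_edges([(0, 1), (1, 2)])
    g.es['weight'] = [1.0, 2.0]
    cyjs = from_igraph(g, layout=g.layout('kk'))
    print(len(cyjs['elements']['nodes']), len(cyjs['elements']['edges']))  # 3 2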
mit
-2,790,407,597,677,307,000
24.452381
65
0.555971
false
ActiveState/code
recipes/Python/580659_Find_Paper_Format_based_Pixel_Width_Height_PDF/recipe-580659.py
1
1787
def find_close(w, h): PaperSizes = { # add new: ensure that first number is <= second number 'A0': [2384, 3370], 'A1': [1684, 2384], 'A2': [1190, 1684], 'A3': [842, 1190], 'A4': [595, 842], 'A5': [420, 595], 'A6': [298, 420], 'A7': [210, 298], 'A8': [148, 210], 'B0': [2835, 4008], 'B1': [2004, 2835], 'B2': [1417, 2004], 'B3': [1001, 1417], 'B4': [709, 1001], 'B5': [499, 709], 'B6': [354, 499], 'B7': [249, 354], 'B8': [176, 249], 'B9': [125, 176], 'B10': [88, 125], 'C2': [1837, 578], 'C3': [578, 919], 'C4': [919, 649], 'C5': [649, 459], 'C6': [459, 323], 'Invoice': [396, 612], 'Executive': [522, 756], 'Letter': [612, 792], 'Legal': [612, 1008], 'Ledger': [792, 1224], } wi = int(round(w, 0)) hi = int(round(h, 0)) if w <= h: w1 = wi h1 = hi else: w1 = hi h1 = wi sw = str(w1) sh = str(h1) stab = [abs(w1-s[0])+abs(h1-s[1]) for s in PaperSizes.values()] small = min(stab) idx = stab.index(small) f = PaperSizes.keys()[idx] if w <= h: ff = f + "-P" ss = str(PaperSizes[f][0]) + " x " + str(PaperSizes[f][1]) else: ff = f + "-L" ss = str(PaperSizes[f][1]) + " x " + str(PaperSizes[f][0]) if small == 0: # exact fit return ff rtxt = "%s x %s (other), closest: %s = %s" # else show best fit rtxt = rtxt % (sw, sh, ff, ss) return rtxt
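# --- Editor's illustrative sketch (not part of the original recipe) ---
# The recipe targets Python 2 (PaperSizes.keys()[idx] relies on list-style dict
# views); the point dimensions below are taken from the table above.
if __name__ == "__main__":
    print find_close(595, 842)   # exact ISO A4 portrait -> 'A4-P'
    print find_close(792, 612)   # exact US Letter landscape -> 'Letter-L'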
mit
-4,654,150,768,877,164,000
27.822581
92
0.371572
false
Mine15029/Magikal-Wazard-Bots
modules/music.py
1
10118
import discord from discord.ext import commands import asyncio import youtube_dl import datetime import modules.perms as perms # Error Suppression because fuck the console spam youtube_dl.utils.bug_reports_message = lambda: '' ytdl_format_options = { 'format': 'bestaudio/best', 'outtmpl': 'cache/%(extractor)s-%(id)s-%(title)s.%(ext)s', 'restrictfilenames': True, 'noplaylist': True, 'nocheckcertificate': True, 'ignoreerrors': False, 'logtostderr': False, 'quiet': True, 'no_warnings': True, 'default_search': 'auto', 'source_address': '0.0.0.0' # ipv6 addresses cause issues sometimes } ffmpeg_options = { 'before_options': '-nostdin', 'options': '-vn' } ytdl = youtube_dl.YoutubeDL(ytdl_format_options) class YTDLSource(discord.PCMVolumeTransformer): def __init__(self, source, *, data, volume=0.5): super().__init__(source, volume) self.data = data self.title = data.get('title') self.url = data.get('url') self.webpage_url = data.get('webpage_url') self.thumbnail = data.get('thumbnail') self.duration = data.get('duration') self.uploader = data.get('uploader') self.views = data.get('view_count') self.likes = data.get('like_count') self.dislikes = data.get('dislike_count') self.date = data.get('upload_date') @classmethod async def from_url(cls, url, *, loop=None): loop = loop or asyncio.get_event_loop() data = await loop.run_in_executor(None, ytdl.extract_info, url) if 'entries' in data: # take first item from a playlist data = data['entries'][0] filename = ytdl.prepare_filename(data) return cls(discord.FFmpegPCMAudio(filename, **ffmpeg_options), data=data) class Music: def __init__(self, bot): self.bot = bot self.player = None self.time_stamp = 0 self.pause_timer = 10 self.requester = [] self.queue_list = [] self.queue_name = [] self.loop_toggle = False async def queue_event(self, ctx): while True: while ctx.voice_client.is_playing(): await asyncio.sleep(1) self.pause_timer = 10 self.time_stamp += 1 if ctx.voice_client.is_paused(): self.pause_timer = 300 try: if not self.loop_toggle: self.queue_list.remove(self.queue_list[0]) self.queue_name.remove(self.queue_name[0]) self.requester.remove(self.requester[0]) self.player = await YTDLSource.from_url(self.queue_list[0], loop=self.bot.loop) ctx.voice_client.play(self.player, after=lambda e: print('Player error: %s' % e) if e else None) ctx.voice_client.source.volume = 0.1 self.time_stamp = 0 self.player = await YTDLSource.from_url(self.queue_list[0], loop=self.bot.loop) ctx.voice_client.play(self.player, after=lambda e: print('Player error: %s' % e) if e else None) ctx.voice_client.source.volume = 0.1 self.time_stamp = 0 except IndexError: pass if self.pause_timer <= 0 and not self.queue_list: await ctx.voice_client.disconnect() self.player = None self.next_toggle = False self.pause_timer = 10 self.time_stamp = 0 return await asyncio.sleep(1) self.pause_timer -= 1 @commands.command() @perms.admin_or_permissions(move_members=True) async def connect(self, ctx, channel: discord.VoiceChannel=''): try: await channel.connect() return except AttributeError: pass try: await ctx.author.voice.channel.connect() except AttributeError: await ctx.send("You are not connected to a voice channel.") @commands.command() @perms.admin_or_permissions(move_members=True) async def disconnect(self, ctx): try: self.queue_list = [] self.queue_name = [] self.requester = [] self.next_toggle = False self.time_stamp = 0 self.pause_timer = 10 self.player = None await ctx.voice_client.disconnect() except AttributeError: await ctx.send('Not connected to a voice channel.') 
@commands.command() async def play(self, ctx, *, url): if '.' not in url: url = 'ytsearch:' + url if ctx.voice_client is None: try: await ctx.author.voice.channel.connect() except AttributeError: return await ctx.send("Not connected to a voice channel.") if ctx.voice_client.is_playing(): if ctx.author.voice.channel != ctx.voice_client.channel: return await ctx.send('Must be in the same voice channel as the bot to add songs to queue!') self.pause_timer = 30 await ctx.send("Added to queue.") kek = await YTDLSource.from_url(url) self.queue_list.append(url) self.queue_name.append(kek.title) self.requester.append(ctx.author) self.player = await YTDLSource.from_url(url, loop=self.bot.loop) ctx.voice_client.play(self.player, after=lambda e: print('Player error: %s' % e) if e else None) self.queue_list.append(url) self.queue_name.append(self.player.title) self.requester.append(ctx.author) ctx.voice_client.source.volume = 0.1 await ctx.send('Now playing: {}. Type !music for more details.'.format(self.player.title)) await Music.queue_event(self, ctx) @commands.command() async def pause(self, ctx): try: if not ctx.voice_client.is_paused(): ctx.voice_client.pause() else: ctx.voice_client.resume() except AttributeError: await ctx.send('Not connected to a voice channel.') @commands.command() @perms.admin_or_permissions(move_members=True) async def stop(self, ctx): try: if ctx.voice_client.is_playing(): self.requester = [] self.queue_list = [] self.queue_name = [] self.next_toggle = False self.pause_timer = 10 self.player = None self.time_stamp = 0 return ctx.voice_client.stop() await ctx.send('Not currently playing.') except AttributeError: await ctx.send('Not connected to a voice channel.') @commands.command() async def skip(self, ctx): ctx.voice_client.stop() @commands.command() @perms.admin_or_permissions(move_members=True) async def loop(self, ctx): if self.loop_toggle: self.loop_toggle = False await ctx.send('Loop disabled.') return self.loop_toggle = True await ctx.send('Loop enabled.') return @commands.command() async def music(self, ctx): print(self.player.likes) try: if ctx.voice_client.is_playing(): queue = [] for x in self.queue_name[1:6]: queue.append(str(self.queue_name.index(x) + 1) + ". 
" + x) if len(self.queue_name) > 6: queue.append('`+' + str(len(self.queue_name) - 5) + '` extra in queue') if not self.player.duration > 3600: t_duration = str(datetime.timedelta(seconds=self.player.duration))[2:] # Total duration of video c_time = str(datetime.timedelta(seconds=self.time_stamp))[2:] # Current time in video duration = "{0} | {1}".format(c_time, t_duration) else: duration = str(datetime.timedelta(seconds=self.time_stamp)) + ' | ' + str( datetime.timedelta(seconds=self.player.duration)) date = str(datetime.datetime.strptime(self.player.date, '%Y%m%d')) date = date[:10] info = discord.Embed(colour=0xFFA500) info.set_author(name='Music Info', icon_url='http://www.free-icons-download.net/images/music-icon-66405.png') if self.player.thumbnail is not None: info.set_thumbnail(url=self.player.thumbnail) if self.loop_toggle: status = 'Currently Playing (Looping)' else: status = 'Currently Playing' info.add_field(name=status, value='[' + self.player.title + ']' + '(' + self.player.webpage_url + ')' + ' `' + duration + '`', inline=False) info.add_field(name='Uploader', value=self.player.uploader) info.add_field(name='Views', value=self.player.views) try: total = self.player.likes + self.player.dislikes percentage = round(self.player.likes / total * 100) info.add_field(name='Rating', value=str(self.player.likes) + '/' + str(total) + ' `' + str(percentage) + '% likes`') except TypeError: pass info.add_field(name='Upload Date', value=date) if queue: info.add_field(name='Queue', value='\n'.join(queue)) info.set_footer(text='Requested by ' + self.requester[0].display_name, icon_url=self.requester[0].avatar_url) return await ctx.send(embed=info) await ctx.send('Not currently playing.') except AttributeError: await ctx.send('Not connected to a voice channel.') def setup(bot): bot.add_cog(Music(bot))
apache-2.0
-7,607,463,302,170,390,000
37.037594
130
0.550702
false
aluminiumgeek/cc-telegram
modules/user_wa.py
1
1067
import wolframalpha def main(bot, *args, **kwargs): """ wa <query> Send query to Wolfram|Alpha. See also: g, w, img """ chat_id = kwargs.get('chat_id') app_id = getattr(bot.settings, 'wa_app_id', None) if not app_id: return 'Module is not configured. You must set `wa_app_id` in settings' client = wolframalpha.Client(app_id) query = ' '.join(args) if not query: return 'Invalid syntax' res = client.query(query) result = [] pods = getattr(res, 'pods', None) if not pods: return 'No results' for pod in res.pods: title = getattr(pod, 'title', None) if not title: continue result.append('<b>{}</b>'.format(title)) for sub in pod.subpods: text = sub.get('plaintext') if not text: continue result.append(' {}'.format(text.replace('\n', '\n '))) bot.send(chat_id=kwargs.get('chat_id'), text='\n'.join(result), data={'disable_web_page_preview': True, 'parse_mode': 'HTML'})
gpl-3.0
-5,632,842,328,801,915,000
29.485714
130
0.555764
false
SiamandMaroufi/PythonPOS
controllers/reports/saleList.py
1
1391
from common.config import _max_rows_, getDate, getTime, split from common.report import Report from common.reportName import ReportName from models.models import User @ReportName("sale-list") class SaleListReport(Report): Name = 'sale-list' def generate(self): username = self.StringReq('User') try: user = User.get(User.username == username) except: user = User.get() self.headers.append(('DateTime', getDate() + ' ' + getTime())) self.headers.append(('UserName', user.fullname())) list = user.salelist list = [d for d in list] users = User.select() for user in users: self.Echo('<User>') self.EchoTag('Id', user.username) self.EchoTag('UserName', user.fullname()) self.Echo('</User>') pages = split(list, _max_rows_) for list in pages: self.Echo('<Page>') for row in list: self.Echo('<Row>') self.EchoTag('Id', row.id) self.EchoTag('Time', row.time) self.EchoTag('Customer', row.customer.name) self.EchoTag('FullSale', row.fullsale) self.EchoTag('Prepaid', row.advance) self.EchoTag('Remain', row.remind()) self.Echo('</Row>') self.Echo('</Page>')
mit
-5,797,853,688,721,229,000
29.911111
70
0.539899
false
emillynge/lasagne-caterer
lasagnecaterer/cook.py
1
23882
""" For orchestrating the lasagna making process """ # builtins import os import tempfile import warnings from asyncio.subprocess import PIPE from collections import (namedtuple, OrderedDict, defaultdict, deque) from contextlib import contextmanager from functools import partial from typing import List, Union import re import asyncio from aiohttp import web import zipfile import atexit from datetime import datetime # pip import numpy as np import tqdm import theano # github packages from lasagnecaterer import oven from elymetaclasses.abc import io as ioabc # relative from . import recipe as LCrecipe from .utils import any_to_char_stream, pbar, best_gpu, ProgressMonitor, ChangeStream, \ JobProgress, async_subprocess, BatchSemaphore, MixinProp, MixinRequires, mixin_mock from .recipe import LasagneBase from .oven import Synthetics, FullArrayBatchGenerator from .menu import Options from .fridge import ClassSaveLoadMixin, TupperWare, BaseFridge class BaseCook(ClassSaveLoadMixin): def __init__(self, opt: Options, oven: FullArrayBatchGenerator, recipe: LasagneBase, fridge): self.opt = opt self.oven = oven self.recipe = recipe self.fridge = fridge box = fridge.shelves['cook'] assert isinstance(box, TupperWare) self.box = box self.open_all_shops() def close_all_shops(self): for klass in self.__class__.__mro__: if 'close_shop' in klass.__dict__: klass.__dict__['close_shop'](self) def open_all_shops(self): for klass in self.__class__.__mro__: if 'open_shop' in klass.__dict__: klass.__dict__['open_shop'](self) def open_shop(self): """ Actions to be taken after everything has been taken out of the fridge :return: """ if 'all_params' in self.fridge.shelves['recipe']: # noinspection PyPropertyAccess self.recipe.saved_params = self.fridge.shelves['recipe'].all_params def close_shop(self): """ Actions to be taken before we put everything in the fridge :return: """ self.fridge.shelves['recipe'][ 'all_params'] = self.recipe.get_all_params_copy() class LasagneTrainer(BaseCook): def train(self, epochs) -> List[None]: for x, y in self.oven.iter_epoch(epochs): yield self.recipe.f_train(x, y) def train_err(self, batches): for x, y in self.oven.iter_batch(batches, part='train'): yield self.recipe.f_cost(x, y) def val(self, epochs): for x, y in self.oven.iter_epoch(epochs, part='val'): yield self.recipe.f_cost(x, y) def test(self, epochs): for x, y in self.oven.iter_epoch(epochs, part='test'): yield self.recipe.f_cost(x, y) def auto_train(self, pbar=pbar): required_opt = ('start_epochs', 'decay_epochs') if any(o not in self.opt for o in required_opt): raise ValueError('Options missing. 
Need {}'.format(required_opt)) s_ep = self.opt.start_epochs def move_pbar(_pb, it): for nex in it: _pb.update(_pb.value + 1) yield nex print('Start train {} epochs\n'.format(s_ep)) bpe_train = self.oven.batches_per_epoch.train bpe_val = self.oven.batches_per_epoch.val message = lambda te, tr: 'te: {0:1.3f} tr: {1:1.3f}'.format(te, tr) pb = pbar('batches', s_ep * (bpe_train + bpe_val), ['te', 'tr']) pb.start() train_err_hist = list(move_pbar(pb, self.train(1))) val_err_hist = list(move_pbar(pb, self.val(1))) l1 = len(train_err_hist) l2 = len(val_err_hist) te_err = np.mean(val_err_hist[-l2:]) tr_err = np.mean(train_err_hist[-l1:]) max_err = tr_err pb.dynamic_messages['tr'] = tr_err pb.dynamic_messages['te'] = te_err prev_p, p = (None, None) @contextmanager def reset_if_nan(): nonlocal p, prev_p prev_p, p = p, self.recipe.get_all_params_copy() yield if np.isnan(train_err_hist[-1]): p, prev_p = prev_p, None if p: self.recipe.set_all_params(p) else: raise AttributeError('Recived non-recoverable NaN results') for j in range(self.opt.start_epochs - 1): with reset_if_nan(): train_err_hist.extend(move_pbar(pb, self.train(1))) val_err_hist.extend(move_pbar(pb, self.val(1))) te_err = np.mean(val_err_hist[-l2:]) tr_err = np.mean(train_err_hist[-l1:]) pb.dynamic_messages['tr'] = tr_err pb.dynamic_messages['te'] = te_err pb.finish() perf_tol = self.opt.get('perf_tol', 0.0) MEM = self.opt.get('opt_mem', 10) grace_epochs = self.opt.get('grace_epochs', s_ep + MEM) - s_ep params = deque([self.recipe.get_all_params_copy()]) te_err = te_err if not np.isnan(te_err) else max_err * 10 val_err = deque([te_err]) pb = pbar('epochs', self.opt.decay_epochs, ['te', 'tr']) for _i in pb(range(self.opt.decay_epochs)): i = _i + 1 try: with reset_if_nan(): train_err_hist.extend(self.train(1)) val_err_hist.extend(self.val(1)) te_err = np.mean(val_err_hist[-l2:]) tr_err = np.mean(train_err_hist[-l1:]) pb.dynamic_messages['tr'] = tr_err pb.dynamic_messages['te'] = te_err te_err = te_err if not np.isnan(te_err) else max_err * 10 except KeyboardInterrupt: break # build or rotate deque if i < MEM: params.appendleft(self.recipe.get_all_params_copy()) val_err.appendleft(te_err) else: params.rotate() val_err.rotate() min_val_err = np.min(val_err) # store or discard if min_val_err >= te_err: # current error is as good or better than all previous # store params[0] = self.recipe.get_all_params_copy() val_err[0] = te_err elif min_val_err == val_err[0]: # oldest model is best. stop # give up print('Early stopping..', val_err) break if i >= grace_epochs: oldest_val_err = val_err[-1] # check if we are making progress # oldest model should have perf_tol worse performance PER STEP # than current best expected_dec = (1 - perf_tol) ** (len(val_err) - 1) if perf_tol and (oldest_val_err * expected_dec < min_val_err): print('Early stopping..', val_err) break else: print('Did not find a minimum. Stopping') pb.finish() idx_best = np.argmin(val_err) print('Stopped training. 
choosing model #{} -> '.format(idx_best), val_err) self.recipe.set_all_params(params[idx_best]) self.box['params'] = params[idx_best] self.box['val_error_hist'] = val_err_hist self.box['train_error_hist'] = train_err_hist @mixin_mock class CookMixinBase: @mixin_mock @property def recipe(self) -> LasagneBase: return None @mixin_mock @property def oven(self) -> FullArrayBatchGenerator: return None @mixin_mock @property def fridge(self) -> BaseFridge: return None @mixin_mock class CharmapMixin(CookMixinBase): @mixin_mock @property def oven(self) -> oven.CharmappedGeneratorMixin: return None def open_shop(self): if 'charmap' in self.fridge.shelves['oven']: self.oven.charmap = self.fridge.shelves['oven'].charmap def close_shop(self): self.fridge.shelves['oven']['charmap'] = self.oven.charmap @mixin_mock class LearningRateMixin(CookMixinBase): @mixin_mock @property def recipe(self) -> Union[LasagneBase, LCrecipe.LearningRateMixin]: return None epochs_trained = MixinProp(default=0) def train(self, epochs) -> List[None]: step = max(self.epochs_trained - self.opt.start_epochs, 0) self.recipe.set_learning_rate(step=step) yield from super().train(epochs) self.epochs_trained += epochs @mixin_mock class ResetStateMixin(CookMixinBase): @mixin_mock @property def recipe(self) -> Union[LasagneBase, LCrecipe.StateReuseMixin]: return None def train(self, *args, **kwargs) -> List[None]: self.recipe.reset_hidden_states() return super().train(*args, **kwargs) def train_err(self, *args, **kwargs) -> List[None]: self.recipe.reset_hidden_states() return super().train_err(*args, **kwargs) def val(self, *args, **kwargs) -> List[None]: self.recipe.reset_hidden_states() return super().val(*args, **kwargs) def test(self, *args, **kwargs) -> List[None]: self.recipe.reset_hidden_states() return super().test(*args, **kwargs) class AsyncHeadChef(LasagneTrainer): compiled_base_dir = theano.config.base_compiledir def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.loop = asyncio.get_event_loop() self.startup_lock = asyncio.locks.Lock(loop=self.loop) self.semaphore = None self.basemodel_path = None self.progress_mon = ProgressMonitor('test') self.active_procs = list() def revive(self): self.terminated = False self.progress_mon.revive() def make_feature_net(self, **features): feature_name = sorted(features.keys()) # sort alphabetically if not features: return list() feature_name = sorted(feature_name, # sort number of possible values key=lambda k: len(features[k]))[0] feature_list = features.pop(feature_name) for feature in feature_list: res = [(feature_name, feature)] if not features: yield res continue for iter_res in self.make_feature_net(**features): yield res + iter_res def featurenet_train(self, out_dir, session_id, concurrent, *feature_packs, **feature_permutations): try: os.mkdir(out_dir) except IOError: pass self.revive() self.basemodel_path = out_dir + os.sep + 'basemodel.lfr' self.fridge.save(self.basemodel_path, manifest=('lasagnecaterer',)) """ fd_bm, self.basemodel_path = tempfile.mkstemp(suffix='.lrf', prefix='basemodel', dir=os.path.abspath('.')) with open(fd_bm, 'wb') as fp: self.fridge.save(fp) atexit.register(partial(os.remove, self.basemodel_path)) """ self.semaphore = BatchSemaphore(concurrent) override_permutations = list(self.make_feature_net(**dict(feature_permutations))) if not feature_packs: override_combinations = override_permutations elif not override_permutations: override_combinations = list(feature_packs) else: override_combinations = list() for pack in feature_packs: 
override_combinations.extend(perm + list(pack) for perm in override_permutations) override_combinations.sort() to_do = [self.train_model(overrides, out_dir) for overrides in override_combinations] job_names = [self.prefix_from_overrides(o) for o in override_combinations] #try: if True: with self.progress_mon.change_q.redirect_stdout(copy=True): with self.progress_mon.change_q.redirect_stderr(copy=True): coros = asyncio.gather(self.progress_mon.start(session_id, job_names), self.trainer_coro(to_do)) self.loop.run_until_complete(coros) #finally: # self.loop.run_until_complete(self.terminate()) async def terminate(self): while self.active_procs: proc = self.active_procs.pop() assert isinstance(proc, asyncio.subprocess.Process) try: proc.terminate() proc.kill() except: pass if not self.progress_mon.terminated: await self.progress_mon.terminate() async def trainer_coro(self, to_do): n = len(to_do) to_do_iter = asyncio.as_completed(to_do) to_do_iter = tqdm.tqdm(to_do_iter, total=n) i = 0 for future in to_do_iter: await future i += 1 await self.progress_mon.terminate() print('Done') async def write_to_log(self, stream: asyncio.streams.StreamReader, logfile: ioabc.OutputStream, prefix): pbar_regx = re.compile(b'(\d+)%\ ?\|#+') state = 1 while True: # print('waiting on ' + prefix) data = await stream.readline() # this should be bytes # print('read {} bytes'.format(len(data))) if not data and stream.at_eof(): # print('breaking') break for percent in [int(percent) for percent in pbar_regx.findall(data)]: self.job_progress(prefix, percent, 'running-{}'.format(state)) if percent == 100: state += 1 logfile.write(data.decode()) def job_progress(self, prefix, percent, stage): self.progress_mon.change_q.put_nowait( JobProgress(prefix, percent, stage)) @staticmethod def prefix_from_overrides(overrides): return '_'.join( '{0}-{1:03.0f}'.format(key, val * 1000 if val <= 1 and isinstance(val, float) else val) for key, val in overrides) async def train_model(self, overrides, out_dir, overwrite=False): prefix = self.prefix_from_overrides(overrides) fname = out_dir + os.sep + prefix + '.lfr' if os.path.isfile(fname) and not overwrite: warnings.warn('{} already exist'.format(fname)) try: zipfile.ZipFile(fname) except zipfile.BadZipFile: warnings.warn('not a valid zip file -> overwrite'.format(fname)) else: warnings.warn('Skipping...') self.job_progress(prefix, 100, 'complete') return with (await self.semaphore) as sem_id: self.job_progress(prefix, 33, 'init') # get a compiledir comp_dir = self.compiled_base_dir + '/semaphore-1-' + str(sem_id) # make sure we only start 1 new process at a time so we don't put # all on same GPU with (await self.startup_lock): self.job_progress(prefix, 66, 'init') # find best gpu. 
Wait if no suitable gpu exists gpu = await self.progress_mon.best_gpu(max_load=70) # define environment -> set execution on specific GPU _env = dict(os.environ) _env['THEANO_FLAGS'] = 'base_compiledir={0},device=gpu{1}'.format( comp_dir, gpu.dev) # make sure compiledir exists try: os.mkdir(comp_dir) except FileExistsError: pass # Start up a worker print('Started on {0} at {3} using' ' ENV={1} and {2}'.format(prefix, _env['THEANO_FLAGS'], gpu, datetime.now().strftime('%H:%M') )) self.job_progress(prefix, 80, 'init') p = await async_subprocess('mypython3', '-i', self.basemodel_path, stdout=PIPE, stderr=PIPE, stdin=PIPE, env=_env) self.active_procs.append(p.p) stdin = p.stdin assert isinstance(stdin, asyncio.streams.StreamWriter) def wrap(lines) -> bytes: cmd = b'import sys;import os;return_code = 1;' cmd += b';'.join(lines) cmd += b';return_code = 0\n' return cmd lines = [b'print("setting opts")', b'print("PID: {}".format(os.getpid()))'] # send commands to worker to change features for feature_name, value in overrides: lines.append('fr.opt.{0} = {1}'.format(feature_name, value).encode()) lines.append(b'sys.stdout.write(str(fr.opt))') lines.append(b'fr.recipe.reset_params()') # startup the training lines.append(b'fr.cook.auto_train()') lines.append('fr.save("{0}")'.format(fname).encode()) # wrap lines to one statement and feed to process cmd = wrap(lines) stdin.write(cmd) # call sys exit so process will terminate stdin.write(b'print("return_code: ", return_code)\n') stdin.write(b'sys.exit(return_code)\n') await stdin.drain() # wait some time for the worker to actually start using the # GPU before releasing startup lock for i in range(40): await asyncio.sleep(1) self.job_progress(prefix, 60 + i, 'init') # startup lock released here # write to progress_monitor and log - wait for process with open(out_dir + os.sep + prefix + '.log', 'w', buffering=1) as logfile: logfile.write(cmd.decode()) logfile.flush() # noinspection PyTypeChecker to_do = [self.write_to_log(p.stdout, logfile, prefix), self.write_to_log(p.stderr, logfile, prefix), p.p.wait()] await asyncio.gather(*to_do, loop=self.loop) # process terminated. 
remove process from active list # and determine if successful self.active_procs.remove(p.p) term_t = datetime.now().strftime('%H:%M') if p.p.returncode == 0: self.job_progress(prefix, 100, 'complete') print('Completed ' + fname + 'at ' + term_t) else: # return code NOT 0 - something has gone wrong self.job_progress(prefix, 100, 'dead') warnings.warn( 'job "{0}" ended with returncode {1} at {2}'.format(prefix, p.p.returncode, term_t)) def __del__(self): self.terminate() """ import asyncio import tqdm from time import sleep import signal loop = asyncio.get_event_loop() import theano import os from asyncio.subprocess import PIPE import re env = os.environ #FNULL = open(os.devnull, 'w') import shutil def remove_lock(cmp_dir): for root, dirs, files in os.walk(cmp_dir): if 'lock_dir' in dirs: shutil.rmtree(root + '/lock_dir') #if 'lock' in files and 'lock_dir' in root: # for f in files: # if f == 'lock': # # os.remove(root + '/' + f) processes = list() pbar_regx = re.compile(b'(\d+%) \|#+') start_up_lock = asyncio.locks.Lock(loop=loop) async def train_model(dropout, semaphore): with (await semaphore): fname = 'models/dropoutcv3/n3l512do_{:3}'.format(int(dropout*100)).replace(' ', '0') cmp_dir = compiled_base_dir + '/semaphore-1-' + str(semaphore._value) with open(fname + '.log', 'wb', buffering=1) as logfile: with await start_up_lock: gpu = best_gpu() while gpu.free < 2000: print('Best gpu: {} - Waiting...'.format(gpu)) await asyncio.sleep(1) gpu = best_gpu() _env = dict(env) _env['THEANO_FLAGS'] = 'base_compiledir={0},device=gpu{1}'.format(cmp_dir, gpu.dev) try: os.mkdir(cmp_dir) except FileExistsError: pass print('Started on ' + fname + ' using ' + _env['THEANO_FLAGS']) #sleep(10) p = await asyncio.create_subprocess_exec('mypython3', u'basemodel.lm', '--train', fname + '.lm', '--dropout', str(dropout), stdout=PIPE, stderr=PIPE, env=_env) await asyncio.sleep(10) # wait some time to release lock def read_to_log(): #print(p.stdout._buffer) if p.stderr._buffer: f = pbar_regx.findall(p.stderr._buffer) if f: print('{0} at {1}'.format(fname, f[0].decode())) logfile.write(p.stdout._buffer) p.stdout._buffer.clear() logfile.write(p.stderr._buffer) p.stderr._buffer.clear() #print('flushing') logfile.flush() processes.append(p) while True: try: #print('waiting') await asyncio.wait_for(p.wait(), 1) read_to_log() break except asyncio.TimeoutError: #print('timed out') read_to_log() #logfile.flush() #pass processes.remove(p) remove_lock(cmp_dir) print('Completed ' + fname) #counter[status] += 1 # <12> #return counter # <13> dropouts = np.arange(0.0, .95, .05) #[.1, .2, .3, .4, .5, .6, .7, .8] try: loop.run_until_complete(trainer_coro(dropouts, 5)) loop.close() finally: for p in processes: try: print('interrupting {}'.format(p)) p.send_signal(signal.SIGINT) sleep(5) except Exception: pass try: print('terminating {}'.format(p)) p.terminate() sleep(1) except Exception: pass try: print('killing {}'.format(p)) p.kill() except Exception: pass """
gpl-3.0
-9,100,366,519,454,124,000
34.751497
99
0.513818
false
h3llrais3r/Auto-Subliminal
tests/notifiers/test_growl.py
1
3468
# coding=utf-8 from autosubliminal.core.item import DownloadItem, WantedItem from autosubliminal.notifiers.growl import GrowlNotifier notifier_name = 'Growl' download_item = DownloadItem(WantedItem()) download_item.videopath = 'path/to/video' download_item.subtitlepath = 'path/to/subtitle' download_item.downlang = 'en' download_item.provider = 'provider' def test_growl_disabled(): notifier = GrowlNotifier() assert notifier.name == notifier_name assert notifier.notify('test') is False assert notifier.notify_download(download_item) is False def test_growl_error(monkeypatch, mocker): monkeypatch.setattr('autosubliminal.NOTIFYGROWL', True) monkeypatch.setattr('autosubliminal.GROWLHOST', 'localhost') monkeypatch.setattr('autosubliminal.GROWLPORT', 23053) mocker.patch('gntp.notifier.GrowlNotifier.notify', return_value=False) notifier = GrowlNotifier() assert notifier.name == notifier_name assert notifier.notify('test') is False assert notifier.notify_download(download_item) is False def test_growl_exception(monkeypatch, mocker): monkeypatch.setattr('autosubliminal.NOTIFYGROWL', True) monkeypatch.setattr('autosubliminal.GROWLHOST', 'localhost') monkeypatch.setattr('autosubliminal.GROWLPORT', 23053) # monkeypatch.setattr('gntp.notifier.GrowlNotifier.notify', Exception) # seems to work also mocker.patch('gntp.notifier.GrowlNotifier.notify', side_effect=Exception) notifier = GrowlNotifier() assert notifier.name == notifier_name assert notifier.notify('test') is False assert notifier.notify_download(download_item) is False def test_growl_registration_error(monkeypatch, mocker): monkeypatch.setattr('autosubliminal.NOTIFYGROWL', True) monkeypatch.setattr('autosubliminal.GROWLHOST', 'localhost') monkeypatch.setattr('autosubliminal.GROWLPORT', 23053) mocker.patch('gntp.notifier.GrowlNotifier.register', return_value=False) notifier = GrowlNotifier() assert notifier.name == notifier_name assert notifier.test() is False def test_growl_registration_exception(monkeypatch, mocker): monkeypatch.setattr('autosubliminal.NOTIFYGROWL', True) monkeypatch.setattr('autosubliminal.GROWLHOST', 'localhost') monkeypatch.setattr('autosubliminal.GROWLPORT', 23053) mocker.patch('gntp.notifier.GrowlNotifier.register', side_effect=Exception) notifier = GrowlNotifier() assert notifier.name == notifier_name assert notifier.test() is False def test_growl_notify_download(monkeypatch, mocker): monkeypatch.setattr('autosubliminal.NOTIFYGROWL', True) monkeypatch.setattr('autosubliminal.GROWLHOST', 'localhost') monkeypatch.setattr('autosubliminal.GROWLPORT', 23053) mocker.patch('gntp.notifier.GrowlNotifier.notify', return_value=True) notifier = GrowlNotifier() assert notifier.name == notifier_name assert notifier.notify('test') is True assert notifier.notify_download(download_item) is True def test_growl_test(monkeypatch, mocker): monkeypatch.setattr('autosubliminal.NOTIFYGROWL', True) monkeypatch.setattr('autosubliminal.GROWLHOST', 'localhost') monkeypatch.setattr('autosubliminal.GROWLPORT', 23053) mocker.patch('gntp.notifier.GrowlNotifier.register', return_value=True) mocker.patch('gntp.notifier.GrowlNotifier.notify', return_value=True) notifier = GrowlNotifier() assert notifier.name == notifier_name assert notifier.test() is True
gpl-3.0
-3,922,275,213,408,100,400
40.285714
95
0.760381
false
raphiz/bsAbstimmungen
tests/importer/votingimporter_test.py
1
1833
from bsAbstimmungen.importer import votingimporter from datetime import datetime from mock import patch import os import shutil from ..utils import mockdb @patch('bsAbstimmungen.importer.votingimporter.VotingScraper.find') @patch('bsAbstimmungen.importer.votingimporter.VotingParser.parse') @patch('bsAbstimmungen.importer.votingimporter.utils.download') def test_fetch(download, parse, find, mockdb): f = datetime(year=2014, month=2, day=1) t = datetime(year=2014, month=2, day=28) find.return_value = [ 'http://abstimmungen.grosserrat-basel.ch/archiv/Amtsjahr_2014-2015/' '2014.02.12/Abst_0475_20140212_092150_0001_0000_ab.pdf', 'http://abstimmungen.grosserrat-basel.ch/archiv/Amtsjahr_2014-2015/' '2014.02.12/Abst_0493_20140212_114415_0017_0000_sa.pdf'] votingimporter.fetch(mockdb, f, t, directory='test_cache') # Verify the scraper is called find.assert_called_with(f, t) # Verify the download calls assert 2 == len(download.mock_calls) download.assert_any_call( 'http://abstimmungen.grosserrat-basel.ch/archiv/Amtsjahr_2014-2015/' '2014.02.12/Abst_0475_20140212_092150_0001_0000_ab.pdf', 'test_cache/Abst_0475_20140212_092150_0001_0000_ab.pdf', ) download.assert_any_call( 'http://abstimmungen.grosserrat-basel.ch/archiv/Amtsjahr_2014-2015/' '2014.02.12/Abst_0493_20140212_114415_0017_0000_sa.pdf', 'test_cache/Abst_0493_20140212_114415_0017_0000_sa.pdf' ) # Verify the parse method was called for both assert 2 == len(parse.mock_calls) parse.assert_any_call( 'test_cache/Abst_0493_20140212_114415_0017_0000_sa.pdf' ) parse.assert_any_call( 'test_cache/Abst_0475_20140212_092150_0001_0000_ab.pdf' ) # Clean up... shutil.rmtree('test_cache')
mit
2,836,102,309,643,791,400
34.25
76
0.698854
false
Monofraps/Mocca
lib/Actors.py
1
5913
import os import sys import platform import subprocess from .MessageFormatter import mocca_info, mocca_debug from .StringInterpolator import interpolate_string # The OS we're running on will be used to selectively skip dependencies target_os = sys.platform target_arch = platform.machine() class MoccaProject: """ Class wrapping business logic on top of the Project model """ def __init__(self, project_model, project_root): self.model = project_model self.root = project_root self.resolved_variables = {} self._resolve_variables() def sync_dependencies(self): """ Syncs all dependencies in the project """ for dependency in self.model.dependencies: MoccaDependency(dependency, self.root, self.resolved_variables).sync() def save(self): """ Saves the project's .mocca file """ file_descriptor = open(os.path.join(self.root, '.mocca'), 'w') self.model.to_json_file(file_descriptor) file_descriptor.close() def _resolve_variables(self): """ Tries to resolve all project variables """ for (key, value) in self.model.variables.items(): if value == '<(env': if key not in os.environ: raise RuntimeError("Unresolved project variable {0}".format(key)) self.resolved_variables[key] = os.environ[key] class MoccaDependency: """ Class wrapping business logic on top of a project's Dependency model """ def __init__(self, dependency_model, project_root, variables): """ Creates a dependency wrapper from a Dependency model and the project's root directory :param dependency_model: The underlying dependency model :param project_root: The directory containing the .mocca file. This is needed to resolve relative paths. :param variables: A dict containing resolved project variables """ self._model = dependency_model self.root = project_root self.project_vars = variables if os.path.isabs(self._get_model_path()): self.abs_path = self._get_model_path() else: self.abs_path = os.path.abspath(os.path.join(self.root, self._get_model_path())) self.branch = self._get_model_branch() if not self.branch: if self._get_model_vcs() == 'git': self.branch = 'master' elif self._get_model_vcs() == 'hg': self.branch = 'default' def sync(self): """ Syncs the dependency """ mocca_debug("Target OS: {0}:{1}".format(target_os, target_arch)) if not len(self._model.target_os) == 0: is_target_os = False for tos in self._model.target_os: if tos.split(':')[0] != target_os: continue if ':' in tos and tos.split(':')[1] != target_arch: continue is_target_os = True if not is_target_os: mocca_info("Skipping {0} because target os requirement not met".format(self._model.path)) return if not os.path.exists(self.abs_path): os.makedirs(self.abs_path) if not os.access(self.abs_path, os.W_OK): raise RuntimeError("Cannot write to {0}".format(self.abs_path)) if self.is_cloned(): self.pull() else: self.clone() def is_cloned(self): """ Checks whether the dependency's repo has already been cloned (i.e. whether a clone or pull is necessary)""" if self._model.vcs == 'git': return os.path.isdir(os.path.join(self.abs_path, '.git')) elif self._model.vcs == 'hg': return os.path.isdir(os.path.join(self.abs_path, '.hg')) def pull(self): """ Performs a pull """ print("") mocca_info("Pulling updates from {0} {1} into {2}" .format(self._get_model_url(), self.branch, self._get_model_path())) args = [] if self._model.vcs == 'git': args += ['git', 'pull'] elif self._model.vcs == 'hg': args += ['hg', 'pull'] mocca_debug(self.abs_path) mocca_info(' '.join(args)) subprocess.Popen(args, cwd=self.abs_path).wait() self.post_pull() def post_pull(self): """ Performs post-pull operations (i.e. 
branch/tag checkouts """ print("") mocca_info("Running post-pull actions for {0} {1}" .format(self._get_model_path(), self.branch)) args = [] if self._model.vcs == 'git': args += ['git', 'checkout', self.branch] elif self._model.vcs == 'hg': args += ['hg', 'up', self.branch] mocca_debug(self.abs_path) mocca_info(' '.join(args)) subprocess.Popen(args, cwd=self.abs_path).wait() def clone(self): """ Performs a clone """ print("") mocca_info("Cloning {0} {1} into {2}".format(self._get_model_url(), self.branch, self._get_model_path())) args = [] if self._model.vcs == 'git': args += ['git', 'clone', self._get_model_url(), '--branch', self.branch, '--single-branch', self.abs_path] elif self._model.vcs == 'hg': args += ['hg', 'clone', '-r', self.branch, self._get_model_url(), self.abs_path] mocca_debug(self.abs_path) mocca_info(' '.join(args)) subprocess.check_call(args) def _get_model_branch(self): return interpolate_string(self._model.branch, self.project_vars) def _get_model_vcs(self): return interpolate_string(self._model.vcs, self.project_vars) def _get_model_path(self): return interpolate_string(self._model.path, self.project_vars) def _get_model_url(self): return interpolate_string(self._model.url, self.project_vars)
mit
4,035,929,854,057,662,500
35.5
119
0.577203
false
sgivan/RNA-Seq-Toolkit
bin/rst_datafiles_setup.py
1
13438
#!/usr/bin/env python import os, sys, argparse, shutil import re, yaml, subprocess, time # start with command line options argparser = argparse.ArgumentParser(description="Parse tab-delimited file") argparser.add_argument("--infile", type=str, help="file to parse", default="file") argparser.add_argument("--verbose", action="store_true", help="verbose messages to terminal") args = argparser.parse_args() if args.verbose: print 'input file: %(filename)s' % { "filename": args.infile } f_yaml = open(args.infile, 'r') # open infile config = yaml.load(f_yaml) curdir=os.getcwd() if args.verbose: print "current working directory: '%(workdir)s'" % { "workdir": curdir } os.environ['PATH']=config['rst_path'] + "/bin" + ":" + os.environ['PATH'] clength=len(config['input']['control']) if args.verbose: print "dumping config" print yaml.dump(config) # # define some functions # # instead of composing this twice # create a function to build the file structure # def create_file_struct(sample_number, fileset, config, curdir): number_of_seq_files=len(fileset) if number_of_seq_files == 2: if args.verbose: print "\tworking with paired-end data" if config['paired']: if args.verbose: print "\tthis confirms configuration file" else: print "\tthis conflicts with configuration file\n\tplease revise\n\texiting now" sys.exit(1) try: os.chdir(config['working_datadir']) except: print "can't chdir to '%(dirname)s'" % { 'dirname': config['working_datadir'] } print "exiting now" sys.exit(2) wdir=os.getcwd() try: os.mkdir("Sample_" + str(sample_number)) except: print "can't create directory 'Sample_%(dirdigit)s' in '%(workdirname)s'" % { "dirdigit": sample_number, "workdirname": config['working_datadir'] } print "exiting now" sys.exit(3) if args.verbose: print "\tdirectory 'Sample_%(dirdigit)s' created in '%(workdirname)s'" % { "dirdigit": sample_number, "workdirname": config['working_datadir'] } try: os.chdir("Sample_" + str(sample_number)) except: print OSError if args.verbose: print "\tnow in directory " + os.getcwd() lcnt=0 for sfile in fileset: target=os.path.join(curdir,config['original_datadir'],sfile) if args.verbose: print "\tcreating symlink called '%(linkname)s' pointing to '%(linktarget)s'" % { "linkname": sfile, "linktarget": target } try: os.symlink(target,sfile) except OSError as e: print e.errno print e.filename print e.strerror lcnt += 1 if args.verbose: print "\tcreating symlink called 'set%(linkname)s.fq' pointing to '%(linktarget)s'" % { "linkname": lcnt, "linktarget": sfile } try: os.symlink(sfile, "set" + str(lcnt) + ".fq") except OSError as e: print e.errno print e.filename print e.strerr os.chdir(curdir) wdir=os.getcwd() print "\tnow in directory" + wdir else: if args.verbose: print "\tworking with non-paired-end data" def monitor_slurm_jobs(slurmjobs): # not perfect, but it works if args.verbose: print "slurm jobs to monitor:" print slurmjobs wait=1 while wait: wait=0 for jobid in slurmjobs: rtn=0 try: rtn=subprocess.check_output("squeue -o %T --noheader -j " + jobid, shell=True) except subprocess.CalledProcessError as e: print "can't call squeue with jobid %(jobid)i: %(ecode)i" % { "jobid": jobid, "ecode": e.returncode } sys.exit(15) if rtn: wait=1 if args.verbose: print "waiting for job " + jobid time.sleep(5) # # end of functions # # # create new working directory # fail if the directory already exists # slurmjobs=[] filemap={} filemap['control']=[] filemap['experimental']=[] if config['setup_files']: print "\nsetting up file structure for input files\n" if not os.access(config['working_datadir'], 
os.F_OK): if args.verbose: print "creating new directory to place renamed files: %(newdir)s." % { "newdir": config['working_datadir'] } if os.access(os.path.split(config['working_datadir'])[0], os.W_OK): os.mkdir(config['working_datadir']) else: print "can't create the directory" sys.exit(4) else: print "%(newdir)s already exists. Will not overwrite -- please rename or move the diretory." % { "newdir": config['working_datadir'] } print "Exiting now." sys.exit(5) # # end of working directory section # sample_number=0 # clength=len(config['input']['control']) if args.verbose: print "control replicates: %(clen)i" % { "clen": clength } for i in config['input']['control']: sample_number += 1 if args.verbose: print "\n\tcontrol - %(repname)s will be given symbolic name 'Sample_%(sint)s'" % { "repname": i, "sint": sample_number } create_file_struct(sample_number, config['input']['control'][i], config, curdir) filemap['control'].append([i, 'Sample_' + str(sample_number)]) if 'experimental' in config['input']: elength=len(config['input']['experimental']) if args.verbose: print "experimental data sets: %(length)i" % { "length": elength } efiles={} for i in config['input']['experimental']: efiles[i]=[] # # This cut corresponds to the sample replicate. There can be any number of sample replicates. which will have either a single file (non-PE) or a pair of files (Paired End) # if args.verbose: print "sample replicates in set %(eset)s: %(filenames)s" % { "eset": i, "filenames": config['input']['experimental'][i] } number_of_reps=len(config['input']['experimental'][i]) if args.verbose: print "number of replicates: %(numseqs)i." % { "numseqs": number_of_reps } for j in config['input']['experimental'][i]: sample_number += 1 if args.verbose: print "\n\t%(setname)s - %(repname)s will be given symbolic name 'Sample_%(sint)s'" % { "setname": i, "repname": j, "sint": sample_number } create_file_struct(sample_number, config['input']['experimental'][i][j], config, curdir) efiles[i].append([j, 'Sample_' + str(sample_number)]) filemap['experimental']=efiles # # print out yaml file containing map of original files to standardized files # mapfile = file('filemap.yaml', 'w') yaml.dump(filemap, mapfile) mapfile.close() # End of yaml file map if args.verbose: print str(sample_number) + ' samples' print "Input file setup finished." if config['preprocess']: print '\n\n pre-process the input data.' try: os.mkdir(config['working_alignment_dir']) except OSError as e: print "can't create directory '%(dirname)s'." % { "dirname": config['working_alignment_dir'] } print e.errno print e try: os.chdir(config['working_alignment_dir']) except OSError as e: print "can't chdir into '%(dirname)s'." % { "dirname": config['working_alignment_dir'] } print e.errno print e print "creating symlnks to preprocess and alignment index files in " + config['working_alignment_dir'] if os.access('index.preprocess', os.F_OK): print "Will not overwrite current 'index.preprocess' symlink.\nPlease remove it." sys.exit(6) try: os.symlink(config['filter_datadir'], 'index.preprocess') except OSError as e: print "can't create index.preprocess symlink pointing to '%(dirname)s.'" % { 'dirname': config['filter_datadir'] } print e.errno print e.filename # print e.strerr if os.access('index.align', os.F_OK): print "Will now overwrite current 'index.align' symlink.\nPlease remove it." 
sys.exit(7) try: os.symlink(config['index_datadir'], 'index.align') except OSError as e: print "can't create index.align symlink pointing to '%(dirname)s.'" % { 'dirname': config['index_datadir'] } print e.errno print e.filename # print e.strerr if os.access('index', os.F_OK): print "Will now overwrite current 'index' symlink.\nPlease remove it." sys.exit(8) try: os.symlink('index.preprocess', 'index') except OSError as e: print "can't create index symlink pointing to index.preproces" print e.errno print e.filename # print e.strerr if args.verbose: print "preprocess and align symlinks created in " + curdir setup_script=os.path.join(config['rst_path'], 'bin', 'setup.sh') if args.verbose: print "calling setup script '%(scriptname)s'." % { "scriptname": setup_script } try: out=subprocess.check_call(setup_script, shell=True) except subprocess.CalledProcessError as e: print "call to symlink failed" print "error code: %(ecode)i" % { "ecode": e.returncode } sys.exit(9) if args.verbose: print "running RST preprocessing routines" rst_script=os.path.join(config['rst_path'], 'bin', 'RNAseq_process_data.sh') out="" try: out=subprocess.check_output(rst_script + " --preprocess_only --submit --threads " + str(config['threads']) + " Sample_*", shell=True) except subprocess.CalledProcessError as e: print "call to %(rst)s failed with error code %(ecode)i" % { "rst": rst_script, "ecode": e.returncode } sys.exit(10) os.chdir(curdir) for line in str.splitlines(out): match = re.match("OUTPUT", line) if match: words = str.split(line) id = words[-1] slurmjobs.append(id) if config['align']: monitor_slurm_jobs(slurmjobs) if config['align']: if args.verbose: print "\n\naligning data to reference genome sequence" os.chdir(config['working_alignment_dir']) try: os.remove('index') except OSError as e: print "can't remove index symlink: %(ecode)i" % { "ecode": e.errno } sys.exit(11) try: os.symlink('index.align', 'index') except OSError as e: print "can't create index symlink to index.align: %(ecode)i" % { "ecode": e.errno } sys.exit(12) rst_script=os.path.join(config['rst_path'], 'bin', 'RNAseq_process_data.sh') try: subprocess.check_call(rst_script + " --partial --submit --threads " + str(config['threads']) + " Sample_*", shell=True) except subprocess.CalledProcessError as e: print "can't call %(scriptname)s: %(ecode)i" % { "scriptname": rst_script, "ecode": e.returncode } sys.exit(13) if config['diff_expression']: filemapfile = file('filemap.yaml', 'r') filemap=yaml.load(filemap_file) try: os.mkdir('DEA') except OSError as e: print "can't create DEA directory: %(ecode)s" % { "ecode": e.strerror } sys.exit(13) os.chdir('DEA') files=os.listdir('../align') for filename in files: fmatch=re.match("Sample_", filename) if (fmatch): print "Creating symlink to '%(filename)s'" % { 'filename': filename } os.symlink("../align/" + str(filename), str(filename)) os.chdir(filename) rst_script=os.path.join(config['rst_path'], 'bin', 'STAR_merge_gene_counts.py') if config['paired']: try: subprocess.check_call([rst_script]) except OSError as e: print "can't run %(scriptname)s" % { 'scriptname': rst_script } sys.exit(14) else: try: subprocess.check_call([rst_script, '--seonly']) except OSError as e: print "can't run %(scriptname)s --seonly" % { 'scriptname': rst_script } os.chdir(os.path.join(curdir, 'DEA')) # # copy & run make_gene_cnts_per_sample.sh script from rst directory to curdir # shutil.copyfile(os.path.join(config['rst_path'], 'bin', 'make_gene_cnts_per_sample.sh'), 'make_gene_cnts_per_sample.sh') try: 
subprocess.check_call(['sh', 'make_gene_cnts_per_sample.sh']) except OSError as e: print "can't run make_gene_cnts_per_sample.sh: %(estring)s" % { 'estring': e.strerror } sys.exit(15) # # copy & run join_gene_cnts.sh script from rst directory to curdir # shutil.copyfile(os.path.join(config['rst_path'], 'bin', 'join_gene_cnts.sh'), 'join_gene_cnts.sh') try: subprocess.check_call(['sh', 'join_gene_cnts.sh']) except OSError as e: print "can't run join_gene_cnts.sh: %(estring)s" % { 'estring': e.strerror } sys.exit(16) for j in config['input']['experimental'][i]: jlength=len(j) datafilename= deseq2_script=os.path.join(config['rst_path'], 'bin', 'create_DESeq2_cmd_sbatch_file.py') subprocess.check_call([deseq2_script, '--numberOfControls=' + str(elength), '--numberOfExperimentals=' + str(jlength), '--datafile', '--prefix'])
gpl-3.0
1,884,129,174,556,674,300
34.550265
173
0.599271
false
jonathansick/xvistaprof
setup.py
1
1081
#!/usr/bin/env python # encoding: utf-8 import codecs import os from setuptools import setup, find_packages PACKAGE = "xvistaprof" NAME = "xvistaprof" DESCRIPTION = "Astropy reader for XVISTA profile tables" AUTHOR = "Jonathan Sick" AUTHOR_EMAIL = "[email protected]" URL = "https://github.com/jonathansick/xvistaprof/" VERSION = __import__(PACKAGE).__version__ def read(fname): return codecs.open(os.path.join(os.path.dirname(__file__), fname)).read() setup( name=NAME, version=VERSION, description=DESCRIPTION, long_description=read("README.rst"), author=AUTHOR, author_email=AUTHOR_EMAIL, license="BSD", url=URL, packages=find_packages(exclude=["tests.*", "tests"]), classifiers=[ "Development Status :: 5 - Production/Stable", "Intended Audience :: Developers", "Intended Audience :: Science/Research", "License :: OSI Approved :: BSD License", "Operating System :: OS Independent", "Programming Language :: Python", ], zip_safe=False, )
bsd-2-clause
-8,398,321,683,709,075,000
24.738095
77
0.644773
false
luispedro/waldo
waldo/goslim/models.py
1
2258
# -*- coding: utf-8 -*- # Copyright (C) 2009-2013, Luis Pedro Coelho <[email protected]> # vim: set ts=4 sts=4 sw=4 expandtab smartindent: # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. from __future__ import division from sqlalchemy import Column, String, Integer, ForeignKey from sqlalchemy.orm import relation, backref from sqlalchemy.ext.declarative import declarative_base from waldo.backend import Base from waldo.go.models import Term as GOTerm class SlimSet(Base): __tablename__ = 'go_slim_set' id = Column(String(24), primary_key=True) def __init__(self, id): self.id = id class SlimTerm(Base): __tablename__ = 'go_slim_term' id = Column(Integer, primary_key=True) name = Column(String(24)) slim_set = Column(String(24), ForeignKey('go_slim_set.id')) def __init__(self, name, slim_set): self.name = name self.slim_set = slim_set class SlimMapping(Base): __tablename__ = 'go_slim_map' id = Column(Integer, primary_key=True) full_id = Column(String(24), ForeignKey(GOTerm.id), index=True) slim_id = Column(String(24), ForeignKey(SlimTerm.id)) def __init__(self, full_id, slim_id): self.full_id = full_id self.slim_id = slim_id
mit
8,286,669,829,951,545,000
38.614035
80
0.714349
false
OCA/l10n-brazil
l10n_br_fiscal/models/icms_regulation.py
1
50489
# Copyright (C) 2019 Renato Lima - Akretion <[email protected]> # License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html from lxml import etree from odoo import api, fields, models from ..constants.fiscal import ( FISCAL_OUT, TAX_DOMAIN_ICMS, TAX_DOMAIN_ICMS_FCP, TAX_DOMAIN_ICMS_ST, ) from ..constants.icms import ICMS_ORIGIN_TAX_IMPORTED VIEW = """ <page name="uf_{0}" string="{1}"> <notebook> <page name="uf_{0}_internal" string="Interno"> <group name="icms_internal_{0}" string="Internal"> <field name="icms_internal_{0}_ids" context="{{'tree_view_ref': 'l10n_br_fiscal.tax_definition_icms_tree', 'default_icms_regulation_id': id, 'default_tax_group_id': {2}, 'default_state_from_id': {5}}}"/> </group> <group name="icms_external_{0}" string="External"> <field name="icms_external_{0}_ids" context="{{'tree_view_ref': 'l10n_br_fiscal.tax_definition_icms_tree', 'default_icms_regulation_id': id, 'default_tax_group_id': {2}, 'default_state_from_id': {5}}}"/> </group> </page> <page name="uf_{0}_st" string="ST"> <field name="icms_st_{0}_ids" context="{{'tree_view_ref': 'l10n_br_fiscal.tax_definition_icms_tree', 'default_icms_regulation_id': id, 'default_tax_group_id': {3}, 'default_state_from_id': {5}}}"/> </page> <page name="uf_{0}_others" string="Outros"> <field name="icms_fcp_{0}_ids" context="{{'tree_view_ref': 'l10n_br_fiscal.tax_definition_icms_tree', 'default_icms_regulation_id': id, 'default_tax_group_id': {4}, 'default_state_from_id': {5}}}"/> </page> </notebook> </page> """ # noqa class ICMSRegulation(models.Model): _name = "l10n_br_fiscal.icms.regulation" _inherit = ["mail.thread", "mail.activity.mixin"] _description = "Tax ICMS Regulation" name = fields.Text(string="Name", required=True, index=True) icms_imported_tax_id = fields.Many2one( comodel_name="l10n_br_fiscal.tax", string="ICMS Tax Imported", domain=[("tax_group_id.tax_domain", "=", TAX_DOMAIN_ICMS)], ) icms_internal_ac_ids = fields.One2many( comodel_name="l10n_br_fiscal.tax.definition", inverse_name="icms_regulation_id", string="ICMS Internal AC", domain=[ ("state_from_id.code", "=", "AC"), ("state_to_ids.code", "=", "AC"), ("tax_group_id.tax_domain", "=", TAX_DOMAIN_ICMS), ], ) icms_external_ac_ids = fields.One2many( comodel_name="l10n_br_fiscal.tax.definition", inverse_name="icms_regulation_id", string="ICMS External AC", domain=[ ("state_from_id.code", "=", "AC"), ("state_to_ids.code", "!=", "AC"), ("tax_group_id.tax_domain", "=", TAX_DOMAIN_ICMS), ], ) icms_st_ac_ids = fields.One2many( comodel_name="l10n_br_fiscal.tax.definition", inverse_name="icms_regulation_id", string="ICMS ST AC", domain=[ ("state_from_id.code", "=", "AC"), ("tax_group_id.tax_domain", "=", TAX_DOMAIN_ICMS_ST), ], ) icms_fcp_ac_ids = fields.One2many( comodel_name="l10n_br_fiscal.tax.definition", inverse_name="icms_regulation_id", string="ICMS FCP AC", domain=[ ("state_from_id.code", "=", "AC"), ("state_to_ids.code", "=", "AC"), ("tax_group_id.tax_domain", "=", TAX_DOMAIN_ICMS_FCP), ], ) icms_internal_al_ids = fields.One2many( comodel_name="l10n_br_fiscal.tax.definition", inverse_name="icms_regulation_id", string="ICMS Internal AL", domain=[ ("state_from_id.code", "=", "AL"), ("state_to_ids.code", "=", "AL"), ("tax_group_id.tax_domain", "=", TAX_DOMAIN_ICMS), ], ) icms_external_al_ids = fields.One2many( comodel_name="l10n_br_fiscal.tax.definition", inverse_name="icms_regulation_id", string="ICMS External AL", domain=[ ("state_from_id.code", "=", "AL"), ("state_to_ids.code", "!=", "AL"), ("tax_group_id.tax_domain", "=", TAX_DOMAIN_ICMS), ], ) 
icms_st_al_ids = fields.One2many( comodel_name="l10n_br_fiscal.tax.definition", inverse_name="icms_regulation_id", string="ICMS ST AL", domain=[ ("state_from_id.code", "=", "AL"), ("tax_group_id.tax_domain", "=", TAX_DOMAIN_ICMS_ST), ], ) icms_fcp_al_ids = fields.One2many( comodel_name="l10n_br_fiscal.tax.definition", inverse_name="icms_regulation_id", string="ICMS FCP AL", domain=[ ("state_from_id.code", "=", "AL"), ("state_to_ids.code", "=", "AL"), ("tax_group_id.tax_domain", "=", TAX_DOMAIN_ICMS_FCP), ], ) icms_internal_am_ids = fields.One2many( comodel_name="l10n_br_fiscal.tax.definition", inverse_name="icms_regulation_id", string="ICMS Internal AM", domain=[ ("state_from_id.code", "=", "AM"), ("state_to_ids.code", "=", "AM"), ("tax_group_id.tax_domain", "=", TAX_DOMAIN_ICMS), ], ) icms_external_am_ids = fields.One2many( comodel_name="l10n_br_fiscal.tax.definition", inverse_name="icms_regulation_id", string="ICMS External AM", domain=[ ("state_from_id.code", "=", "AM"), ("state_to_ids.code", "!=", "AM"), ("tax_group_id.tax_domain", "=", TAX_DOMAIN_ICMS), ], ) icms_st_am_ids = fields.One2many( comodel_name="l10n_br_fiscal.tax.definition", inverse_name="icms_regulation_id", string="ICMS ST AM", domain=[ ("state_from_id.code", "=", "AM"), ("tax_group_id.tax_domain", "=", TAX_DOMAIN_ICMS_ST), ], ) icms_fcp_am_ids = fields.One2many( comodel_name="l10n_br_fiscal.tax.definition", inverse_name="icms_regulation_id", string="ICMS FCP AM", domain=[ ("state_from_id.code", "=", "AM"), ("state_to_ids.code", "=", "AM"), ("tax_group_id.tax_domain", "=", TAX_DOMAIN_ICMS_FCP), ], ) icms_internal_ap_ids = fields.One2many( comodel_name="l10n_br_fiscal.tax.definition", inverse_name="icms_regulation_id", string="ICMS Internal AP", domain=[ ("state_from_id.code", "=", "AP"), ("state_to_ids.code", "=", "AP"), ("tax_group_id.tax_domain", "=", TAX_DOMAIN_ICMS), ], ) icms_external_ap_ids = fields.One2many( comodel_name="l10n_br_fiscal.tax.definition", inverse_name="icms_regulation_id", string="ICMS External AP", domain=[ ("state_from_id.code", "=", "AP"), ("state_to_ids.code", "!=", "AP"), ("tax_group_id.tax_domain", "=", TAX_DOMAIN_ICMS), ], ) icms_st_ap_ids = fields.One2many( comodel_name="l10n_br_fiscal.tax.definition", inverse_name="icms_regulation_id", string="ICMS ST AP", domain=[ ("state_from_id.code", "=", "AP"), ("tax_group_id.tax_domain", "=", TAX_DOMAIN_ICMS_ST), ], ) icms_fcp_ap_ids = fields.One2many( comodel_name="l10n_br_fiscal.tax.definition", inverse_name="icms_regulation_id", string="ICMS FCP AP", domain=[ ("state_from_id.code", "=", "AP"), ("state_to_ids.code", "=", "AP"), ("tax_group_id.tax_domain", "=", TAX_DOMAIN_ICMS_FCP), ], ) icms_internal_ba_ids = fields.One2many( comodel_name="l10n_br_fiscal.tax.definition", inverse_name="icms_regulation_id", string="ICMS Internal BA", domain=[ ("state_from_id.code", "=", "BA"), ("state_to_ids.code", "=", "BA"), ("tax_group_id.tax_domain", "=", TAX_DOMAIN_ICMS), ], ) icms_external_ba_ids = fields.One2many( comodel_name="l10n_br_fiscal.tax.definition", inverse_name="icms_regulation_id", string="ICMS External BA", domain=[ ("state_from_id.code", "=", "BA"), ("state_to_ids.code", "!=", "BA"), ("tax_group_id.tax_domain", "=", TAX_DOMAIN_ICMS), ], ) icms_st_ba_ids = fields.One2many( comodel_name="l10n_br_fiscal.tax.definition", inverse_name="icms_regulation_id", string="ICMS ST BA", domain=[ ("state_from_id.code", "=", "BA"), ("state_to_ids.code", "=", "BA"), ("tax_group_id.tax_domain", "=", TAX_DOMAIN_ICMS_ST), ], ) icms_fcp_ba_ids = fields.One2many( 
comodel_name="l10n_br_fiscal.tax.definition", inverse_name="icms_regulation_id", string="ICMS FCP BA", domain=[ ("state_from_id.code", "=", "BA"), ("state_to_ids.code", "=", "BA"), ("tax_group_id.tax_domain", "=", TAX_DOMAIN_ICMS_FCP), ], ) icms_internal_ce_ids = fields.One2many( comodel_name="l10n_br_fiscal.tax.definition", inverse_name="icms_regulation_id", string="ICMS Internal CE", domain=[ ("state_from_id.code", "=", "CE"), ("state_to_ids.code", "=", "CE"), ("tax_group_id.tax_domain", "=", TAX_DOMAIN_ICMS), ], ) icms_external_ce_ids = fields.One2many( comodel_name="l10n_br_fiscal.tax.definition", inverse_name="icms_regulation_id", string="ICMS External CE", domain=[ ("state_from_id.code", "=", "CE"), ("state_to_ids.code", "!=", "CE"), ("tax_group_id.tax_domain", "=", TAX_DOMAIN_ICMS), ], ) icms_st_ce_ids = fields.One2many( comodel_name="l10n_br_fiscal.tax.definition", inverse_name="icms_regulation_id", string="ICMS ST CE", domain=[ ("state_from_id.code", "=", "CE"), ("state_to_ids.code", "=", "CE"), ("tax_group_id.tax_domain", "=", TAX_DOMAIN_ICMS_ST), ], ) icms_fcp_ce_ids = fields.One2many( comodel_name="l10n_br_fiscal.tax.definition", inverse_name="icms_regulation_id", string="ICMS FCP CE", domain=[ ("state_from_id.code", "=", "CE"), ("state_to_ids.code", "=", "CE"), ("tax_group_id.tax_domain", "=", TAX_DOMAIN_ICMS_FCP), ], ) icms_internal_df_ids = fields.One2many( comodel_name="l10n_br_fiscal.tax.definition", inverse_name="icms_regulation_id", string="ICMS Internal DF", domain=[ ("state_from_id.code", "=", "DF"), ("state_to_ids.code", "=", "DF"), ("tax_group_id.tax_domain", "=", TAX_DOMAIN_ICMS), ], ) icms_external_df_ids = fields.One2many( comodel_name="l10n_br_fiscal.tax.definition", inverse_name="icms_regulation_id", string="ICMS External DF", domain=[ ("state_from_id.code", "=", "DF"), ("state_to_ids.code", "!=", "DF"), ("tax_group_id.tax_domain", "=", TAX_DOMAIN_ICMS), ], ) icms_st_df_ids = fields.One2many( comodel_name="l10n_br_fiscal.tax.definition", inverse_name="icms_regulation_id", string="ICMS ST DF", domain=[ ("state_from_id.code", "=", "DF"), ("state_to_ids.code", "=", "DF"), ("tax_group_id.tax_domain", "=", TAX_DOMAIN_ICMS_ST), ], ) icms_fcp_df_ids = fields.One2many( comodel_name="l10n_br_fiscal.tax.definition", inverse_name="icms_regulation_id", string="ICMS FCP DF", domain=[ ("state_from_id.code", "=", "DF"), ("state_to_ids.code", "=", "DF"), ("tax_group_id.tax_domain", "=", TAX_DOMAIN_ICMS_FCP), ], ) icms_internal_es_ids = fields.One2many( comodel_name="l10n_br_fiscal.tax.definition", inverse_name="icms_regulation_id", string="ICMS Internal ES", domain=[ ("state_from_id.code", "=", "ES"), ("state_to_ids.code", "=", "ES"), ("tax_group_id.tax_domain", "=", TAX_DOMAIN_ICMS), ], ) icms_external_es_ids = fields.One2many( comodel_name="l10n_br_fiscal.tax.definition", inverse_name="icms_regulation_id", string="ICMS External ES", domain=[ ("state_from_id.code", "=", "ES"), ("state_to_ids.code", "!=", "ES"), ("tax_group_id.tax_domain", "=", TAX_DOMAIN_ICMS), ], ) icms_st_es_ids = fields.One2many( comodel_name="l10n_br_fiscal.tax.definition", inverse_name="icms_regulation_id", string="ICMS ST ES", domain=[ ("state_from_id.code", "=", "ES"), ("state_to_ids.code", "=", "ES"), ("tax_group_id.tax_domain", "=", TAX_DOMAIN_ICMS_ST), ], ) icms_fcp_es_ids = fields.One2many( comodel_name="l10n_br_fiscal.tax.definition", inverse_name="icms_regulation_id", string="ICMS FCP ES", domain=[ ("state_from_id.code", "=", "ES"), ("state_to_ids.code", "=", "ES"), ("tax_group_id.tax_domain", 
"=", TAX_DOMAIN_ICMS_FCP), ], ) icms_internal_go_ids = fields.One2many( comodel_name="l10n_br_fiscal.tax.definition", inverse_name="icms_regulation_id", string="ICMS Internal GO", domain=[ ("state_from_id.code", "=", "GO"), ("state_to_ids.code", "=", "GO"), ("tax_group_id.tax_domain", "=", TAX_DOMAIN_ICMS), ], ) icms_external_go_ids = fields.One2many( comodel_name="l10n_br_fiscal.tax.definition", inverse_name="icms_regulation_id", string="ICMS External GO", domain=[ ("state_from_id.code", "=", "GO"), ("state_to_ids.code", "!=", "GO"), ("tax_group_id.tax_domain", "=", TAX_DOMAIN_ICMS), ], ) icms_st_go_ids = fields.One2many( comodel_name="l10n_br_fiscal.tax.definition", inverse_name="icms_regulation_id", string="ICMS ST GO", domain=[ ("state_from_id.code", "=", "GO"), ("state_to_ids.code", "=", "GO"), ("tax_group_id.tax_domain", "=", TAX_DOMAIN_ICMS_ST), ], ) icms_fcp_go_ids = fields.One2many( comodel_name="l10n_br_fiscal.tax.definition", inverse_name="icms_regulation_id", string="ICMS FCP GO", domain=[ ("state_from_id.code", "=", "GO"), ("state_to_ids.code", "=", "GO"), ("tax_group_id.tax_domain", "=", TAX_DOMAIN_ICMS_FCP), ], ) icms_internal_ma_ids = fields.One2many( comodel_name="l10n_br_fiscal.tax.definition", inverse_name="icms_regulation_id", string="ICMS Internal MA", domain=[ ("state_from_id.code", "=", "MA"), ("state_to_ids.code", "=", "MA"), ("tax_group_id.tax_domain", "=", TAX_DOMAIN_ICMS), ], ) icms_external_ma_ids = fields.One2many( comodel_name="l10n_br_fiscal.tax.definition", inverse_name="icms_regulation_id", string="ICMS External MA", domain=[ ("state_from_id.code", "=", "MA"), ("state_to_ids.code", "!=", "MA"), ("tax_group_id.tax_domain", "=", TAX_DOMAIN_ICMS), ], ) icms_st_ma_ids = fields.One2many( comodel_name="l10n_br_fiscal.tax.definition", inverse_name="icms_regulation_id", string="ICMS ST MA", domain=[ ("state_from_id.code", "=", "MA"), ("state_to_ids.code", "=", "MA"), ("tax_group_id.tax_domain", "=", TAX_DOMAIN_ICMS_ST), ], ) icms_fcp_ma_ids = fields.One2many( comodel_name="l10n_br_fiscal.tax.definition", inverse_name="icms_regulation_id", string="ICMS FCP MA", domain=[ ("state_from_id.code", "=", "MA"), ("state_to_ids.code", "=", "MA"), ("tax_group_id.tax_domain", "=", TAX_DOMAIN_ICMS_FCP), ], ) icms_internal_mt_ids = fields.One2many( comodel_name="l10n_br_fiscal.tax.definition", inverse_name="icms_regulation_id", string="ICMS Internal MT", domain=[ ("state_from_id.code", "=", "MT"), ("state_to_ids.code", "=", "MT"), ("tax_group_id.tax_domain", "=", TAX_DOMAIN_ICMS), ], ) icms_external_mt_ids = fields.One2many( comodel_name="l10n_br_fiscal.tax.definition", inverse_name="icms_regulation_id", string="ICMS External MT", domain=[ ("state_from_id.code", "=", "MT"), ("state_to_ids.code", "!=", "MT"), ("tax_group_id.tax_domain", "=", TAX_DOMAIN_ICMS), ], ) icms_st_mt_ids = fields.One2many( comodel_name="l10n_br_fiscal.tax.definition", inverse_name="icms_regulation_id", string="ICMS ST MT", domain=[ ("state_from_id.code", "=", "MT"), ("state_to_ids.code", "=", "MT"), ("tax_group_id.tax_domain", "=", TAX_DOMAIN_ICMS_ST), ], ) icms_fcp_mt_ids = fields.One2many( comodel_name="l10n_br_fiscal.tax.definition", inverse_name="icms_regulation_id", string="ICMS FCP MT", domain=[ ("state_from_id.code", "=", "MT"), ("state_to_ids.code", "=", "MT"), ("tax_group_id.tax_domain", "=", TAX_DOMAIN_ICMS_FCP), ], ) icms_internal_ms_ids = fields.One2many( comodel_name="l10n_br_fiscal.tax.definition", inverse_name="icms_regulation_id", string="ICMS Internal MS", domain=[ ("state_from_id.code", 
"=", "MS"), ("state_to_ids.code", "=", "MS"), ("tax_group_id.tax_domain", "=", TAX_DOMAIN_ICMS), ], ) icms_external_ms_ids = fields.One2many( comodel_name="l10n_br_fiscal.tax.definition", inverse_name="icms_regulation_id", string="ICMS External MS", domain=[ ("state_from_id.code", "=", "MS"), ("state_to_ids.code", "!=", "MS"), ("tax_group_id.tax_domain", "=", TAX_DOMAIN_ICMS), ], ) icms_st_ms_ids = fields.One2many( comodel_name="l10n_br_fiscal.tax.definition", inverse_name="icms_regulation_id", string="ICMS ST MS", domain=[ ("state_from_id.code", "=", "MS"), ("state_to_ids.code", "=", "MS"), ("tax_group_id.tax_domain", "=", TAX_DOMAIN_ICMS_ST), ], ) icms_fcp_ms_ids = fields.One2many( comodel_name="l10n_br_fiscal.tax.definition", inverse_name="icms_regulation_id", string="ICMS FCP MS", domain=[ ("state_from_id.code", "=", "MS"), ("state_to_ids.code", "=", "MS"), ("tax_group_id.tax_domain", "=", TAX_DOMAIN_ICMS_FCP), ], ) icms_internal_mg_ids = fields.One2many( comodel_name="l10n_br_fiscal.tax.definition", inverse_name="icms_regulation_id", string="ICMS Internal MG", domain=[ ("state_from_id.code", "=", "MG"), ("state_to_ids.code", "=", "MG"), ("tax_group_id.tax_domain", "=", TAX_DOMAIN_ICMS), ], ) icms_external_mg_ids = fields.One2many( comodel_name="l10n_br_fiscal.tax.definition", inverse_name="icms_regulation_id", string="ICMS External MG", domain=[ ("state_from_id.code", "=", "MG"), ("state_to_ids.code", "!=", "MG"), ("tax_group_id.tax_domain", "=", TAX_DOMAIN_ICMS), ], ) icms_st_mg_ids = fields.One2many( comodel_name="l10n_br_fiscal.tax.definition", inverse_name="icms_regulation_id", string="ICMS ST MG", domain=[ ("state_from_id.code", "=", "MG"), ("state_to_ids.code", "=", "MG"), ("tax_group_id.tax_domain", "=", TAX_DOMAIN_ICMS_ST), ], ) icms_fcp_mg_ids = fields.One2many( comodel_name="l10n_br_fiscal.tax.definition", inverse_name="icms_regulation_id", string="ICMS FCP MG", domain=[ ("state_from_id.code", "=", "MG"), ("state_to_ids.code", "=", "MG"), ("tax_group_id.tax_domain", "=", TAX_DOMAIN_ICMS_FCP), ], ) icms_internal_pa_ids = fields.One2many( comodel_name="l10n_br_fiscal.tax.definition", inverse_name="icms_regulation_id", string="ICMS Internal PA", domain=[ ("state_from_id.code", "=", "PA"), ("state_to_ids.code", "=", "PA"), ("tax_group_id.tax_domain", "=", TAX_DOMAIN_ICMS), ], ) icms_external_pa_ids = fields.One2many( comodel_name="l10n_br_fiscal.tax.definition", inverse_name="icms_regulation_id", string="ICMS External PA", domain=[ ("state_from_id.code", "=", "PA"), ("state_to_ids.code", "!=", "PA"), ("tax_group_id.tax_domain", "=", TAX_DOMAIN_ICMS), ], ) icms_st_pa_ids = fields.One2many( comodel_name="l10n_br_fiscal.tax.definition", inverse_name="icms_regulation_id", string="ICMS ST PA", domain=[ ("state_from_id.code", "=", "PA"), ("state_to_ids.code", "=", "PA"), ("tax_group_id.tax_domain", "=", TAX_DOMAIN_ICMS_ST), ], ) icms_fcp_pa_ids = fields.One2many( comodel_name="l10n_br_fiscal.tax.definition", inverse_name="icms_regulation_id", string="ICMS FCP PA", domain=[ ("state_from_id.code", "=", "PA"), ("state_to_ids.code", "=", "PA"), ("tax_group_id.tax_domain", "=", TAX_DOMAIN_ICMS_FCP), ], ) icms_internal_pb_ids = fields.One2many( comodel_name="l10n_br_fiscal.tax.definition", inverse_name="icms_regulation_id", string="ICMS Internal PB", domain=[ ("state_from_id.code", "=", "PB"), ("state_to_ids.code", "=", "PB"), ("tax_group_id.tax_domain", "=", TAX_DOMAIN_ICMS), ], ) icms_external_pb_ids = fields.One2many( comodel_name="l10n_br_fiscal.tax.definition", 
inverse_name="icms_regulation_id", string="ICMS External PB", domain=[ ("state_from_id.code", "=", "PB"), ("state_to_ids.code", "!=", "PB"), ("tax_group_id.tax_domain", "=", TAX_DOMAIN_ICMS), ], ) icms_st_pb_ids = fields.One2many( comodel_name="l10n_br_fiscal.tax.definition", inverse_name="icms_regulation_id", string="ICMS ST PB", domain=[ ("state_from_id.code", "=", "PB"), ("state_to_ids.code", "=", "PB"), ("tax_group_id.tax_domain", "=", TAX_DOMAIN_ICMS_ST), ], ) icms_fcp_pb_ids = fields.One2many( comodel_name="l10n_br_fiscal.tax.definition", inverse_name="icms_regulation_id", string="ICMS FCP PB", domain=[ ("state_from_id.code", "=", "PB"), ("state_to_ids.code", "=", "PB"), ("tax_group_id.tax_domain", "=", TAX_DOMAIN_ICMS_FCP), ], ) icms_internal_pr_ids = fields.One2many( comodel_name="l10n_br_fiscal.tax.definition", inverse_name="icms_regulation_id", string="ICMS Internal PR", domain=[ ("state_from_id.code", "=", "PR"), ("state_to_ids.code", "=", "PR"), ("tax_group_id.tax_domain", "=", TAX_DOMAIN_ICMS), ], ) icms_external_pr_ids = fields.One2many( comodel_name="l10n_br_fiscal.tax.definition", inverse_name="icms_regulation_id", string="ICMS External PR", domain=[ ("state_from_id.code", "=", "PR"), ("state_to_ids.code", "!=", "PR"), ("tax_group_id.tax_domain", "=", TAX_DOMAIN_ICMS), ], ) icms_st_pr_ids = fields.One2many( comodel_name="l10n_br_fiscal.tax.definition", inverse_name="icms_regulation_id", string="ICMS ST PR", domain=[ ("state_from_id.code", "=", "PR"), ("state_to_ids.code", "=", "PR"), ("tax_group_id.tax_domain", "=", TAX_DOMAIN_ICMS_ST), ], ) icms_fcp_pr_ids = fields.One2many( comodel_name="l10n_br_fiscal.tax.definition", inverse_name="icms_regulation_id", string="ICMS FCP PR", domain=[ ("state_from_id.code", "=", "PR"), ("state_to_ids.code", "=", "PR"), ("tax_group_id.tax_domain", "=", TAX_DOMAIN_ICMS_FCP), ], ) icms_internal_pe_ids = fields.One2many( comodel_name="l10n_br_fiscal.tax.definition", inverse_name="icms_regulation_id", string="ICMS Internal PE", domain=[ ("state_from_id.code", "=", "PE"), ("state_to_ids.code", "=", "PE"), ("tax_group_id.tax_domain", "=", TAX_DOMAIN_ICMS), ], ) icms_external_pe_ids = fields.One2many( comodel_name="l10n_br_fiscal.tax.definition", inverse_name="icms_regulation_id", string="ICMS External PE", domain=[ ("state_from_id.code", "=", "PE"), ("state_to_ids.code", "!=", "PE"), ("tax_group_id.tax_domain", "=", TAX_DOMAIN_ICMS), ], ) icms_st_pe_ids = fields.One2many( comodel_name="l10n_br_fiscal.tax.definition", inverse_name="icms_regulation_id", string="ICMS ST PE", domain=[ ("state_from_id.code", "=", "PE"), ("state_to_ids.code", "=", "PE"), ("tax_group_id.tax_domain", "=", TAX_DOMAIN_ICMS_ST), ], ) icms_fcp_pe_ids = fields.One2many( comodel_name="l10n_br_fiscal.tax.definition", inverse_name="icms_regulation_id", string="ICMS FCP PE", domain=[ ("state_from_id.code", "=", "PE"), ("state_to_ids.code", "=", "PE"), ("tax_group_id.tax_domain", "=", TAX_DOMAIN_ICMS_FCP), ], ) icms_internal_pi_ids = fields.One2many( comodel_name="l10n_br_fiscal.tax.definition", inverse_name="icms_regulation_id", string="ICMS Internal PI", domain=[ ("state_from_id.code", "=", "PI"), ("state_to_ids.code", "=", "PI"), ("tax_group_id.tax_domain", "=", TAX_DOMAIN_ICMS), ], ) icms_external_pi_ids = fields.One2many( comodel_name="l10n_br_fiscal.tax.definition", inverse_name="icms_regulation_id", string="ICMS External PI", domain=[ ("state_from_id.code", "=", "PI"), ("state_to_ids.code", "!=", "PI"), ("tax_group_id.tax_domain", "=", TAX_DOMAIN_ICMS), ], ) icms_st_pi_ids 
= fields.One2many( comodel_name="l10n_br_fiscal.tax.definition", inverse_name="icms_regulation_id", string="ICMS ST PI", domain=[ ("state_from_id.code", "=", "PI"), ("state_to_ids.code", "=", "PI"), ("tax_group_id.tax_domain", "=", TAX_DOMAIN_ICMS_ST), ], ) icms_fcp_pi_ids = fields.One2many( comodel_name="l10n_br_fiscal.tax.definition", inverse_name="icms_regulation_id", string="ICMS FCP PI", domain=[ ("state_from_id.code", "=", "PI"), ("state_to_ids.code", "=", "PI"), ("tax_group_id.tax_domain", "=", TAX_DOMAIN_ICMS_FCP), ], ) icms_internal_rn_ids = fields.One2many( comodel_name="l10n_br_fiscal.tax.definition", inverse_name="icms_regulation_id", string="ICMS Internal RN", domain=[ ("state_from_id.code", "=", "RN"), ("state_to_ids.code", "=", "RN"), ("tax_group_id.tax_domain", "=", TAX_DOMAIN_ICMS), ], ) icms_external_rn_ids = fields.One2many( comodel_name="l10n_br_fiscal.tax.definition", inverse_name="icms_regulation_id", string="ICMS External RN", domain=[ ("state_from_id.code", "=", "RN"), ("state_to_ids.code", "!=", "RN"), ("tax_group_id.tax_domain", "=", TAX_DOMAIN_ICMS), ], ) icms_st_rn_ids = fields.One2many( comodel_name="l10n_br_fiscal.tax.definition", inverse_name="icms_regulation_id", string="ICMS ST RN", domain=[ ("state_from_id.code", "=", "RN"), ("state_to_ids.code", "=", "RN"), ("tax_group_id.tax_domain", "=", TAX_DOMAIN_ICMS_ST), ], ) icms_fcp_rn_ids = fields.One2many( comodel_name="l10n_br_fiscal.tax.definition", inverse_name="icms_regulation_id", string="ICMS FCP RN", domain=[ ("state_from_id.code", "=", "RN"), ("state_to_ids.code", "=", "RN"), ("tax_group_id.tax_domain", "=", TAX_DOMAIN_ICMS_FCP), ], ) icms_internal_rs_ids = fields.One2many( comodel_name="l10n_br_fiscal.tax.definition", inverse_name="icms_regulation_id", string="ICMS Internal RS", domain=[ ("state_from_id.code", "=", "RS"), ("state_to_ids.code", "=", "RS"), ("tax_group_id.tax_domain", "=", TAX_DOMAIN_ICMS), ], ) icms_external_rs_ids = fields.One2many( comodel_name="l10n_br_fiscal.tax.definition", inverse_name="icms_regulation_id", string="ICMS External RS", domain=[ ("state_from_id.code", "=", "RS"), ("state_to_ids.code", "!=", "RS"), ("tax_group_id.tax_domain", "=", TAX_DOMAIN_ICMS), ], ) icms_st_rs_ids = fields.One2many( comodel_name="l10n_br_fiscal.tax.definition", inverse_name="icms_regulation_id", string="ICMS ST RS", domain=[ ("state_from_id.code", "=", "RS"), ("state_to_ids.code", "=", "RS"), ("tax_group_id.tax_domain", "=", TAX_DOMAIN_ICMS_ST), ], ) icms_fcp_rs_ids = fields.One2many( comodel_name="l10n_br_fiscal.tax.definition", inverse_name="icms_regulation_id", string="ICMS FCP RS", domain=[ ("state_from_id.code", "=", "RS"), ("state_to_ids.code", "=", "RS"), ("tax_group_id.tax_domain", "=", TAX_DOMAIN_ICMS_FCP), ], ) icms_internal_rj_ids = fields.One2many( comodel_name="l10n_br_fiscal.tax.definition", inverse_name="icms_regulation_id", string="ICMS Internal RJ", domain=[ ("state_from_id.code", "=", "RJ"), ("state_to_ids.code", "=", "RJ"), ("tax_group_id.tax_domain", "=", TAX_DOMAIN_ICMS), ], ) icms_external_rj_ids = fields.One2many( comodel_name="l10n_br_fiscal.tax.definition", inverse_name="icms_regulation_id", string="ICMS External RJ", domain=[ ("state_from_id.code", "=", "RJ"), ("state_to_ids.code", "!=", "RJ"), ("tax_group_id.tax_domain", "=", TAX_DOMAIN_ICMS), ], ) icms_st_rj_ids = fields.One2many( comodel_name="l10n_br_fiscal.tax.definition", inverse_name="icms_regulation_id", string="ICMS ST RJ", domain=[ ("state_from_id.code", "=", "RJ"), ("state_to_ids.code", "=", "RJ"), 
("tax_group_id.tax_domain", "=", TAX_DOMAIN_ICMS_ST), ], ) icms_fcp_rj_ids = fields.One2many( comodel_name="l10n_br_fiscal.tax.definition", inverse_name="icms_regulation_id", string="ICMS FCP RJ", domain=[ ("state_from_id.code", "=", "RJ"), ("state_to_ids.code", "=", "RJ"), ("tax_group_id.tax_domain", "=", TAX_DOMAIN_ICMS_FCP), ], ) icms_internal_ro_ids = fields.One2many( comodel_name="l10n_br_fiscal.tax.definition", inverse_name="icms_regulation_id", string="ICMS Internal RO", domain=[ ("state_from_id.code", "=", "RO"), ("state_to_ids.code", "=", "RO"), ("tax_group_id.tax_domain", "=", TAX_DOMAIN_ICMS), ], ) icms_external_ro_ids = fields.One2many( comodel_name="l10n_br_fiscal.tax.definition", inverse_name="icms_regulation_id", string="ICMS External RO", domain=[ ("state_from_id.code", "=", "RO"), ("state_to_ids.code", "!=", "RO"), ("tax_group_id.tax_domain", "=", TAX_DOMAIN_ICMS), ], ) icms_st_ro_ids = fields.One2many( comodel_name="l10n_br_fiscal.tax.definition", inverse_name="icms_regulation_id", string="ICMS ST RO", domain=[ ("state_from_id.code", "=", "RO"), ("state_to_ids.code", "=", "RO"), ("tax_group_id.tax_domain", "=", TAX_DOMAIN_ICMS_ST), ], ) icms_fcp_ro_ids = fields.One2many( comodel_name="l10n_br_fiscal.tax.definition", inverse_name="icms_regulation_id", string="ICMS FCP RO", domain=[ ("state_from_id.code", "=", "RO"), ("state_to_ids.code", "=", "RO"), ("tax_group_id.tax_domain", "=", TAX_DOMAIN_ICMS_FCP), ], ) icms_internal_rr_ids = fields.One2many( comodel_name="l10n_br_fiscal.tax.definition", inverse_name="icms_regulation_id", string="ICMS Internal RR", domain=[ ("state_from_id.code", "=", "RR"), ("state_to_ids.code", "=", "RR"), ("tax_group_id.tax_domain", "=", TAX_DOMAIN_ICMS), ], ) icms_external_rr_ids = fields.One2many( comodel_name="l10n_br_fiscal.tax.definition", inverse_name="icms_regulation_id", string="ICMS External RR", domain=[ ("state_from_id.code", "=", "RR"), ("state_to_ids.code", "!=", "RR"), ("tax_group_id.tax_domain", "=", TAX_DOMAIN_ICMS), ], ) icms_st_rr_ids = fields.One2many( comodel_name="l10n_br_fiscal.tax.definition", inverse_name="icms_regulation_id", string="ICMS ST RR", domain=[ ("state_from_id.code", "=", "RR"), ("state_to_ids.code", "=", "RR"), ("tax_group_id.tax_domain", "=", TAX_DOMAIN_ICMS_ST), ], ) icms_fcp_rr_ids = fields.One2many( comodel_name="l10n_br_fiscal.tax.definition", inverse_name="icms_regulation_id", string="ICMS FCP RR", domain=[ ("state_from_id.code", "=", "RR"), ("state_to_ids.code", "=", "RR"), ("tax_group_id.tax_domain", "=", TAX_DOMAIN_ICMS_FCP), ], ) icms_internal_sc_ids = fields.One2many( comodel_name="l10n_br_fiscal.tax.definition", inverse_name="icms_regulation_id", string="ICMS Internal SC", domain=[ ("state_from_id.code", "=", "SC"), ("state_to_ids.code", "=", "SC"), ("tax_group_id.tax_domain", "=", TAX_DOMAIN_ICMS), ], ) icms_external_sc_ids = fields.One2many( comodel_name="l10n_br_fiscal.tax.definition", inverse_name="icms_regulation_id", string="ICMS External SC", domain=[ ("state_from_id.code", "=", "SC"), ("state_to_ids.code", "!=", "SC"), ("tax_group_id.tax_domain", "=", TAX_DOMAIN_ICMS), ], ) icms_st_sc_ids = fields.One2many( comodel_name="l10n_br_fiscal.tax.definition", inverse_name="icms_regulation_id", string="ICMS ST SC", domain=[ ("state_from_id.code", "in", ("SC", False)), ("state_to_ids.code", "in", ("SC", False)), ("tax_group_id.tax_domain", "=", TAX_DOMAIN_ICMS_ST), ], ) icms_fcp_sc_ids = fields.One2many( comodel_name="l10n_br_fiscal.tax.definition", inverse_name="icms_regulation_id", string="ICMS FCP 
SC", domain=[ ("state_from_id.code", "=", "SC"), ("state_to_ids.code", "=", "SC"), ("tax_group_id.tax_domain", "=", TAX_DOMAIN_ICMS_FCP), ], ) icms_internal_sp_ids = fields.One2many( comodel_name="l10n_br_fiscal.tax.definition", inverse_name="icms_regulation_id", string="ICMS Internal SP", domain=[ ("state_from_id.code", "=", "SP"), ("state_to_ids.code", "=", "SP"), ("tax_group_id.tax_domain", "=", TAX_DOMAIN_ICMS), ], ) icms_external_sp_ids = fields.One2many( comodel_name="l10n_br_fiscal.tax.definition", inverse_name="icms_regulation_id", string="ICMS External SP", domain=[ ("state_from_id.code", "=", "SP"), ("state_to_ids.code", "!=", "SP"), ("tax_group_id.tax_domain", "=", TAX_DOMAIN_ICMS), ], ) icms_st_sp_ids = fields.One2many( comodel_name="l10n_br_fiscal.tax.definition", inverse_name="icms_regulation_id", string="ICMS ST SP", domain=[ ("state_from_id.code", "=", "SP"), ("tax_group_id.tax_domain", "=", TAX_DOMAIN_ICMS_ST), ], ) icms_fcp_sp_ids = fields.One2many( comodel_name="l10n_br_fiscal.tax.definition", inverse_name="icms_regulation_id", string="ICMS FCP SP", domain=[ ("state_from_id.code", "=", "SP"), ("state_to_ids.code", "=", "SP"), ("tax_group_id.tax_domain", "=", TAX_DOMAIN_ICMS_FCP), ], ) icms_internal_se_ids = fields.One2many( comodel_name="l10n_br_fiscal.tax.definition", inverse_name="icms_regulation_id", string="ICMS Internal SE", domain=[ ("state_from_id.code", "=", "SE"), ("state_to_ids.code", "=", "SE"), ("tax_group_id.tax_domain", "=", TAX_DOMAIN_ICMS), ], ) icms_external_se_ids = fields.One2many( comodel_name="l10n_br_fiscal.tax.definition", inverse_name="icms_regulation_id", string="ICMS External SE", domain=[ ("state_from_id.code", "=", "SE"), ("state_to_ids.code", "!=", "SE"), ("tax_group_id.tax_domain", "=", TAX_DOMAIN_ICMS), ], ) icms_st_se_ids = fields.One2many( comodel_name="l10n_br_fiscal.tax.definition", inverse_name="icms_regulation_id", string="ICMS ST SE", domain=[ ("state_from_id.code", "=", "SE"), ("state_to_ids.code", "=", "SE"), ("tax_group_id.tax_domain", "=", TAX_DOMAIN_ICMS_ST), ], ) icms_fcp_se_ids = fields.One2many( comodel_name="l10n_br_fiscal.tax.definition", inverse_name="icms_regulation_id", string="ICMS FCP SE", domain=[ ("state_from_id.code", "=", "SE"), ("state_to_ids.code", "=", "SE"), ("tax_group_id.tax_domain", "=", TAX_DOMAIN_ICMS_FCP), ], ) icms_internal_to_ids = fields.One2many( comodel_name="l10n_br_fiscal.tax.definition", inverse_name="icms_regulation_id", string="ICMS Internal TO", domain=[ ("state_from_id.code", "=", "TO"), ("state_to_ids.code", "=", "TO"), ("tax_group_id.tax_domain", "=", TAX_DOMAIN_ICMS), ], ) icms_external_to_ids = fields.One2many( comodel_name="l10n_br_fiscal.tax.definition", inverse_name="icms_regulation_id", string="ICMS External TO", domain=[ ("state_from_id.code", "=", "TO"), ("state_to_ids.code", "!=", "TO"), ("tax_group_id.tax_domain", "=", TAX_DOMAIN_ICMS), ], ) icms_st_to_ids = fields.One2many( comodel_name="l10n_br_fiscal.tax.definition", inverse_name="icms_regulation_id", string="ICMS ST TO", domain=[ ("state_from_id.code", "=", "TO"), ("state_to_ids.code", "=", "TO"), ("tax_group_id.tax_domain", "=", TAX_DOMAIN_ICMS_ST), ], ) icms_fcp_to_ids = fields.One2many( comodel_name="l10n_br_fiscal.tax.definition", inverse_name="icms_regulation_id", string="ICMS FCP TO", domain=[ ("state_from_id.code", "=", "TO"), ("state_to_ids.code", "=", "TO"), ("tax_group_id.tax_domain", "=", TAX_DOMAIN_ICMS_FCP), ], ) @api.model def fields_view_get( self, view_id=None, view_type="form", toolbar=False, submenu=False ): 
view_super = super(ICMSRegulation, self).fields_view_get( view_id, view_type, toolbar, submenu ) if view_type == "form": doc = etree.fromstring(view_super.get("arch")) for node in doc.xpath("//notebook"): br_states = self.env["res.country.state"].search( [("country_id", "=", self.env.ref("base.br").id)], order="code" ) i = 0 for state in br_states: i += 1 state_page = VIEW.format( state.code.lower(), state.name, self.env.ref("l10n_br_fiscal.tax_group_icms").id, self.env.ref("l10n_br_fiscal.tax_group_icmsst").id, self.env.ref("l10n_br_fiscal.tax_group_icmsfcp").id, state.id, ) node_page = etree.fromstring(state_page) node.insert(i, node_page) view_super["arch"] = etree.tostring(doc, encoding="unicode") return view_super def map_tax_icms( self, company, partner, product, ncm=None, nbm=None, cest=None, operation_line=None, ): self.ensure_one() tax_definitions = self.env["l10n_br_fiscal.tax.definition"] icms_taxes = self.env["l10n_br_fiscal.tax"] tax_group_icms = self.env.ref("l10n_br_fiscal.tax_group_icms") # ICMS # ICMS tax imported if ( product.icms_origin in ICMS_ORIGIN_TAX_IMPORTED and company.state_id != partner.state_id and operation_line.fiscal_operation_type == FISCAL_OUT ): icms_taxes |= self.icms_imported_tax_id else: # ICMS if not ncm: ncm = product.ncm_id if not cest: cest = product.cest_id domain = [ ("icms_regulation_id", "=", self.id), ("state", "=", "approved"), ("tax_group_id", "=", tax_group_icms.id), ("state_from_id", "=", company.state_id.id), ("state_to_ids", "=", partner.state_id.id), ] icms_defs = tax_definitions.search(domain) if len(icms_defs) == 1: tax_definitions |= icms_defs else: icms_defs_specific = icms_defs.filtered( lambda d: ncm.id in d.ncm_ids.ids or nbm.id in d.nbm_ids.ids or cest.id in d.cest_ids.ids or product.id in d.product_ids.ids ) icms_defs_generic = icms_defs.filtered( lambda d: not d.ncm_ids.ids and not d.nbm_ids.ids and not d.cest_ids.ids and not d.product_ids.ids ) if icms_defs_specific: tax_definitions |= icms_defs_specific else: tax_definitions |= icms_defs_generic icms_taxes |= tax_definitions.mapped("tax_id") return icms_taxes def map_tax_icmsst( self, company, partner, product, ncm=None, nbm=None, cest=None, operation_line=None, ): self.ensure_one() tax_definitions = self.env["l10n_br_fiscal.tax.definition"] icms_taxes = self.env["l10n_br_fiscal.tax"] tax_group_icmsst = self.env.ref("l10n_br_fiscal.tax_group_icmsst") if not ncm: ncm = product.ncm_id if not cest: cest = product.cest_id # ICMS ST domain = [ ("icms_regulation_id", "=", self.id), ("state", "=", "approved"), ("state_from_id", "=", company.state_id.id), ("tax_group_id", "=", tax_group_icmsst.id), "|", ("state_to_ids", "=", partner.state_id.id), ("state_to_ids", "=", company.state_id.id), ("ncm_ids", "=", ncm.id), ("cest_ids", "=", cest.id), ] icmsst_defs = tax_definitions.search(domain) if len(icmsst_defs) == 1: tax_definitions |= icmsst_defs else: tax_definitions |= icmsst_defs.filtered( lambda d: ncm.id in d.ncm_ids.ids or nbm.id in d.nbm_ids.ids or cest.id in d.cest_ids.ids or product.id in d.product_ids.ids ) icms_taxes |= tax_definitions.mapped("tax_id") return icms_taxes def map_tax_icmsfcp( self, company, partner, product, ncm=None, nbm=None, cest=None, operation_line=None, ): self.ensure_one() tax_definitions = self.env["l10n_br_fiscal.tax.definition"] icms_taxes = self.env["l10n_br_fiscal.tax"] tax_group_icmsfcp = self.env.ref("l10n_br_fiscal.tax_group_icmsfcp") # ICMS FCP for DIFAL if ( company.state_id != partner.state_id and operation_line.fiscal_operation_type == 
FISCAL_OUT and not partner.is_company ): if not ncm: ncm = product.ncm_id if not cest: cest = product.cest_id domain = [ ("icms_regulation_id", "=", self.id), ("state", "=", "approved"), ("tax_group_id", "=", tax_group_icmsfcp.id), ] if operation_line.fiscal_operation_type == FISCAL_OUT: domain.append(("state_to_ids", "=", partner.state_id.id)) else: domain.append(("state_from_id", "=", partner.state_id.id)) icmsfcp_defs = tax_definitions.search(domain) if len(icmsfcp_defs) == 1: tax_definitions |= icmsfcp_defs else: icmsfcp_defs_specific = icmsfcp_defs.filtered( lambda d: ncm.id in d.ncm_ids.ids or nbm.id in d.nbm_ids.ids or cest.id in d.cest_ids.ids or product.id in d.product_ids.ids ) icmsfcp_defs_generic = icmsfcp_defs.filtered( lambda d: not d.ncm_ids.ids and not d.nbm_ids.ids and not d.cest_ids.ids and not d.product_ids.ids ) if icmsfcp_defs_specific: tax_definitions |= icmsfcp_defs_specific else: tax_definitions |= icmsfcp_defs_generic icms_taxes |= tax_definitions.mapped("tax_id") return icms_taxes def map_tax_icms_difal( self, company, partner, product, ncm=None, nbm=None, cest=None, operation_line=None, ): self.ensure_one() tax_definitions = self.env["l10n_br_fiscal.tax.definition"] icms_taxes = self.env["l10n_br_fiscal.tax"] tax_group_icms = self.env.ref("l10n_br_fiscal.tax_group_icms") # ICMS if not ncm: ncm = product.ncm_id if not cest: cest = product.cest_id domain = [ ("icms_regulation_id", "=", self.id), ("state", "=", "approved"), ("state_from_id", "=", partner.state_id.id), ("state_to_ids", "=", partner.state_id.id), ("tax_group_id", "=", tax_group_icms.id), ] icms_defs = tax_definitions.search(domain) if len(icms_defs) == 1: tax_definitions |= icms_defs else: icms_defs_specific = icms_defs.filtered( lambda d: ncm.id in d.ncm_ids.ids or nbm.id in d.nbm_ids.ids or cest.id in d.cest_ids.ids or product.id in d.product_ids.ids ) icms_defs_generic = icms_defs.filtered( lambda d: not d.ncm_ids.ids and not d.nbm_ids.ids and not d.cest_ids.ids and not d.product_ids.ids ) if icms_defs_specific: tax_definitions |= icms_defs_specific else: tax_definitions |= icms_defs_generic icms_taxes |= tax_definitions.mapped("tax_id") return icms_taxes def map_tax( self, company, partner, product, ncm=None, nbm=None, cest=None, operation_line=None, ): icms_taxes = self.env["l10n_br_fiscal.tax"] icms_taxes |= self.map_tax_icms( company, partner, product, ncm, nbm, cest, operation_line ) icms_taxes |= self.map_tax_icmsst( company, partner, product, ncm, nbm, cest, operation_line ) icms_taxes |= self.map_tax_icmsfcp( company, partner, product, ncm, nbm, cest, operation_line ) return icms_taxes
agpl-3.0
7,181,498,285,847,287,000
31.891857
215
0.499495
false
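The ICMS regulation model above repeats an almost identical block of four One2many definitions (internal, external, ST and FCP) for each Brazilian state, changing only the state code. A minimal sketch of how those search domains could be generated from one template instead of by hand; the TAX_DOMAIN_* literals below are placeholders for the module's constants, the per-state quirks visible above (for example the SC and SP ST domains) are ignored, and nothing here is part of the original module:

# Placeholder values standing in for the l10n_br_fiscal constants imported by the model.
TAX_DOMAIN_ICMS = "icms"
TAX_DOMAIN_ICMS_ST = "icmsst"
TAX_DOMAIN_ICMS_FCP = "icmsfcp"

BR_STATES = ["AC", "AL", "AM", "AP", "BA", "CE", "DF", "ES", "GO", "MA", "MG",
             "MS", "MT", "PA", "PB", "PE", "PI", "PR", "RJ", "RN", "RO", "RR",
             "RS", "SC", "SE", "SP", "TO"]

def icms_domains(state):
    """Build the four per-state search domains that the model above spells out by hand."""
    return {
        "internal": [("state_from_id.code", "=", state),
                     ("state_to_ids.code", "=", state),
                     ("tax_group_id.tax_domain", "=", TAX_DOMAIN_ICMS)],
        "external": [("state_from_id.code", "=", state),
                     ("state_to_ids.code", "!=", state),
                     ("tax_group_id.tax_domain", "=", TAX_DOMAIN_ICMS)],
        "st": [("state_from_id.code", "=", state),
               ("tax_group_id.tax_domain", "=", TAX_DOMAIN_ICMS_ST)],
        "fcp": [("state_from_id.code", "=", state),
                ("state_to_ids.code", "=", state),
                ("tax_group_id.tax_domain", "=", TAX_DOMAIN_ICMS_FCP)],
    }

# Example: the generated internal domain for Sao Paulo.
print(icms_domains("SP")["internal"])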
marcel-dancak/bass-cloud
server/setup.py
1
1128
#!/usr/bin/env python

import os
from setuptools import setup, find_packages

# classifiers
classifiers = [
    'Development Status :: 4 - Beta',
    'Environment :: Web Environment',
    'Framework :: Django',
    'Intended Audience :: End Users/Desktop',
    'License :: OSI Approved :: GNU General Public License version 2.0 (GPLv2)',
    'Operating System :: OS Independent',
    'Programming Language :: Python',
    'Topic :: Multimedia :: Sound/Audio',
]

exclude_from_packages = [
    'basscloud.catalog.migrations',
    'basscloud.feedback.migrations',
    # 'basscloud.conf.project_template'
]

# requirements
with open("requirements.txt") as f:
    requirements = f.read().splitlines()

# setup
setup(name='basscloud',
      version=(__import__('basscloud').VERSION),
      description='BassCloud Server',
      author='Marcel Dancak',
      author_email='[email protected]',
      url='https://github.com/marcel-dancak/bass-cloud',
      packages=find_packages('./', exclude=exclude_from_packages),
      include_package_data=True,
      classifiers=classifiers,
      install_requires=requirements
)

# vim: set ts=8 sts=4 sw=4 et:
gpl-3.0
5,120,484,311,682,008,000
25.857143
80
0.680851
false
shumingch/molecule_simulation
formation.py
1
1911
from bge import logic
from bge import events
from bge import render
from mathutils import Vector

gdict = logic.globalDict

def carbons(this):
    count = 0
    for bond in this.get_bonds():
        if bond.name == "Carbon":
            count += 1
    return count

def form(cont):
    particle = cont.owner
    type = particle.name

    #look for bonds to form
    particle.form_bonds()
    if particle.charge <= 0:
        gdict["free"][type].discard(particle)
        gdict["cations"].discard(particle)
        particle.remove_glow()
        cont.script = "formation.wait"

def wait(cont):
    particle = cont.owner
    if particle.charge > 0:
        type = particle.name
        gdict["free"][type].add(particle)
        gdict["cations"].add(particle)
        particle.add_glow()
        cont.script = "formation.form"
    elif particle.formalCharge < 0:
        particle.add_glow(2)
        cont.script = "formation.attract"

def attract(cont):
    particle = cont.owner
    if particle.formalCharge >= 0:
        particle.remove_glow()
        cont.script = "formation.wait"
    else:
        total_force = Vector([0,0,0])
        for cation in gdict["cations"]:
            distance = particle.getDistanceTo(cation)
            if distance < 50:
                particle.scene.addObject("reaction_sound",particle)
                for bond in particle["bonds"]:
                    if bond.name == "Lone Pair":
                        particle.unlink(bond)
                        bond.self_destruct() #self destruct
                        particle.link(cation)
                        return
            elif distance < 250:
                attraction = particle.get_repulsion(cation) * 100
                cation.applyForce(-attraction)
                total_force += attraction
        particle.applyForce(total_force)
mit
8,663,892,711,473,748,000
26.710145
67
0.548927
false
andlogic/email_failover
tests/test_app.py
1
1307
from flask import *
import unittest
import sys
import json

sys.path.insert(0, '../')
from app import *

class TestApp(unittest.TestCase):

    def setUp(self):
        TESTING = True
        WTF_CSRF_ENABLED = False
        SQLALCHEMY_DATABASE_URI = 'sqlite:///:memory:'
        self.app = app.test_client()
        self.headers = {'Content-type': 'application/json'}
        self.json_data = {
            "from": "[email protected]",
            "from_name": "test",
            "to": "[email protected]",
            "to_name": "test",
            "subject": "test",
            "body": "test"}

    def test_email(self):
        response = self.app.post('/email', data = json.dumps(self.json_data))
        self.assertTrue(response.status_code == 415, msg=None)

        response = self.app.post('/email', data = json.dumps({}), headers=self.headers)
        self.assertTrue(response.status_code == 500, msg=None)

        response = self.app.post('/email', data = json.dumps(self.json_data), headers=self.headers)
        self.assertTrue(response.status_code == 200, msg=None)

    def test_email_post_query(self):
        response = self.app.get('/query_email_post?from_name=test')
        self.assertTrue(response.status_code == 200, msg=None)

if __name__ == '__main__':
    unittest.main()
mit
8,946,571,717,285,974,000
25.673469
95
0.583015
false
de-vri-es/qtile
test/layouts/test_matrix.py
1
3618
# Copyright (c) 2011 Florian Mounier
# Copyright (c) 2012, 2014-2015 Tycho Andersen
# Copyright (c) 2013 Mattias Svala
# Copyright (c) 2013 Craig Barnes
# Copyright (c) 2014 ramnes
# Copyright (c) 2014 Sean Vig
# Copyright (c) 2014 Adi Sieker
# Copyright (c) 2014 Chris Wesseling
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.

import pytest

from libqtile import layout
import libqtile.manager
import libqtile.config

from ..conftest import no_xinerama


class MatrixConfig(object):
    auto_fullscreen = True
    main = None
    groups = [
        libqtile.config.Group("a"),
        libqtile.config.Group("b"),
        libqtile.config.Group("c"),
        libqtile.config.Group("d")
    ]
    layouts = [
        layout.Matrix(columns=2)
    ]
    floating_layout = libqtile.layout.floating.Floating()
    keys = []
    mouse = []
    screens = []


matrix_config = lambda x: \
    no_xinerama(pytest.mark.parametrize("qtile", [MatrixConfig], indirect=True)(x))


@matrix_config
def test_matrix_simple(qtile):
    qtile.testWindow("one")
    assert qtile.c.layout.info()["rows"] == [["one"]]
    qtile.testWindow("two")
    assert qtile.c.layout.info()["rows"] == [["one", "two"]]
    qtile.testWindow("three")
    assert qtile.c.layout.info()["rows"] == [["one", "two"], ["three"]]


@matrix_config
def test_matrix_navigation(qtile):
    qtile.testWindow("one")
    qtile.testWindow("two")
    qtile.testWindow("three")
    qtile.testWindow("four")
    qtile.testWindow("five")
    qtile.c.layout.right()
    assert qtile.c.layout.info()["current_window"] == (0, 2)
    qtile.c.layout.up()
    assert qtile.c.layout.info()["current_window"] == (0, 1)
    qtile.c.layout.up()
    assert qtile.c.layout.info()["current_window"] == (0, 0)
    qtile.c.layout.up()
    assert qtile.c.layout.info()["current_window"] == (0, 2)
    qtile.c.layout.down()
    assert qtile.c.layout.info()["current_window"] == (0, 0)
    qtile.c.layout.down()
    assert qtile.c.layout.info()["current_window"] == (0, 1)
    qtile.c.layout.right()
    assert qtile.c.layout.info()["current_window"] == (1, 1)
    qtile.c.layout.right()
    assert qtile.c.layout.info()["current_window"] == (0, 1)


@matrix_config
def test_matrix_add_remove_columns(qtile):
    qtile.testWindow("one")
    qtile.testWindow("two")
    qtile.testWindow("three")
    qtile.testWindow("four")
    qtile.testWindow("five")
    qtile.c.layout.add()
    assert qtile.c.layout.info()["rows"] == [["one", "two", "three"], ["four", "five"]]
    qtile.c.layout.delete()
    assert qtile.c.layout.info()["rows"] == [["one", "two"], ["three", "four"], ["five"]]
mit
4,099,254,992,985,208,300
34.126214
89
0.676341
false
sue-chain/sample
test/mock_test/request_test.py
1
1470
# -*- coding: utf-8 -*-
# pylint: disable=broad-except
"""request test
"""
__authors__ = ['"sue.chain" <[email protected]>']

import os
import sys

import mock
from requests import Response
from mock import MagicMock, Mock
from nose.tools import assert_raises, raises, set_trace, nottest

sys.path.append(os.path.dirname(os.path.split(os.path.realpath(__file__))[0]))

from sample.test.mock_test.func import request


class TestFunc(object):
    """ func test
    """

    @mock.patch('sample.test.mock_test.func.requests')
    def test_request(self, mock_requests):
        """ """
        url = "http://192.168.6.182:8100/api/site/list"
        mock_resp = MagicMock(spec=Response)
        mock_requests.get.return_value = mock_resp
        mock_resp.ok.return_value = True
        mock_resp.json.return_value = {"msg": "Success", "body": [{}]}

        result = request(url)

        # check the result
        assert len(result) > 0

        ## check the call
        ## check the call arguments
        assert mock_requests.get.called
        assert mock_requests.get.call_count == 1
        assert mock_requests.get.call_args[0][0] == url
        assert len(mock_requests.get.call_args_list) == 1
        assert mock_requests.get.assert_called_with(call(url))

        result = request(url)
        assert len(mock_requests.get.call_args_list) == 2

        mock_requests.reset_mock()
        result = request(url)
        assert len(mock_requests.get.call_args_list) == 1
apache-2.0
7,862,095,803,766,875,000
29.125
78
0.622407
false
thadi/rk_lobbyradar
import.py
1
4040
""" this script handles the import of the lobbyradar-data from the mongodb to an rdf-store the import starts by parsing "ontologie.ttl" so the t-box exists continues to add all enities to the graph and connects them afterwards the graph will be printed out to stdout in turtle-format you can pipe the result in a file so the graph is persistend """ import pymongo from pymongo import MongoClient from bson.son import SON from bson.objectid import ObjectId import rdflib from rdflib import Graph, Literal, BNode, Namespace, RDF, RDFS, URIRef FOAF = Namespace('http://xmlns.com/foaf/0.1/') DC = Namespace('http://purl.org/dc/elements/1.1/') ORG = Namespace("http://www.w3.org/ns/org#") CGOV = Namespace('http://reference.data.gov.uk/def/central-government/') client = MongoClient() db = client.lobbyradar Entities = db.entities Relations = db.relations rdf_type = {"person": FOAF.Person, "entity": ORG.Organization} rdf_property_keys = { 'member': 'Mitglied member mitglied'.split(' '), 'executive': 'Vorsitzender ececutive executive'.split(' '), 'connection': 'Bundesdatenschutzbeauftragte Hausausweise consulting lobbyist publication sponsoring'.split(' '), 'association': 'association'.split(' '), 'donation': 'donation'.split(' '), 'committee': 'committee'.split(' '), 'activity': 'activity'.split(' '), 'business': 'business'.split(' '), 'subsidiary': 'Tochterfirma subsidiary subisdiary'.split(' '), 'government': 'government'.split(' '), 'position': 'Position position'.split(' ') } rdf_property = { 'member': FOAF.member, 'executive': ORG.executive, 'connection': FOAF.connection, 'association': ORG.association, 'donation': ORG.donation, 'committee': ORG.committee, 'activity': FOAF.activity, 'business': ORG.business, 'subsidiary': ORG.subsidiary, 'government': CGOV.government, 'position': ORG.position } def get_property(property_key): if property_key in rdf_property: return rdf_property[property_key] return False def get_property_key(relation_type): for key in rdf_property_keys: if relation_type in rdf_property_keys[key]: return key return False def get_prop(relation_type): return get_property(get_property_key(relation_type)) map_got_donation = {} def make_special_declaration(key, source, target, sname, tname): if key == 'donation': if target not in map_got_donation.keys(): map_got_donation[target] = 0 map_got_donation[target] += 1 if(map_got_donation[target] > 5): g.add((target, RDF.type, CGOV.Party)) elif key == 'government': g.add((source, RDF.type, CGOV.Politican)) g = Graph() g.bind("dc", DC) g.bind("foaf", FOAF) g.bind("org", ORG) g.bind("rdf", RDF) g.bind("rdfs", RDFS) g.bind("cgov", CGOV) g.parse('ontologie.ttl', format='turtle') for entity in Entities.find({}): node = BNode() g.add((node, DC.identifier, Literal(entity["_id"]))) g.add((node, RDF.type, rdf_type[entity['type']])) g.add((node, RDFS.label, Literal(entity["name"]))) for relation in Relations.find({}): if len(relation['entities']) < 2: continue; source = g.value(predicate=DC.identifier, object=Literal(str(relation['entities'][0]))) target = g.value(predicate=DC.identifier, object=Literal(str(relation['entities'][1]))) source_type = g.value(subject=source, predicate=RDF.type) target_type = g.value(subject=target, predicate=RDF.type) source_name = g.value(subject=source, predicate=RDFS.label) target_name = g.value(subject=target, predicate=RDFS.label) if not source or not target: continue if source_type == ORG.Organization and target_type == FOAF.Person: source, target = target, source prop = get_prop(relation['type']) 
make_special_declaration(get_property_key(relation['type']), source, target, source_name, target_name) if(prop): g.add((source, prop, target)) else: print(relation['type']) print g.serialize(format='turtle')
gpl-2.0
-3,313,267,060,984,968,700
32.114754
116
0.678218
false
mefly2012/platform
test/check_keys/test.py
1
10132
# -*- coding: utf-8 -*- import json zgcpwsw = {"BBD_XGXX_ID": "176f5a265e6b87d2f1f243677ef1d33b", "caseCode": "(2014)奉民二(商)初字第2829号", "actionCause": "买卖合同纠纷", "Court_litigant": "高磊;管继余", "litigantType": "null", "Def_litigant": "上海莱士血液制品股份有限公司", "applicableLaw": "《中华人民共和国民事诉讼法》第十三条第二款;《中华人民共和国民事诉讼法》第五十条;《中华人民共和国民事诉讼法》第一百四十五条第一款", "courtAcceptance_fee": "6663元", "main": "{\"审理经过\": \"原告重庆医药安通医药有限公司诉被告上海莱士血液制品股份有限公司及反诉原告上海莱士血液制品股份有限公司反诉反诉被告重庆医药安通医药有限公司买卖合同纠纷一案,原告重庆医药安通医药有限公司、反诉原告上海莱士血液制品股份有限公司分别向本院提出撤诉申请。\", \"当事人信息\": \"原告(反诉被告)重庆医药安通医药有限公司。法定代表人唐毅。委托代理人董高升,北京市鑫诺律师事务所律师。被告(反诉原告)上海莱士血液制品股份有限公司。法定代表人郑跃文。委托代理人赵文科,北京市君佑律师事务所律师。\", \"裁判结果\": \"准许原告重庆医药安通医药有限公司撤回起诉。准许反诉原告上海莱士血液制品股份有限公司撤回反诉。本诉案件受理费人民币42,738元,减半收取计人民币21,369元,由原告重庆医药安通医药有限公司负担;反诉案件受理费人民币6,663元,由反诉原告上海莱士血液制品股份有限公司负担。\", \"裁判日期\": \"二?一五年八月十二日\", \"审判人员\": \"审判长管继余代理审判员高磊人民陪审员胡梅芳\", \"本院认为\": \"本院认为,当事人有权在法律规定的范围内处分自己的民事权利和诉讼权利。现原告及反诉原告向本院申请撤诉,与法无悖,应予准许。依照《中华人民共和国民事诉讼法》第十三条第二款、第五十条、第一百四十五条第一款之规定,裁定如下:\", \"书记员\": \"书记员夏萍\"}", "Pro_litigant": "重庆医药安通医药有限公司", "title": "重庆医药安通医药有限公司与上海莱士血液制品股份有限公司买卖合同纠纷一审民事裁定书", "sentenceDate": "2015年08月12日", "BBD_UPTIME": "1143725110", "url": "null", "BBD_DOTIME": "2016年03月07日", "caseType": "民事", "doc_Type": "民事裁定书", "caseOutcome": "起诉方 撤诉", "trialCourt": "上海市奉贤区人民法院", "Def_other_related": "赵文科,北京市君佑律师事务所", "BBD_QYXX_ID_NAME_LIST": "{\"上海莱士血液制品股份有限公司\":\"3eafbead88d74b2ea6121a11d0b13f09\"}", "Pro_other_related": "董高升,北京市鑫诺律师事务所"} zgcpwswkey = ('BBD_QYXX_ID_NAME_LIST', 'BBD_XGXX_ID', '_id', 'title', 'main', 'caseCode', 'actionCause', 'caseType', 'caseOutcome', 'sentenceDate', 'trialCourt', 'Court_litigant', 'Pro_litigant', 'Def_litigant', 'Pro_other_related', 'Def_other_related', 'applicableLaw', 'url', 'litigantType', 'courtAcceptance_fee', 'doc_Type', 'BBD_DOTIME', 'BBD_UPTIME') ktgg = {"BBD_XGXX_ID": "c863f118c2e3d4a21f302b6d7954a585", "caseCode": "(2015)奉民三(民)初字第2901号", "actionCause": "劳动合同纠纷", "city": "上海", "litigant": "上海莱士血液制品股份有限公司", "main": "奉贤     第十五法庭  2015-07-24上午09点00分  (2015)奉民三(民)初字第2901号  劳动合同纠纷  民三庭  朱洁  张澄琪  上海莱士血液制品股份有限公司", "trialDate": "2015年07月24日", "title": "null", "BBD_UPTIME": "1547402611", "BBD_QYXX_ID_NAME_LIST": "{\"上海莱士血液制品股份有限公司\":\"3eafbead88d74b2ea6121a11d0b13f09\"}", "BBD_DOTIME": "1970年01月19日"} ktggkeys = ('BBD_QYXX_ID_NAME_LIST', 'BBD_XGXX_ID', 'main', 'city', 'actionCause', 'litigant', 'caseCode', 'trialDate', 'title', 'BBD_DOTIME', 'BBD_UPTIME') zhixing = {"BBD_XGXX_ID": "970ff5f93602f97e8f6a0570e7702307", "caseCode": "(2014)渝一中法公执字第00720号", "execSubject": "2528.11", "pname": "重庆市黔江区先登小额贷款有限公司", "companyName": "重庆市黔江区先登小额贷款有限公司", "pname_id": "null", "execCourtName": "重庆市第一中级人民法院", "caseCreateTime": "2014年11月10日", "case_state": "执行中", "BBD_UPTIME": "1547402594", "BBD_QYXX_ID_NAME_LIST": "{\"重庆市黔江区先登小额贷款有限公司\":\"c69a7d6e957147d0bab4792cdfab3d65\"}", "BBD_DOTIME": "2015年07月02日"} zhixingkeys = ('BBD_QYXX_ID_NAME_LIST', 'BBD_XGXX_ID', 'company_name', 'pname', 'case_state', 'pname_id', 'execCourtName', 'caseCreateTime', 'caseCode', 'execSubject', 'BBD_DOTIME', 'BBD_UPTIME') dishonesty = {"BBD_XGXX_ID": "23df90f1cda05a3e96800c0ed39e8cb1", "caseCode": "(2015)江法民执字第00203号", "concreteSituation": "其他有履行能力而拒不履行生效法律文书确定义务", "pname": "重庆市黔江区先登小额贷款有限公司", "execCourtName": "重庆市江北区人民法院", "performDegree": "全部未履行", "pubDate": "2015年01月26日", "BBD_UPTIME": "1547411162", "frName": "贾旭东", "BBD_DOTIME": "1970年01月19日", "province": "重庆", "exeCode": "(2014)江法民初字第07991;07993号", "definiteObligation": "null", "pname_id": "563490736", "caseCreateTime": "2015年01月04日", "BBD_QYXX_ID_NAME_LIST": 
"{\"重庆市黔江区先登小额贷款有限公司\":\"c69a7d6e957147d0bab4792cdfab3d65\"}", "execBasUnit": "重庆市江北区人民法院"} dishonesty_keys = ('id', 'BBD_QYXX_ID_NAME_LIST', 'BBD_XGXX_ID', 'id', 'pname', 'pname_id', 'frName', 'execCourtName', 'province', 'exeCode', 'caseCreateTime', 'caseCode', 'execBasUnit', 'definiteObligation', 'performDegree', 'concreteSituation', 'pubDate', 'BBD_DOTIME', 'BBD_UPTIME') zhuanli = {"BBD_XGXX_ID": "f032d760f7f275f749e35ad0bbcdad17", "publiCodes": "CN102845797A", "classCode": "A23L2/38;A23L2/00;A;A23;A23L;A23L2", "address": "408500;重庆市武隆县巷口镇芙蓉路33号", "agent_name": "秦力军", "main_classCode": "A23L2/38;A23L2/00;A;A23;A23L;A23L2", "companyName": "重庆市集银中药材有限责任公司", "NPC_code": "重庆;85", "independentClaim": "金银花茎和/或叶中芳香水的提取方法,其特征在于,具体包括以下步骤:以金银花的茎和/或叶为原料,在所述原料中加入水介质浸泡0.5?5小时后,用水蒸气蒸馏法提取2?14小时,得金银花的芳香水,所述水介质与所述原料的液料体积比为1?14∶1。", "law_state": "公开,实质审查的生效,实质审查的生效", "patentAgency": "北京元本知识产权代理事务所;11308", "title": "金银花茎叶芳香水的制备与应用", "BBD_UPTIME": "1143719928", "url": "null", "BBD_DOTIME": "2015年12月29日", "patentType": "发明专利", "applicant": "重庆市集银中药材有限责任公司", "publiDate": "2013年01月02日", "inventor": "谭红军;杨勇;陈岗;吴振;李晓华;康活泼", "applicationCode": "CN201210315136.2", "BBD_QYXX_ID_NAME_LIST": "{\"重庆市集银中药材有限责任公司\":\"981aa02f89a7454abfa02e8640b01e72\"}", "applicationDate": "2012年08月30日"} zhuanli_keys = ('BBD_QYXX_ID_NAME_LIST', 'BBD_XGXX_ID', 'company_name', 'url', 'patentAgency', 'agent_name', 'NPC_code', 'law_state', 'patentType', 'main_classCode', 'publiDate', 'publiCodes', 'classCode', 'inventor', 'title', 'applicationDate', 'applicationCode', 'independentClaim', 'applicant', 'address', 'BBD_DOTIME', 'BBD_UPTIME') zuzhijigoudm = {"BBD_XGXX_ID": "481995d300e9f60467013788485c9438", "jgmc": "重庆卓筑建筑设计咨询事务所", "jgdm": "585737429", "do_time": "1970年01月19日", "jgdjzh": "500242200016667", "BBD_QYXX_ID_NAME_LIST": "{\"重庆卓筑建筑设计咨询事务所\":\"dafc800c6aff4698af855ca223a3800f\"}", "uptime": "1547406946"} zhuzijigoudm_keys = ['do_time', 'jgdjzh', 'jgdm', 'jgmc', 'uptime'] if __name__ == '__main__': print 'zgcpwsw', set(zgcpwswkey).difference(zgcpwsw.keys()) print 'ktgg', set(ktggkeys).difference(ktgg.keys()) print 'zhuanli', set(zhuanli_keys).difference(zhuanli.keys()) print 'dishonesty', set(dishonesty_keys).difference(dishonesty.keys()) print 'zuzhijigoudm', set(zhuzijigoudm_keys).difference(zuzhijigoudm.keys())
apache-2.0
3,107,354,876,279,709,000
53.05036
642
0.559164
false
nicaogr/Style-Transfer
Misc.py
1
4919
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Utility function for image processing
"""

import numpy as np
import scipy

def histogram_matching(org_image, match_image, grey=False, n_bins=100):
    '''
    Matches histogram of each color channel of org_image with histogram of match_image

    :param org_image: image whose distribution should be remapped
    :param match_image: image whose distribution should be matched
    :param grey: True if images are greyscale
    :param n_bins: number of bins used for histogram calculation
    :return: org_image with same histogram as match_image
    @author: Gatys
    '''
    if grey:
        hist, bin_edges = np.histogram(match_image.ravel(), bins=n_bins, density=True)
        cum_values = np.zeros(bin_edges.shape)
        cum_values[1:] = np.cumsum(hist*np.diff(bin_edges))
        inv_cdf = scipy.interpolate.interp1d(cum_values, bin_edges,bounds_error=True)
        r = np.asarray(uniform_hist(org_image.ravel()))
        r[r>cum_values.max()] = cum_values.max()
        matched_image = inv_cdf(r).reshape(org_image.shape)
    else:
        matched_image = np.zeros_like(org_image)
        for i in range(3):
            hist, bin_edges = np.histogram(match_image[:,:,i].ravel(), bins=n_bins, density=True)
            cum_values = np.zeros(bin_edges.shape)
            cum_values[1:] = np.cumsum(hist*np.diff(bin_edges))
            inv_cdf = scipy.interpolate.interp1d(cum_values, bin_edges,bounds_error=True)
            r = np.asarray(uniform_hist(org_image[:,:,i].ravel()))
            r[r>cum_values.max()] = cum_values.max()
            matched_image[:,:,i] = inv_cdf(r).reshape(org_image[:,:,i].shape)
    return matched_image

def histogram_matching_gradient(org_image, match_image, grey=False, n_bins=100):
    '''
    This function realize an histogram matching on the gradient of the image
    TODO
    '''
    [b, h, w, d] = x.get_shape()
    b, h, w, d = tf.to_int32(b),tf.to_int32(h),tf.to_int32(w),tf.to_int32(d)
    tv_y_size = tf.to_float(b * (h-1) * w * d) # Number of pixels
    tv_x_size = tf.to_float(b * h * (w-1) * d)
    loss_y = tf.nn.l2_loss(x[:,1:,:,:] - x[:,:-1,:,:])
    loss_y /= tv_y_size
    loss_x = tf.nn.l2_loss(x[:,:,1:,:] - x[:,:,:-1,:])
    loss_x /= tv_x_size

def histogram_matching_for_tf(org_image, match_image, n_bins=100):
    '''
    Matches histogram of each color channel of org_image with histogram of match_image

    :param org_image: image whose distribution should be remapped
    :param match_image: image whose distribution should be matched
    :param n_bins: number of bins used for histogram calculation
    :return: org_image with same histogram as match_image
    '''
    matched_image = np.zeros_like(org_image)
    _,_,_,numchannels = org_image.shape
    for i in range(numchannels):
        hist, bin_edges = np.histogram(match_image[:,:,i].ravel(), bins=n_bins, density=True)
        cum_values = np.zeros(bin_edges.shape)
        cum_values[1:] = np.cumsum(hist*np.diff(bin_edges))
        inv_cdf = scipy.interpolate.interp1d(cum_values, bin_edges,bounds_error=True)
        r = np.asarray(uniform_hist(org_image[:,:,i].ravel()))
        r[r>cum_values.max()] = cum_values.max()
        matched_image[0,:,:,i] = inv_cdf(r).reshape(org_image[:,:,i].shape)
    return(matched_image)

def uniform_hist(X):
    '''
    Maps data distribution onto uniform histogram

    :param X: data vector
    :return: data vector with uniform histogram
    '''
    Z = [(x, i) for i, x in enumerate(X)]
    Z.sort()
    n = len(Z)
    Rx = [0]*n
    start = 0 # starting mark
    for i in range(1, n):
        if Z[i][0] != Z[i-1][0]:
            for j in range(start, i):
                Rx[Z[j][1]] = float(start+1+i)/2.0;
            start = i
    for j in range(start, n):
        Rx[Z[j][1]] = float(start+1+n)/2.0;
    return np.asarray(Rx) / float(len(Rx))

# Those functions come from https://gist.github.com/bistaumanga/6309599

def imhist(im):
    # calculates normalized histogram of an image
    m, n = im.shape
    h = [0.0] * 256
    for i in range(m):
        for j in range(n):
            h[im[i, j]]+=1
    return np.array(h)/(m*n)

def cumsum(h):
    # finds cumulative sum of a numpy array, list
    return [sum(h[:i+1]) for i in range(len(h))]

def histeq(img):
    #calculate Histogram
    img2 = np.zeros(img.shape)
    for k in range(3):
        im = img[:,:,k]
        h = imhist(im)
        cdf = np.array(cumsum(h)) #cumulative distribution function
        sk = np.uint8(255 * cdf) #finding transfer function values
        s1, s2 = im.shape
        Y = np.zeros_like(im)
        # applying transfered values for each pixels
        for i in range(0, s1):
            for j in range(0, s2):
                Y[i, j] = sk[im[i, j]]
        img2[:,:,k] = Y
    #return transformed image, original and new istogram,
    # and transform function
    return(img2)
gpl-3.0
-7,016,098,763,295,100,000
34.905109
97
0.602765
false
rbirger/OxfordHCVNonSpatial
Non-Spatial Model Outline and Code_old.py
1
44084
# -*- coding: utf-8 -*- # <nbformat>3.0</nbformat> # <markdowncell> # ###Description and preliminary code for Continuous-Time Markov Chain Model # # This model will test the importance of including a spatial component in the system. We will use ODEs to describe the dynamics of each lineage and competition between lineages. # The different states that each cell can move through are as follows # # * Healthy Hepatocytes # # * Latently Infected Hepatocytes # # * Infected Hepatocytes # # * Dead Infected Hepatocytes # # * Dead Healthy Hepatocytes # # Healthy cells are regenerated from Dead cells. Interacting with Infected cells, they become Latently Infected, and after the eclipse phase, Latent Infections become Infectious. Both Healthy and Infected Hepatocytes die, with Infected being eliminated by the immune response faster than natural death rates. Dead cells regenerate, but those dead after being infected with HCV have a lower probability of regenerating. # # Adapting the Perelson/Neumann model, we have # # $\begin{eqnarray*} # \frac{dT}{dt}& =& \phi_{DT} D_T + \phi_{DI} D_I - (\lambda_{virions} + \lambda_{local} +\nu_T) T\\ # \frac{dE}{dt}& =& (\lambda_{virions} + \lambda_{local} )T - (\alpha +\nu_T)E\\ # \frac{dI}{dt}& =& \alpha E- \nu_I I\\ # \frac{dD_T}{dt}& =& \nu_T(T+E) - \phi_{DT} D_T\\ # \frac{dD_I}{dt}& =& \nu_I I - \phi_{DI} D_I\\\ # \end{eqnarray*}$ # # # # # To translate these equations into a continuous-time Markov Chain model, we can calculate the transition probabilities from the parameters above. Let $\vec{X(t)} = [T(t), E(t), I(t), D_T(t), D_I(t)]$, so the probability of state change is defined as Prob$\{\Delta \vec{X(t)} = (a, b, c, d, e)|\vec{X(t)}\}$, where $a$ represents the change in state $T$, $b$ in state $E$, etc. We assume that the time step is small enough that each change is only in one cell, so $a - e$ can only take the values 0 or $\pm 1$. 
The transition probabilities are as follows # # # $$\begin{cases} # (\lambda_{virions} + \lambda_{local}) T\ \Delta t + o(\Delta t), & a = -1, b = 1\\ # \nu_T T \Delta t + o(\Delta t), & a = -1, d = 1\\ # \alpha E \Delta t + o(\Delta t), & b = -1, c = 1\\ # \nu_T E \Delta t + o(\Delta t), & b = -1, d = 1\\ # \nu_I I \Delta t + o(\Delta t), & c = -1, e = 1 \\ # \phi_{DT} D_T \Delta t + o(\Delta t), & d = -1, a = 1\\ # \phi_{DI} D_I \Delta t + o(\Delta t), & e = -1, a = 1\\ # \end{cases}$$ # # The generator matrix $\mathbf{Q}$ derived from these transition probabilities is thus as follows # # <!--($$ \mathbf{Q} = # \left[ \begin{array}{ccccc} # - (\beta I + \lambda +d) T & (\beta I + \lambda) T & 0 & 0 & dT \\ # 0 & -(\eta + d) L & \eta L &0 & dL \\ # 0 & 0 & -\delta I & \delta I & 0 \\ # \alpha_I D_I &0 &0 & -\alpha_I D_I&0\\ # \alpha_T D_T & 0 & 0& 0& -\alpha_T D_T\\ # \end{array} \right] $$ --> # # $$ \mathbf{Q} = # \left[ \begin{array}{ccccc} # 0& (\lambda_{virions} + \lambda_{local}) T& 0 & 0 & \nu_T T \\ # 0 & 0 & \alpha E & \nu_T E &0 \\ # 0 & 0 & 0 & 0 & \nu_I I\\ # \phi_{DT} D_T &0 &0 & 0&0\\ # \phi_{DI} D_I & 0 & 0& 0& 0\\ # \end{array} \right] $$ # <codecell> %matplotlib inline from __future__ import division import numpy as np import matplotlib.pyplot as plt # <codecell> beta=.2 nu = .01 d = 2e-2 eta = 1 delta = 3*d alpha_I = .8e-1 alpha_T = 2e-1 # <codecell> from __future__ import division import numpy as np #Number of state transitions to observe M = int(1e6) # time vector time = np.zeros(M) #Define parameters rho = 8.18 #viral export rate c = 22.3 #viral clearance rate gamma = 1500 #scaling factor R = 4.1825 #average HCV RNA in infected hepatocyte N_liver = int(8e10) #Number of cells in liver alpha = 1 #1/latent period (days) nu_T = 1.4e-2 #death rate of healthy cells nu_I = 1/7 #death rate of infected cells phi_T = 10*nu_T #regeneration rate of dead healthy cells phi_I = .8*phi_T #regeneration rate of dead infected cells beta_V = 1e-8 #viral transmision rate beta_L = R*1e-5/(60*24) #cell-cell transmission rate N=N_liver/1e6 init=10 v_init = 1e6 sim=3 Q = np.zeros(7) Q[0] = (beta_L*init + beta_V*v_init); #Infection of Target cell Q[1] = nu_T; #Death of target cell Q[2] = alpha; #latent cell becomes infected Q[3] = nu_T; #latent cell dies Q[4] = nu_I; #Infected cell dies Q[5] = phi_T; #Healthy cell regenerates Q[6] = phi_I; #Infected cell regenerates #Construct matrix of state transition vectors trans_vecs = np.zeros([5,7]) #state 1: infection of healthy cell trans_vecs[0,0] = -1; trans_vecs[1,0] = 1; #state 2: death of healthy cell trans_vecs[0,1] = -1; trans_vecs[3,1] = 1; #state 3: movement of latent cell into infected trans_vecs[1,2] = -1; trans_vecs[2,2] = 1; #state 4: death of latent cell trans_vecs[1,3] = -1; trans_vecs[3,3] = 1; #state 5: death of infected cell trans_vecs[2,4] = -1; trans_vecs[4,4] = 1; #state 6: regeneration of dead healthy cell trans_vecs[3,5] = -1; trans_vecs[0,5] = 1; #state 6: regeneration of dead infected cell trans_vecs[4,6] = -1; trans_vecs[0,6] = 1; #Initialize state variable vectors T = np.zeros(M) E = np.zeros(M) I = np.zeros(M) Dt = np.zeros(M) Di = np.zeros(M) VL = np.zeros(M) #Input initial conditions I[0] = init; T[0] = N-init; VL[0] = v_init #Initialize state vector and index #state_vec = np.vstack([S,E,I,Di,Dt]) j =0 while I[j] >0 and j<M-1: #print [T[j],E[j],I[j],Dt[j],Di[j]] #Update Q to reflect new number of infected cells and viruses Q[0] = (beta_L*I[j] +beta_V*VL[j]); #Calculate transition matrix Qij = 
Q*[T[j],T[j],E[j],E[j],I[j],Dt[j],Di[j]] #Draw from exponential distributions of waiting times time_vec = -np.log(np.random.random(7))/Qij #np.random.exponential([1/Qij])[0] # #find minimum waiting time and obtain index to ascertain next state jump newTime = min(time_vec) time_vecL = time_vec.tolist() state_idx = time_vecL.index(min(time_vecL)) [T[j+1],E[j+1],I[j+1],Dt[j+1],Di[j+1]]=[T[j],E[j],I[j],Dt[j],Di[j]]+ trans_vecs[:,state_idx] VL[j+1] = VL[0]+rho*I[j]*R/(gamma*c) time[j+1] = time[j] + newTime j+=1 # <codecell> [T[j],E[j],I[j],Dt[j],Di[j]] rho*I[j]*R/(gamma*c) # <codecell> %%timeit np.random.exponential(y) # <codecell> y= np.ones(11) # <codecell> plt.plot(time[0:M-1],VL[0:M-1]) # <codecell> plt.plot(time,T, label = 'Susc') plt.plot(time,I, label = 'Infected') plt.plot(time,Dt, label = 'Dead (healthy)') plt.plot(time,Di, label = 'Dead (infected)') plt.legend(loc = 'upper right') # <markdowncell> # An updated version of the model includes a second latent class that keeps cells latently infected for longer before becoming infectious, and also allows for proliferation of infected cells by allowing cells to be reborn into the latent class # # * Healthy Hepatocytes # # * Latently Infected Hepatocytes # # * Long-lived Latently Infected Hepatocytes # # * Infected Hepatocytes # # * Dead Infected Hepatocytes # # * Dead Healthy Hepatocytes # # Healthy cells are regenerated from Dead cells. Interacting with Infected cells, they become Latently Infected, and after the eclipse phase, Latent Infections become Infectious. Both Healthy and Infected Hepatocytes die, with Infected being eliminated by the immune response faster than natural death rates. Dead cells regenerate, but those dead after being infected with HCV have a lower probability of regenerating. Some cells regenerate into infectious cells. # # Adapting the Perelson/Neumann model, we have # # $\begin{eqnarray*} # \frac{dT}{dt}& =& \phi_{DT} D_T + (1-\kappa)\phi_{DI} D_I - (\lambda_{virions} + \lambda_{local} +\nu_T) T\\ # \frac{dE}{dt}& =& (1-\eta)(\lambda_{virions} + \lambda_{local} )T - (\alpha +\nu_T)E\\ # \frac{dEX}{dt}& =& \eta(\lambda_{virions} + \lambda_{local} )T - (\alpha_X +\nu_T)E\\ # \frac{dI}{dt}& =& \kappa\phi_{DI} D_I+ \alpha E- \nu_I I\\ # \frac{dD_T}{dt}& =& \nu_T(T+E+EX) - \phi_{DT} D_T\\ # \frac{dD_I}{dt}& =& \nu_I I - \phi_{DI} D_I\\\ # \end{eqnarray*}$ # # To translate these equations into a continuous-time Markov Chain model, we can calculate the transition probabilities from the parameters above. Let $\vec{X(t)} = [T(t), E(t), EX(t) I(t), D_T(t), D_I(t)]$, so the probability of state change is defined as Prob$\{\Delta \vec{X(t)} = (a, b, c, d, e, f)|\vec{X(t)}\}$, where $a$ represents the change in state $T$, $b$ in state $E$, etc. We assume that the time step is small enough that each change is only in one cell, so $a - f$ can only take the values 0 or $\pm 1$. 
The transition probabilities are as follows # # # $$\begin{cases} # (1-\eta)(\lambda_{virions} + \lambda_{local}) T\ \Delta t + o(\Delta t), & a = -1, b = 1\\ # \eta(\lambda_{virions} + \lambda_{local}) T\ \Delta t + o(\Delta t), & a = -1, c = 1\\ # \nu_T T \Delta t + o(\Delta t), & a = -1, e = 1\\ # \alpha E \Delta t + o(\Delta t), & b = -1, d = 1\\ # \nu_T E \Delta t + o(\Delta t), & b = -1, e = 1\\ # \alpha_X EX \Delta t + o(\Delta t), & c = -1, d = 1\\ # \nu_T EX \Delta t + o(\Delta t), & c = -1, e = 1\\ # \nu_I I \Delta t + o(\Delta t), & d = -1, f = 1 \\ # \phi_{DT} D_T \Delta t + o(\Delta t), & d = -1, a = 1\\ # \kappa\phi_{DI} D_I \Delta t + o(\Delta t), & f = -1, d = 1\\ # (1-\kappa)\phi_{DI} D_I \Delta t + o(\Delta t), & f = -1, a = 1\\ # \end{cases}$$ # # The generator matrix $\mathbf{Q}$ derived from these transition probabilities is thus as follows # # # $$ \mathbf{Q} = # \left[ \begin{array}{cccccc} # 0& (1-\eta)(\lambda_{virions} + \lambda_{local}) T& \eta(\lambda_{virions} + \lambda_{local}) T& 0 & \nu_T T &0\\ # 0 & 0 & \alpha E &0 &\nu_T E & 0\\ # 0 & 0 & \alpha_X EX &0 &\nu_T E & 0\\ # 0 & 0 & 0 & 0 & 0&\nu_I I \\ # \phi_{DT} D_T &0 &0 & 0&0&0\\ # (1-\kappa)\phi_{DI} D_I & 0 & 0& \kappa \phi_{DI}& 0&0\\ # \end{array} \right] $$ # # <codecell> %load_ext cythonmagic # <codecell> %%cython from __future__ import division import numpy as np import random class HCVHepatocyte: def __init__(self, cellID, parentID, infType, tLat, cellType, tInf = None, tDead = None): self.cellID = cellID #ID of cell self.parentID = parentID #ID of infector, whether it is virus or infected cell self.infType = infType #type of infection (from virus or from infected cell) self.tLat = tLat #time of infection of cell (time cell became latently infected) self.cellType = cellType #type of cell latent, longterm, infectious, infectious from longterm, #dead, dead from long term self.tInf = tInf #time to become infectious self.tDead = tDead #time of death if cellType in ('Infected', 'InfectedL'): if tInf == None: print("Error: Infectious cells must have time Infectious") elif cellType in ('Dead', 'DeadL'): if tInf == None: print("Error: Dead cells must have time of death") #define method for infecting a susceptible cell def InfectCell(self, newID, simTime, newInfType): ''' Method for infecting new cell''' if self.cellType not in ['Infected', 'InfectedL']: print("Error: Latent Cell cannot infect") else: return HCVHepatocyte(newID, self.cellID, 'Cell', simTime, newInfType) class HCVVirion: def __init__(self, virusID, parentID): self.virusID = virusID self.parentID = parentID def InfectCell(self, newID, simTime, newInfType): return HCVHepatocyte(newID, self.virusID, 'Virus', simTime, newInfType) time = 0; cell1 = HCVHepatocyte(1, None, 'Virus', time, 'Latent') #Create function to randomly select one cell to infect def CreateLatent(cellHandle, newID, state_idx, simTime): if state_idx in [0,1]: newLatent = cellHandle.InfectCell(newID, simTime, 'Latent') return newLatent elif state_idx in [2,3]: newLatent = cellHandle.InfectCell(newID, simTime, 'LatentL') return newLatent else: print("Error: State is not an infecting transition") #Create function to Kill Infected cell def KillInfected(cellHandle, time): cellHandle.tDead = time if cellHandle.cellType == 'Infected': cellHandle.cellType = 'Dead' elif cellHandle.cellType == 'InfectedL': cellHandle.cellType = 'DeadL' else: print("Error: Cannot kill uninfected cell") return cellHandle #Create function to move latent to infectious def LatentInfectious(cellHandle, time): 
cellHandle.tInf = time if cellHandle.cellType == 'Latent': cellHandle.cellType = 'Infected' elif cellHandle.cellType == 'LatentL': cellHandle.cellType = 'InfectedL' else: print("Error: Cell not Latent") return cellHandle #Number of state transitions to observe M = int(1e7) # time vector time = np.zeros(M) #Define parameters init=10 #10 #initial number of infected hepatocytes v_init = 0#initial viral load ALT_init = 100 #initial ALT level rho = 8.18 #viral export rate c = 22.3 #viral clearance rate gamma = 1500 #scaling factor - R = 4.1825 #average HCV RNA in infected hepatocyte N_liver = int(1e11) #Number of cells in liver alpha = 1 #1/latent period (days) alpha_x = 1.3e-2 #1/long-term latent period nu_T = 1.4e-2 #death rate of healthy cells nu_I = 1/7 #death rate of infected cells phi_T = 10*nu_T #regeneration rate of dead healthy cells phi_I = .8*phi_T #regeneration rate of dead infected cells beta_V = .5e-8 #viral transmision rate beta_L = R*1e-5/(60*24) #cell-cell transmission rate eta = .01 #proportion of infected cells that go long-term latent kappa = 0 #.1 #proportion of dead infected cells regenerated as infected cells changes = 13; delta = .33 #ALT degradation rate N=N_liver/1e7 #initial number of hepatocytes eps = (delta*ALT_init)/(nu_T*N) #rate of ALT production Q = np.zeros(changes) Q[0] = (1-eta)*(beta_L*init) #Infection of Target cell by cell-> latent Q[1] = (1-eta)*beta_V*v_init #Infection of Target cell by virus -> latent Q[2] = eta*beta_L*init #Infection of Target cell by cell -> long-term latent Q[3] = eta*beta_V*v_init #Infection of Target cell by virus -> long-term latent Q[4] = nu_T; #Death of target cell Q[5] = alpha; #latent cell becomes infected Q[6] = nu_T; #latent cell dies Q[7] = alpha_x #long-term latent cell becomes infected Q[8] = nu_T #long-term latent cell dies Q[9] = nu_I; #Infected cell dies Q[10] = phi_T; #Healthy cell regenerates Q[11] = (1-kappa)*phi_I; #Infected cell regenerates into healthy cell Q[12] = kappa*phi_I #Construct matrix of state transition vectors trans_vecs = np.zeros([6, changes]) #state 1: infection of healthy cell by cell-> latent trans_vecs[0,0] = -1; trans_vecs[1,0] = 1; #state 2: infection of healthy cell by virus -> latent trans_vecs[0,1] = -1; trans_vecs[1,1] = 1; #state 3: infection of healthy cell by cell -> long-term latent trans_vecs[0,2] = -1; trans_vecs[2,2] = 1; #state 4: infection of healthy cell by virus -> long-term latent trans_vecs[0,3] = -1; trans_vecs[2,3] = 1; #state 5: death of healthy cell trans_vecs[0,4] = -1; trans_vecs[4,4] = 1; #state 6: movement of latent cell into infected trans_vecs[1,5] = -1; trans_vecs[3,5] = 1; #state 7: death of latent cell trans_vecs[1,6] = -1; trans_vecs[4,6] = 1; #state 8: movement of long-term latent cell into infected trans_vecs[2,7] = -1; trans_vecs[3,7] = 1; #state 9: death of long-term latent cell trans_vecs[2,8] = -1; trans_vecs[4,8] = 1; #state 10: death of infected cell trans_vecs[3,9] = -1; trans_vecs[5,9] = 1; #state 11: regeneration of dead healthy cell trans_vecs[4,10] = -1; trans_vecs[0,10] = 1; #state 12: regeneration of dead infected cell into healthy cell trans_vecs[5,11] = -1; trans_vecs[0,11] = 1; #state 13: regeneration of dead infected cell into infected cell trans_vecs[5,12] = -1; trans_vecs[3,12] = 1; #Initialize state variable vectors T = np.zeros(M) E = np.zeros(M) Ex = np.zeros(M) I = np.zeros(M) Dt = np.zeros(M) Di = np.zeros(M) VL = np.zeros(M) ALT = np.zeros(M) state_vec = np.zeros(M) InfectionChain = [] Infecteds = [] #Initialize Infected Hepatocyte 
objects InfectedDict = {} for i in range(0,int(init/2)): x = HCVHepatocyte(i, None, 'Initial', -1, 'Infected', 0) InfectedDict[i] = x for i in range(int(init/2),init): x = HCVHepatocyte(i, None, 'Initial', -83, 'InfectedL', 0) InfectedDict[i] = x LatentDict = {} LatentLDict = {} DeadDict = {} lastCellID = init-1 #get last cellID #Input initial conditions I[0] = init; T[0] = N-init; VL[0] = v_init j =0 InfectionArray = [] while I[j] >= 0 and j<M-1: #print [T[j],E[j],I[j],Dt[j],Di[j]] #Update Q to reflect new number of infected cells and viruses Q[0] = (1-eta)*beta_L*I[j] Q[1] = (1-eta)*beta_V*VL[j] Q[2] = eta*beta_L*I[j] Q[3] = eta*beta_V*VL[j] #Calculate transition matrix Qij = Q*[T[j],T[j],T[j], T[j],T[j], E[j],E[j], Ex[j], Ex[j], I[j], Dt[j], Di[j], Di[j]] #Draw from exponential distributions of waiting times time_vec = -np.log(np.random.random(changes))/Qij #np.random.exponential([1/Qij])[0] # #find minimum waiting time and obtain index to ascertain next state jump newTime = min(time_vec) time_vecL = time_vec.tolist() state_idx = time_vecL.index(min(time_vecL)) state_vec[j] = state_idx [T[j+1],E[j+1],Ex[j+1],I[j+1],Dt[j+1],Di[j+1]]=[T[j],E[j],Ex[j],I[j],Dt[j],Di[j]]+ trans_vecs[:,state_idx] #make adjustments to hepatocyte dictionaries according to state transition #Infection of healthy cell by cell or virus -> latent or longterm latent if state_idx in [0,1,2,3]: Infector = InfectedDict[random.choice(list(InfectedDict.keys()))] #choose random infector cell newCellID = lastCellID + 1 lastCellID = newCellID newLatent = CreateLatent(Infector, newCellID, state_idx, time[j]) if state_idx in [0,1]: LatentDict[newCellID] = newLatent elif state_idx in [2,3]: LatentLDict[newCellID] = newLatent else: print('Incorrect State') #Latent cell becomes infectious elif state_idx in [5,7]: if state_idx == 5: LatCell = LatentDict[random.choice(list(LatentDict.keys()))] del LatentDict[LatCell.cellID] #remove cell from Latent Dict elif state_idx == 7: LatCell = LatentLDict[random.choice(list(LatentLDict.keys()))] del LatentLDict[LatCell.cellID] else: print('Incorrect State') InfectedDict[LatCell.cellID] = LatentInfectious(LatCell, time[j]) #add cell to Infected Dict #Latent cell dies elif state_idx == 6: del LatentDict[random.choice(list(LatentDict.keys()))] #LatentL cell dies elif state_idx == 8: del LatentLDict[random.choice(list(LatentLDict.keys()))] #Infected cell dies elif state_idx == 9: KilledCell = InfectedDict[random.choice(list(InfectedDict.keys()))] #choose random infector cell del InfectedDict[KilledCell.cellID] KilledCell.cellType = 'Dead' KilledCell.tDead = time[j] #newDead = KillInfected(KilledCell,time[j]) #DeadDict[newDead.cellID] = newDead DeadDict[KilledCell.cellID] = KilledCell #Dead infected cell regenerates into health cell -- just delete from dead dict elif state_idx == 11: del DeadDict[random.choice(list(DeadDict.keys()))] #Infected cell regenerated from Dead cell elif state_idx == 12: newCellID = lastCellID + 1 lastCellID = newCellID DeadGen = DeadDict[random.choice(list(DeadDict.keys()))] del DeadDict[DeadGen.cellID] newInfected = HCVHepatocyte(newCellID,DeadGen.cellID,'DeadGen', DeadGen.tDead, 'Infected', time[j]) InfectedDict[newInfected.cellID] = newInfected #Output Infection chain and infecteds at each time step #check lengths of InfectionChain and Infecteds if len(InfectionChain)< int(time[j])+1: InfectionChain.append([]) if len(Infecteds) < int(time[j])+1: Infecteds.append([]) #add to array of infections with timestep if state_idx in [0,1,2,3]: #if int(time[j]) in 
InfectionChain: # InfectionChain[int(time[j])].append([Infector.cellID, newCellID]) #else: # InfectionChain[int(time[j])] = [[Infector.cellID, newCellID]] InfectionChain[int(time[j])].append([Infector.cellID, newCellID]) elif state_idx == 12: #if int(time[j]) in InfectionChain: # InfectionChain[int(time[j])].append([DeadGen.cellID, newInfected.cellID]) #else: # InfectionChain[int(time[j])] = [DeadGen.cellID, newInfected.cellID] InfectionChain[int(time[j])].append([DeadGen.cellID, newInfected.cellID]) #else: # InfectionChain.append([]) #Infecteds.append(int([time[j]),list(InfectedDict.keys())]) #if int(time[j]) in Infecteds: Infecteds[int(time[j])] = list(set(Infecteds[int(time[j])] + InfectedDict.keys() +LatentDict.keys() +LatentLDict.keys())) #update viral load and ALT VL[j+1] = np.floor(rho*N_liver*(I[j+1]/N)*R/(gamma*c)) #VL[j] + (I[j]/N)*rho*N_liver*newTime - c*gamma*VL[j]*newTime # ALT[j+1] = ALT[j] + (eps*(nu_T*(T[j] + E[j] + Ex[j]) + nu_I*I[j])-delta*ALT[j])*newTime time[j+1] = time[j] + newTime j+=1 # <codecell> #Sort Infecteds and Infection chain, and break up infection chain InfectedsSort = dict() for i in Infecteds.keys(): InfectedsSort[i] = sorted(Infecteds[i]) InfectionChainSort = {} for i in InfectionChain.keys(): a = sorted(list(InfectionChain[i]), key=lambda x: x[0]) InfectionChainSort[i] = [b for c in a for b in c] # <codecell> #Sort Infecteds and Infection chain, and break up infection chain InfectedsSort = dict() for key, item in enumerate(Infecteds): InfectedsSort[key] = sorted(item) InfectionChainSort = dict() for key, item in enumerate(InfectionChain): a = sorted(list(item), key=lambda x: x[0]) InfectionChainSort[key] = [b for c in a for b in c] # <codecell> import csv f = open('Infecteds1e7.txt', 'w') writer = csv.writer(f, delimiter = ' ') for key, value in InfectedsSort.iteritems(): writer.writerow([key] + value) f = open('InfectionChain1e7.txt', 'w') writer = csv.writer(f, delimiter = ' ') for key, value in InfectionChainSort.iteritems(): writer.writerow([key] + value) # <codecell> f = open('Infecteds.txt', 'w') writer = csv.writer(f, delimiter = '\t') for key, value in Infecteds.iteritems(): writer.writerow([key] + [value]) # <codecell> len(InfectionChainSort) # <codecell> InfectionChainSort[10] # <codecell> InfectionChain[10] # <codecell> plt.plot(time,T, label = 'Susc') plt.plot(time,I, label = 'Infected') plt.plot(time,Dt, label = 'Dead (healthy)') plt.plot(time,Di, label = 'Dead (infected)') plt.legend(loc = 'upper right') # <codecell> plt.plot(time,VL) # <codecell> random.choice(list(InfectedDict.keys())) InfectedDict[8].cellType # <codecell> plt.plot(time,T, label = 'Susceptible') plt.plot(time,I+Di, label = 'Ever Infected') plt.legend(loc = 'upper right') # <codecell> HepatocyteDict = {} for i in range(init): x = HCVHepatocyte(i, None, 'Initial', -1, 'Infected', 0) HepatocyteDict[i] = x # <codecell> InfectedDict = {} for i in range(0,int(init/2)-2): x = HCVHepatocyte(i, None, 'Initial', -1, 'Infected', 0) InfectedDict[i] = x for i in range(int(init/2)-1,init-1): x = HCVHepatocyte(i, None, 'Initial', -83, 'InfectedL', 0) InfectedDict[i] = x # <codecell> InfectedDict[53].cellType # <codecell> #Create Module for infection functions #Build infected cell class import random class HCVHepatocyte: def __init__(self, cellID, parentID, infType, tLat, cellType, tInf = None, tDead = None): self.cellID = cellID #ID of cell self.parentID = parentID #ID of infector, whether it is virus or infected cell self.infType = infType #type of infection (from virus or from 
infected cell) self.tLat = tLat #time of infection of cell (time cell became latently infected) self.cellType = cellType #type of cell latent, longterm, infectious, infectious from longterm, #dead, dead from long term self.tInf = tInf #time to become infectious self.tDead = tDead #time of death if cellType in ('Infected', 'InfectedL'): if tInf == None: print("Error: Infectious cells must have time Infectious") elif cellType in ('Dead', 'DeadL'): if tInf == None: print("Error: Dead cells must have time of death") #define method for infecting a susceptible cell def InfectCell(self, newID, simTime, newInfType): ''' Method for infecting new cell''' if self.cellType not in ['Infected', 'InfectedL']: print("Error: Latent Cell cannot infect") else: return HCVHepatocyte(newID, self.cellID, 'Cell', simTime, newInfType) class HCVVirion: def __init__(self, virusID, parentID): self.virusID = virusID self.parentID = parentID def InfectCell(self, newID, simTime, newInfType): return HCVHepatocyte(newID, self.virusID, 'Virus', simTime, newInfType) time = 0; cell1 = HCVHepatocyte(1, None, 'Virus', time, 'Latent') #Create function to randomly select one cell to infect def CreateLatent(cellHandle, newID, state_idx, simTime): if state_idx in [0,1]: newLatent = cellHandle.InfectCell(newID, simTime, 'Latent') return newLatent elif state_idx in [2,3]: newLatent = cellHandle.InfectCell(newID, simTime, 'LatentL') return newLatent else: print("Error: State is not an infecting transition") #Create function to Kill Infected cell def KillInfected(cellHandle, time): cellHandle.tDead = time if cellHandle.cellType == 'Infected': cellHandle.cellType = 'Dead' elif cellHandle.cellType == 'InfectedL': cellHandle.cellType = 'DeadL' else: print("Error: Cannot kill uninfected cell") return cellHandle #Create function to move latent to infectious def LatentInfectious(cellHandle, time): cellHandle.tInf = time if cellHandle.cellType == 'Latent': cellHandle.cellType = 'Infected' elif cellHandle.cellType == 'LatentL': cellHandle.cellType = 'InfectedL' else: print("Error: Cell not Latent") return cellHandle # <codecell> state_idx = 0 time = np.zeros(1e3) j=1 time[j] = 1 Infector = InfectedDict[random.choice(list(InfectedDict.keys()))] #choose random infector cell newCellID = lastCellID + 1 lastCellID = newCellID newLatent = CreateLatent(Infector, newCellID, state_idx, time[j]) if state_idx ==0: LatentDict[newCellID] = newLatent elif state_idx == 2: LatentLDict[newCellID] = newLatent else: print('Incorrect State') # <codecell> #Try numba from numba import double from numba.decorators import jit, autojit import timeit from __future__ import division import numpy as np import random X = np.random.random((1000, 3)) D = np.empty((1000, 1000)) def pairwise_python(X): M = X.shape[0] N = X.shape[1] D = np.empty((M, M), dtype=np.float) for i in range(M): for j in range(M): d = 0.0 for k in range(N): tmp = X[i, k] - X[j, k] d += tmp * tmp D[i, j] = np.sqrt(d) return D %timeit pairwise_python(X) # <codecell> # <codecell> @autojit class HCVHepatocyte: def __init__(self, cellID, parentID, infType, tLat, cellType, tInf = None, tDead = None): self.cellID = cellID #ID of cell self.parentID = parentID #ID of infector, whether it is virus or infected cell self.infType = infType #type of infection (from virus or from infected cell) self.tLat = tLat #time of infection of cell (time cell became latently infected) self.cellType = cellType #type of cell latent, longterm, infectious, infectious from longterm, #dead, dead from long term self.tInf = tInf 
#time to become infectious self.tDead = tDead #time of death if cellType in ('Infected', 'InfectedL'): if tInf == None: print("Error: Infectious cells must have time Infectious") elif cellType in ('Dead', 'DeadL'): if tInf == None: print("Error: Dead cells must have time of death") #define method for infecting a susceptible cell def InfectCell(self, newID, simTime, newInfType): ''' Method for infecting new cell''' if self.cellType not in ['Infected', 'InfectedL']: print("Error: Latent Cell cannot infect") else: return HCVHepatocyte(newID, self.cellID, 'Cell', simTime, newInfType) class HCVVirion: def __init__(self, virusID, parentID): self.virusID = virusID self.parentID = parentID def InfectCell(self, newID, simTime, newInfType): return HCVHepatocyte(newID, self.virusID, 'Virus', simTime, newInfType) time = 0; cell1 = HCVHepatocyte(1, None, 'Virus', time, 'Latent') #Create function to randomly select one cell to infect def CreateLatent(cellHandle, newID, state_idx, simTime): if state_idx in [0,1]: newLatent = cellHandle.InfectCell(newID, simTime, 'Latent') return newLatent elif state_idx in [2,3]: newLatent = cellHandle.InfectCell(newID, simTime, 'LatentL') return newLatent else: print("Error: State is not an infecting transition") CreateLatentNumba = autojit(CreateLatent) #Create function to Kill Infected cell def KillInfected(cellHandle, time): cellHandle.tDead = time if cellHandle.cellType == 'Infected': cellHandle.cellType = 'Dead' elif cellHandle.cellType == 'InfectedL': cellHandle.cellType = 'DeadL' else: print("Error: Cannot kill uninfected cell") return cellHandle KillInfected = autojit(KillInfected) #Create function to move latent to infectious def LatentInfectious(cellHandle, time): cellHandle.tInf = time if cellHandle.cellType == 'Latent': cellHandle.cellType = 'Infected' elif cellHandle.cellType == 'LatentL': cellHandle.cellType = 'InfectedL' else: print("Error: Cell not Latent") return cellHandle #Number of state transitions to observe M = int(1e5) # time vector time = np.zeros(M) #Define parameters init=10 #10 #initial number of infected hepatocytes v_init = 0#initial viral load ALT_init = 100 #initial ALT level rho = 8.18 #viral export rate c = 22.3 #viral clearance rate gamma = 1500 #scaling factor - R = 4.1825 #average HCV RNA in infected hepatocyte N_liver = int(1e11) #Number of cells in liver alpha = 1 #1/latent period (days) alpha_x = 1.3e-2 #1/long-term latent period nu_T = 1.4e-2 #death rate of healthy cells nu_I = 1/7 #death rate of infected cells phi_T = 10*nu_T #regeneration rate of dead healthy cells phi_I = .8*phi_T #regeneration rate of dead infected cells beta_V = .5e-8 #viral transmision rate beta_L = R*1e-5/(60*24) #cell-cell transmission rate eta = .01 #proportion of infected cells that go long-term latent kappa = 0 #.1 #proportion of dead infected cells regenerated as infected cells changes = 13; delta = .33 #ALT degradation rate N=N_liver/1e6 #initial number of hepatocytes eps = (delta*ALT_init)/(nu_T*N) #rate of ALT production Q = np.zeros(changes) Q[0] = (1-eta)*(beta_L*init) #Infection of Target cell by cell-> latent Q[1] = (1-eta)*beta_V*v_init #Infection of Target cell by virus -> latent Q[2] = eta*beta_L*init #Infection of Target cell by cell -> long-term latent Q[3] = eta*beta_V*v_init #Infection of Target cell by virus -> long-term latent Q[4] = nu_T; #Death of target cell Q[5] = alpha; #latent cell becomes infected Q[6] = nu_T; #latent cell dies Q[7] = alpha_x #long-term latent cell becomes infected Q[8] = nu_T #long-term latent cell dies Q[9] = 
nu_I; #Infected cell dies Q[10] = phi_T; #Healthy cell regenerates Q[11] = (1-kappa)*phi_I; #Infected cell regenerates into healthy cell Q[12] = kappa*phi_I #Construct matrix of state transition vectors trans_vecs = np.zeros([6, changes]) #state 1: infection of healthy cell by cell-> latent trans_vecs[0,0] = -1; trans_vecs[1,0] = 1; #state 2: infection of healthy cell by virus -> latent trans_vecs[0,1] = -1; trans_vecs[1,1] = 1; #state 3: infection of healthy cell by cell -> long-term latent trans_vecs[0,2] = -1; trans_vecs[2,2] = 1; #state 4: infection of healthy cell by virus -> long-term latent trans_vecs[0,3] = -1; trans_vecs[2,3] = 1; #state 5: death of healthy cell trans_vecs[0,4] = -1; trans_vecs[4,4] = 1; #state 6: movement of latent cell into infected trans_vecs[1,5] = -1; trans_vecs[3,5] = 1; #state 7: death of latent cell trans_vecs[1,6] = -1; trans_vecs[4,6] = 1; #state 8: movement of long-term latent cell into infected trans_vecs[2,7] = -1; trans_vecs[3,7] = 1; #state 9: death of long-term latent cell trans_vecs[2,8] = -1; trans_vecs[4,8] = 1; #state 10: death of infected cell trans_vecs[3,9] = -1; trans_vecs[5,9] = 1; #state 11: regeneration of dead healthy cell trans_vecs[4,10] = -1; trans_vecs[0,10] = 1; #state 12: regeneration of dead infected cell into healthy cell trans_vecs[5,11] = -1; trans_vecs[0,11] = 1; #state 13: regeneration of dead infected cell into infected cell trans_vecs[5,12] = -1; trans_vecs[3,12] = 1; #Initialize state variable vectors T = np.zeros(M) E = np.zeros(M) Ex = np.zeros(M) I = np.zeros(M) Dt = np.zeros(M) Di = np.zeros(M) VL = np.zeros(M) ALT = np.zeros(M) state_vec = np.zeros(M) InfectionChain = dict() Infecteds = dict() #Initialize Infected Hepatocyte objects InfectedDict = {} for i in range(0,int(init/2)): x = HCVHepatocyte(i, None, 'Initial', -1, 'Infected', 0) InfectedDict[i] = x for i in range(int(init/2),init): x = HCVHepatocyte(i, None, 'Initial', -83, 'InfectedL', 0) InfectedDict[i] = x LatentDict = {} LatentLDict = {} DeadDict = {} lastCellID = init-1 #get last cellID #Input initial conditions I[0] = init; T[0] = N-init; VL[0] = v_init j =0 InfectionArray = [] while I[j] >= 0 and j<M-1: #print [T[j],E[j],I[j],Dt[j],Di[j]] #Update Q to reflect new number of infected cells and viruses Q[0] = (1-eta)*beta_L*I[j] Q[1] = (1-eta)*beta_V*VL[j] Q[2] = eta*beta_L*I[j] Q[3] = eta*beta_V*VL[j] #Calculate transition matrix Qij = Q*[T[j],T[j],T[j], T[j],T[j], E[j],E[j], Ex[j], Ex[j], I[j], Dt[j], Di[j], Di[j]] #Draw from exponential distributions of waiting times time_vec = -np.log(np.random.random(changes))/Qij #np.random.exponential([1/Qij])[0] # #find minimum waiting time and obtain index to ascertain next state jump newTime = min(time_vec) time_vecL = time_vec.tolist() state_idx = time_vecL.index(min(time_vecL)) state_vec[j] = state_idx [T[j+1],E[j+1],Ex[j+1],I[j+1],Dt[j+1],Di[j+1]]=[T[j],E[j],Ex[j],I[j],Dt[j],Di[j]]+ trans_vecs[:,state_idx] #make adjustments to hepatocyte dictionaries according to state transition #Infection of healthy cell by cell or virus -> latent or longterm latent if state_idx in [0,1,2,3]: Infector = InfectedDict[random.choice(list(InfectedDict.keys()))] #choose random infector cell newCellID = lastCellID + 1 lastCellID = newCellID newLatent = CreateLatentNumba(Infector, newCellID, state_idx, time[j]) if state_idx in [0,1]: LatentDict[newCellID] = newLatent elif state_idx in [2,3]: LatentLDict[newCellID] = newLatent else: print('Incorrect State') #Latent cell becomes infectious elif state_idx in [5,7]: if state_idx == 5: 
LatCell = LatentDict[random.choice(list(LatentDict.keys()))] del LatentDict[LatCell.cellID] #remove cell from Latent Dict elif state_idx == 7: LatCell = LatentLDict[random.choice(list(LatentLDict.keys()))] del LatentLDict[LatCell.cellID] else: print('Incorrect State') InfectedDict[LatCell.cellID] = LatentInfectious(LatCell, time[j]) #add cell to Infected Dict #Latent cell dies elif state_idx == 6: del LatentDict[random.choice(list(LatentDict.keys()))] #LatentL cell dies elif state_idx == 8: del LatentLDict[random.choice(list(LatentLDict.keys()))] #Infected cell dies elif state_idx == 9: KilledCell = InfectedDict[random.choice(list(InfectedDict.keys()))] #choose random infector cell del InfectedDict[KilledCell.cellID] KilledCell.cellType = 'Dead' KilledCell.tDead = time[j] #newDead = KillInfected(KilledCell,time[j]) #DeadDict[newDead.cellID] = newDead DeadDict[KilledCell.cellID] = KilledCell #Dead infected cell regenerates into health cell -- just delete from dead dict elif state_idx == 11: del DeadDict[random.choice(list(DeadDict.keys()))] #Infected cell regenerated from Dead cell elif state_idx == 12: newCellID = lastCellID + 1 lastCellID = newCellID DeadGen = DeadDict[random.choice(list(DeadDict.keys()))] del DeadDict[DeadGen.cellID] newInfected = HCVHepatocyte(newCellID,DeadGen.cellID,'DeadGen', DeadGen.tDead, 'Infected', time[j]) InfectedDict[newInfected.cellID] = newInfected #Output Infection chain and infecteds at each time step #add to array of infections with timestep if state_idx in [0,1,2,3]: if int(time[j]) in InfectionChain: InfectionChain[int(time[j])].append([Infector.cellID, newCellID]) else: InfectionChain[int(time[j])] = [[Infector.cellID, newCellID]] elif state_idx == 12: if int(time[j]) in InfectionChain: InfectionChain[int(time[j])].append([DeadGen.cellID, newInfected.cellID]) else: InfectionChain[int(time[j])] = [DeadGen.cellID, newInfected.cellID] else: if int(time[j]) not in InfectionChain: InfectionChain[int(time[j])] = [] #Infecteds.append(int([time[j]),list(InfectedDict.keys())]) if int(time[j]) in Infecteds: Infecteds[int(time[j])] = list(set(Infecteds[int(time[j])] + InfectedDict.keys() +LatentDict.keys() +LatentLDict.keys())) else: Infecteds[int(time[j])] = InfectedDict.keys() +LatentDict.keys() +LatentLDict.keys() #update viral load and ALT VL[j+1] = np.floor(rho*N_liver*(I[j+1]/N)*R/(gamma*c)) #VL[j] + (I[j]/N)*rho*N_liver*newTime - c*gamma*VL[j]*newTime # ALT[j+1] = ALT[j] + (eps*(nu_T*(T[j] + E[j] + Ex[j]) + nu_I*I[j])-delta*ALT[j])*newTime time[j+1] = time[j] + newTime j+=1 # <codecell> #write out function patterns for each state transition if state_idx in [0,1,2,3]: #Infection of healthy cell by cell or virus -> latent or longterm latent Infector = InfectedDict[random.choice(list(InfectedDict.keys()))] #choose random infector cell newCellID = lastCellID + 1 lastCellID = newCellID newLatent = CreateLatent(Infector, newCellID, state_idx, time[j]) if state_idx ==0: LatentDict[newCellID] = newLatent elif state_idx == 2: LatentLDict[newCellID] = newLatent else: print('Incorrect State') elif state_idx in [6,8]: #Latent cell becomes infectious if state_idx == 6: LatCell = LatentDict[random.choice(list(LatentDict.keys()))] del LatentDict[LatCell.cellID] #remove cell from Latent Dict elif state_idx == 8: LatCell = LatentLDict[random.choice(list(LatentLDict.keys()))] del LatentDict[LatCell.cellID] else: print('Incorrect State') InfectedDict[LatCell.cellID] = LatentInfectious(LatCell, time[j]) #add cell to Infected Dict elif state_idx == 7: #Latent cell dies del 
LatentDict[random.choice(list(LatentDict.keys()))] elif state_idx == 8: #LatentL cell dies del LatentLDict[random.choice(list(LatentLDict.keys()))] elif state_idx == 10: KilledCell = InfectedDict[random.choice(list(InfectedDict.keys()))] #choose random infector cell newDead = KillInfected(KilledCell,time[j]) DeadDict[newDead.cellID] = newDead elif state_idx == 13: #Infected cell regenerated from Dead cell newCellID = lastCellID + 1 lastCellID = newCellID DeadGen = DeadDict[random.choice(list(InfectedDict.keys()))] newInfected = HCVHepatocyte(newCellID,DeadGen.cellID,'DeadGen', DeadGen.tDead, 'Infected', time[j]) InfectedDict[newInfected.cellID] = newInfected # <codecell> ########## elif state_idx in [1,3]: #Infection of healthy cell by virus -> latent or longterm latent InfectorID = random.choice(virionList[j]) Infector = InfectedDict[InfectorID] newCellID +=lastCellID newLatent = CreateLatent(Infector, newID, state_idx, newTime) if state_idx ==0: LatentDict[newCellID] = newLatent elif state_idx == 2: LatentLDict[newCellID] = newLatent else: print('Incorrect State') #Create virion objects from infected cells def GenerateVirions(cellDict, rho,R,gamma,c, N, N_liver): #lastVirusID, #newVirusID = lastVirusID #start ID count virionList = [] #initialize virion list nVirions = int(np.floor(rho*(N_liver/N)*R/(gamma*c))) for idx in cellDict.keys(): newVirions = np.empty(nVirions) newVirions = newVirions.fill(cellDict[idx].cellID) virionList.extend(newVirions) #for i in range(nVirions): # newVirion = [newVirusID,cellDict[idx].cellID] # virionList.append(newVirion) # newVirusID += 1 return virionList, newVirusID # <markdowncell> # Incorporating lineage dynamics: # # create class InfectedCell # # create class LatentCell # # create class LongTermCell # # if transition is infected cell, add new cell to latent class, pick one infected cell randomly and take its sequence, change it by one random step # # if transition is latent cell becomes infectious, randomly choose latent cell and move it to infectious list # # keep a snapshot of what sequences are around at each timestep # # keep an id for each cell # # Latent cell array # # add in latent cells at each time step # # # Infected cell class attributes: # # id # # parent id # # virus or cell infection # # time infectious # # time infected # # longterm # # Latent cell class attributes # # id # # parent id # # time infected # # virus or cell infection # # longterm # # # virion class attributes # # id # # parent id # # lists of infected cells and latent cells at each timestep # # # pseudo code # # create an array of latent cells # # create array of infected cells # # create list of infected cell ids # create list of latent cell ids # create list of longterm cell ids # # # export timestep and infections: which cell(s) infected which on each day # <codecell> cell2 = HCVHepatocyte(2, None, 'Virus', time, 'Infected', time+1) # <codecell> newID = 3 newLatent = CreateLatent(cell2, newID, 0, time) # <codecell> xlist= [] xlist.append(1) # <codecell> np.floor((rho*(N_liver/N)*R/(gamma*c))) # <codecell> del cell2 # <codecell> KillInfected(cell2,time) # <codecell> cell2.tDead # <codecell> cell2 # <codecell>
bsd-2-clause
-4,707,477,566,689,736,000
32.39697
563
0.637987
false
cychenyin/windmill
examples/misc/zk.py
1
6563
from kazoo.client import KazooClient, KazooState ,KeeperState import os, sys import logging import threading from threading import Event def xy_listener(state): if state == KazooState.LOST: # Register somewhere that the session was lost logging.info("session of zk lost") elif state == KazooState.SUSPENDED: # Handle being disconnected from Zookeeper logging.info("conn disconnected to zk") else: # Handle being connected/reconnected to Zookeeper logging.info("conn / reconn to zk") if __name__ == '__main__': logging.basicConfig(format='-------------- %(asctime)s %(levelname)7s: %(message)s [%(lineno)d]') logging.root.setLevel(logging.INFO) logging.info("test") zk = KazooClient(hosts='127.0.0.1:2181') zk.add_listener(xy_listener) zk.start() @zk.add_listener def watch_for_ro(state): if state == KazooState.CONNECTED: if zk.client_state == KeeperState.CONNECTED_RO: print("Read only mode!") else: print("Read/Write mode!") # Determine if a node exists # kazoo.protocol.states.ZnodeStat # ZnodeStat(czxid=7807, mzxid=7809, ctime=1450246467993, mtime=1450246468015, version=1, cversion=1, aversion=0, ephemeralOwner=0, dataLength=9, numChildren=1, pzxid=7808) result = zk.exists("/xy/test") if result: print('exists /xy/test reuslt=%s' % str(result) ) # Ensure a path, create if necessary result = zk.ensure_path("/xy/test") print('ensure_path type of reuslt=%s' % type(result) ) print('ensure_path reuslt=%s' % result ) # Determine if a node exists if zk.exists("/xy/test"): print('exists type of reuslt=%s' % type(result) ) # Create a node with data try: result = zk.create("/xy/test/node", b"a value", acl=None) except Exception, e: print('=========== exception when create node, %s' % e) else: print('=========== create /xy/test/node reuslt=%s' % result ) #reading # Determine if a node exists print('exists /xy/test/node reuslt=%s' % str(zk.exists("/xy/test/node"))) data, stat = zk.get("/xy/test/node") print("//////////////////////////// /xy/test/node Version: %s, data: %s" % (stat.version, data.decode("utf-8"))) print("") print("") print("") # # Print the version of a node and its data # data, stat = zk.get("/xy/test") # print("Version: %s, data: %s" % (stat.version, data.decode("utf-8"))) # # # List the children # children = zk.get_children("/xy/test") # print("There are %s children with names %s" % (len(children), children)) #update try: result = None result = zk.set("/xy/test", b"some data") except Exception, e: print('exception when zk.set, %s' % e) else: print("zk.set /xy/test result %s" % str(result)) # del result = zk.delete("/xy/test/node", recursive=True) print("zk.delete /xy/test/node result %s" % (result)) # action try: result = zk.retry(zk.get, "/xy/test/nodex") except Exception, e: print('exception when zk.retry, %s' % e) else: print("zk.retry /xy/test/nodex result %s" % str(result)) from kazoo.retry import KazooRetry kr = KazooRetry(max_tries=3, ignore_expire=False) try: result = kr(zk.get, "/xy/test/nodex") except Exception, e: print('exception when KazooRetry, %s' % e) else: print("KazooRetry zk.get /xy/test/nodex result %s" % (result)) #watcher def xy_func(event): # check to see what the children are now print("xy_func watcher event:%s" % event) # Call xy_func when the children change try: children = zk.get_children("/xy/test/node", watch=xy_func) except Exception, e: print("exception when get_childeren %s" % str(e)) else: print("zk.get_children /xy/test/node result %s" % (children)) @zk.ChildrenWatch("/xy/test") def watch_children(children): #print("watch_children of /xy/test, Children are now: 
%s" % str(children)) #print("watch_children of /xy/test, Children count: %d" % len(children)) pass # Above function called immediately, and from then on @zk.DataWatch("/xy/test") def watch_node(data, stat): #print("watch_node, Version: %s, data: %s" % (stat.version, data.decode("utf-8"))) pass #trans, great!!! transaction = zk.transaction() transaction.check('/xy/test/node2', version=3) transaction.create('/xy/test/node2', b"a value") result = transaction.commit() print("transaction result %s" % str(result)) print ("----------------------------") # for i in range(1,100): # try: # result = zk.create("/xy/test/node", b"a value", acl=None, sequence=True, ephemeral=True) # except Exception, e: # print('=========== exception when create node, %s' % e) # else: # #print('=========== create /xy/test/node reuslt=%s' % result ) # pass if zk.exists("/xy/test/node"): data, stat = zk.get("/xy/test/node") print("/xy/test/node Version: %s, data: %s" % (stat.version, data.decode("utf-8"))) else: print("/xy/test/node not exists") print ("----------------------------0") zk.create("/xy/test/node", b"a value", acl=None) data, stat = zk.get("/xy/test/node") print("/xy/test/node Version: %s, data: %s" % (stat.version, data.decode("utf-8"))) print ("----------------------------1") zk.delete("/xy/test/node") zk.create('/xy/test/node', b"abc") data, stat = zk.get("/xy/test/node") print("/xy/test/node Version: %s, data: %s" % (stat.version, data.decode("utf-8"))) print ("----------------------------2") zk.delete("/xy/test/node") zk.create('/xy/test/node', b"def", acl=None) data, stat = zk.get('/xy/test/node') print("/xy/test/node Version: %s, data: %s" % (stat.version, data.decode("utf-8"))) print ("----------------------------3") # ev = Event() # ev.set() # wait_seconds = 1 # threading.sleep(10) # while(True): # ev.wait(wait_seconds) # ev.clear() input = None while(input != 'quit' and input != 'exit'): print "print quit or exit to QUIT" input = raw_input() input = input.lower() print "quitting " zk.stop()
mit
5,146,062,269,092,303,000
32.835052
175
0.562091
false
botswana-harvard/ambition-subject
ambition_subject/tests/test_subject_consent.py
1
2089
import re

from ambition_prn.models import OnSchedule
from ambition_rando.tests import AmbitionTestCaseMixin
from django.core.exceptions import ObjectDoesNotExist
from django.test import TestCase, tag
from django.test.utils import override_settings
from edc_base.utils import get_utcnow
from edc_constants.constants import UUID_PATTERN
from edc_registration.models import RegisteredSubject
from model_mommy import mommy

from ..models import SubjectConsent


@override_settings(SITE_ID='10')
class TestSubjectConsent(AmbitionTestCaseMixin, TestCase):

    def setUp(self):
        self.subject_screening = mommy.make_recipe(
            'ambition_screening.subjectscreening')

    def test_allocated_subject_identifier(self):
        """Test consent successfully allocates subject identifier on save.
        """
        options = {
            'screening_identifier': self.subject_screening.screening_identifier,
            'consent_datetime': get_utcnow,
        }
        mommy.make_recipe('ambition_subject.subjectconsent', **options)
        self.assertFalse(
            re.match(
                UUID_PATTERN,
                SubjectConsent.objects.all()[0].subject_identifier))

    def test_consent_creates_registered_subject(self):
        options = {
            'screening_identifier': self.subject_screening.screening_identifier,
            'consent_datetime': get_utcnow,
        }
        self.assertEquals(RegisteredSubject.objects.all().count(), 0)
        mommy.make_recipe('ambition_subject.subjectconsent', **options)
        self.assertEquals(RegisteredSubject.objects.all().count(), 1)

    def test_onschedule_created_on_consent(self):
        subject_consent = mommy.make_recipe(
            'ambition_subject.subjectconsent',
            consent_datetime=get_utcnow,
            screening_identifier=self.subject_screening.screening_identifier)
        try:
            OnSchedule.objects.get(
                subject_identifier=subject_consent.subject_identifier)
        except ObjectDoesNotExist:
            self.fail('ObjectDoesNotExist was unexpectedly raised.')
gpl-3.0
-2,925,024,122,165,359,000
37.685185
80
0.695069
false
taige/PyTools
pyclda/baidu_downloader.py
1
19708
import asyncio import logging import json import os import time from concurrent.futures import CancelledError from pprint import pformat from urllib.parse import quote_plus as quote import aiohttp from aiohttp import hdrs from pyclda import get_mac_ver, async_MD5 from pyclda.aio_downloader import AioDownloader, Status from tsproxy.common import Timeout, fmt_human_time as _fmt_human_time, fmt_human_bytes as _fmt_human_bytes __all__ = ['BaiduDownloader'] logger = logging.getLogger(__name__) class BaiduDownloader: """ aio方式从Baidu Yun下载云端的一个目录或者文件 """ def __init__(self, path, path_concur=1, print_status=10, magic_param='method=download', conf_filename='pyclda.baidu.url.params.json', loop=None, **kwargs): self.path = path self.path_concur = path_concur self.print_status = print_status self.magic_param = magic_param self.loop = loop if loop is not None else asyncio.get_event_loop() self.retry_count = kwargs.get('retry_count', 10) self.baidu_request = BaiduRequest(os.path.expanduser('~/%s' % conf_filename) if os.sep not in conf_filename else conf_filename, self.loop, **kwargs) self._downloaders = [] async def download(self): if self.path.lower().startswith('http://') or self.path.lower().startswith('https://'): return await self.baidu_request.aio_download(self.path) else: return await self._download_path() async def _download_path(self): file_list, failed_list_dirs = await self.baidu_request.path_list(self.path) file_count = len(file_list) base_dir = os.path.dirname(self.path if not self.path.endswith('/') else self.path[:len(self.path) - 1]) if not base_dir.startswith('/'): base_dir = '/' + base_dir for index in range(min(self.path_concur, file_count)): self._downloaders.append(PathDownloader(self.baidu_request, base_dir, self.magic_param, self.retry_count, index, self.loop)) cancelled = None d_futures = [] failed_location_files = [] time_out = time.time() + self.print_status while True: try: for downloader in self._downloaders: if not cancelled and len(file_list) > 0 and downloader.status in ('INITIAL', 'DONE'): d_futures.append(asyncio.ensure_future(downloader.download(file_list.pop(0)))) if len(d_futures) == 0: break results, pending = await asyncio.wait(d_futures, timeout=max(0, time_out - time.time()), return_when=asyncio.FIRST_COMPLETED, loop=self.loop) if len(results) > 0: for result in results: _cncl = result.result() if _cncl is not None: if isinstance(_cncl, str): failed_location_files.append(_cncl) else: cancelled = _cncl break d_futures.clear() d_futures.extend(pending) else: time_out = time.time() + self.print_status raise asyncio.TimeoutError() except asyncio.TimeoutError: logger.warning('=====DOWNLOADER STATUS=====') self._print_downloader_status() if len(file_list) > 0: _tmp = '' for i in range(min(10, len(file_list))): _tmp += '\n\t%s(%s)' % (file_list[i]['path'], _fmt_human_bytes(file_list[i]['size'])) logger.info('files waiting process(Total: %d): %s', len(file_list), _tmp) except CancelledError as ce: cancelled = ce continue logger.debug('=====downloader status=====') self._print_downloader_status(log_level=logging.DEBUG) if len(failed_list_dirs) > 0: logger.info('FAILED list dirs:\n\t%s', failed_list_dirs) if len(failed_location_files) > 0: logger.info('FAILED location files:\n\t%s', failed_location_files) return None, None, cancelled def _print_downloader_status(self, log_level=logging.WARNING): for idx in range(len(self._downloaders)): downloader = self._downloaders[idx] status = downloader.status if isinstance(status, Status): filename = status['filename'] if filename is 
None: filename = status['_filename'] logger.log(log_level, 'downloader.#%d downloading [ETA %s %sB/S %d%%] %s', idx, _fmt_human_time(status.eta()), _fmt_human_bytes(status.down_speed()), status.done_percent(), filename) else: logger.log(log_level, 'downloader.#%d %s', idx, status) class PathDownloader: def __init__(self, baidu_request, base_dir, magic_param, retry_count, index, loop): self.baidu_request = baidu_request self.base_dir = base_dir self.magic_param = magic_param self.retry_count = retry_count self.index = index self.loop = loop self.status = 'INITIAL' async def download(self, file_path): try: _cancelled = await self._download(file_path) except CancelledError as _ce: _cancelled = _ce self.status = 'DONE' if (_cancelled is None or isinstance(_cancelled, str)) else 'CANCELLED' return _cancelled async def _download(self, file_info): self.status = 'READY for download task...' server_file_path = file_info['path'] self.status = 'processing %s' % server_file_path self._logger(logging.DEBUG, "processing %s", server_file_path) server_size = file_info['size'] server_md5 = file_info['md5'] local_file_path = os.path.relpath(server_file_path, self.base_dir) same_size_but_diff_md5 = False if os.path.isfile(local_file_path): local_size = os.stat(local_file_path).st_size if local_size == server_size: self.status = 'calculating md5 of %s' % local_file_path local_md5 = await async_MD5(local_file_path, loop=self.loop) if local_md5.lower() == server_md5.lower(): self._logger(logging.INFO, '%s (%s/%d) has DOWNLOADED to %s' % (server_file_path, server_md5, server_size, local_file_path)) return None else: same_size_but_diff_md5 = True self._logger(logging.INFO, '%s & %s have SAME SIZE but DIFF MD5(%d/%s:%s)' % (server_file_path, local_file_path, server_size, server_md5, local_md5)) else: self._logger(logging.INFO, '%s(%d) maybe partial downloaded at %s(%d)' % (server_file_path, server_size, local_file_path, local_size)) self.status = 'requesting location of %s' % server_file_path urls = await self.baidu_request.path_file_location(server_file_path, _logger=self._logger) if urls is None or len(urls) == 0 or 'url' not in urls[0] or not urls[0]['url']: # self.failed_location_files.append(server_file_path) self._logger(logging.WARNING, '%s location request FAILED: %s', server_file_path, pformat(urls)) return server_file_path self._logger(logging.DEBUG, '%s locations: %s', server_file_path, pformat(urls)) _cancelled = None for url_idx, j_url in enumerate(urls): if 'url' not in j_url: continue url = j_url['url'] if self.magic_param not in url: url += '&%s' % self.magic_param.lower() retry_count = max(0, self.retry_count - url_idx) if same_size_but_diff_md5: self.status = 're-requesting server-side md5 of %s' % server_file_path headers = await self.baidu_request.aio_request_with_retry(url, header=True, _logger=self._logger) server_md5 = headers.get(hdrs.CONTENT_MD5) if headers is not None else None if server_md5 and local_md5.lower() == server_md5.lower(): self._logger(logging.WARNING, '%s & %s have SAME SIZE AND MD5(%d/%s)' % (server_file_path, local_file_path, server_size, server_md5)) return None else: self._logger(logging.WARNING, '%s & %s have SAME SIZE but DIFF MD5(%d/%s:%s) RE-Download' % (server_file_path, local_file_path, server_size, server_md5, local_md5)) self._logger(logging.DEBUG, 'Downloading %s ...', server_file_path) self.status = Status(_filename=file_info['server_filename']) _done, _out_file, _cancelled = await self.baidu_request.aio_download(url, out_file=local_file_path, 
status=self.status, retry_count=retry_count) if not _done and _cancelled is None: self._logger(logging.WARNING, '%s download FAILED, maybe let\'s try next url', server_file_path) continue if _done: self._logger(logging.WARNING, '%s download SUCCESS, saved to %s', server_file_path, _out_file) else: self._logger(logging.WARNING, '%s download CANCELLED, check the log for detail', server_file_path) break return _cancelled def _logger(self, level, msg, *log_args, exc_info=False, **log_kwargs): if not logger.isEnabledFor(level): return logger.log(level, '_downloader.#%d ' % self.index + msg, *log_args, exc_info=exc_info, **log_kwargs) class BaiduRequest: def __init__(self, conf_filename, loop, **kwargs): try: with open(conf_filename, 'r') as f: self.conf = json.load(f) except Exception as e: logger.warning('load %s fail: %s', conf_filename, e) self.conf = {} self.loop = loop if 'headers' in kwargs: dict_headers = kwargs['headers'] dict_headers.setdefault('X-Download-From', 'baiduyun') dict_headers.setdefault('User-Agent', 'netdisk;2.1.0;pc;pc-mac;%s;macbaiduyunguanjia' % get_mac_ver()) _, _headers = self._get_baidu_url_params('headers') for k in _headers: dict_headers.setdefault(k, _headers[k]) self.kwargs = kwargs self._path_list_url = 'https://pan.baidu.com/api/list' self._file_loc_url = 'https://d.pcs.baidu.com/rest/2.0/pcs/file' async def aio_download(self, url, **kwargs): _kwargs = self.kwargs.copy() _kwargs.update(kwargs) return await AioDownloader(url, loop=self.loop, **_kwargs).download() async def path_list(self, path, _logger=logger.log): buffer_file = '.%s.file_list_buffering' % path.replace('/', '__') expired_time = 24 * 3600 if os.path.isfile(buffer_file) and (time.time() - os.stat(buffer_file).st_mtime) < expired_time: with open(buffer_file, 'r') as f: buf_j = json.load(f) file_list = buf_j['file_list'] failed_list_dirs = buf_j['failed_list_dirs'] if len(failed_list_dirs) == 0: _logger(logging.DEBUG, 'reuse buffering list') return file_list, failed_list_dirs else: file_list = [] failed_list_dirs = [] path_list_url_params = { 'devuid': '', 'channel': 'MAC_%s_MacBookPro15,1_netdisk_1099a' % get_mac_ver(), 'cuid': '', 'time': '%d' % time.time(), 'clienttype': '21', 'rand': '', 'logid': '', 'version': '2.1.0', 'vip': '0', 'limit': '1001', 'order': 'time', 'folder': '0', 'desc': '1', 'start': '0', } path_list_url = self._compose_baidu_url(self._path_list_url, **path_list_url_params) concurrent = {'a': 1} _failed_list_dirs = [] async def _request_file_list(_dir, search_depth=None, depth=1): _url = '%s&dir=%s' % (path_list_url, quote(_dir)) _logger(logging.INFO, 'listing dir: %s', _dir) j = await self.aio_request_with_retry(_url) concurrent['a'] -= 1 if j is None or 'list' not in j: _failed_list_dirs.append(_dir) _logger(logging.WARNING, 'list dir %s FAILED: %s', _dir, pformat(j)) elif len(j['list']) > 0: j['list'].sort(key=lambda _p: _p['isdir'], reverse=True) for _path in j['list']: _logger(logging.DEBUG, '%s%s%s', '\t' * depth, _path['path'], '/' if _path['isdir'] else '') while len(j['list']) > 0: futures = [] while len(j['list']) > 0: _path = j['list'].pop(0) _logger(4, 'list: %s', pformat(_path)) if _path['isdir']: if search_depth is not None and depth+1 > search_depth: continue futures.append(_request_file_list(_path['path'], search_depth=search_depth, depth=depth+1)) concurrent['a'] += 1 if concurrent['a'] >= 10: break else: file_list.append(_path) if len(futures) > 0: await asyncio.gather(*futures, loop=self.loop) _logger(logging.DEBUG, 'list dir(%d) DONE: %s', concurrent['a'], 
_dir) if len(failed_list_dirs) == 0: await _request_file_list(path) if len(file_list) == 0 and len(_failed_list_dirs) == 0 and not path.endswith('/'): parent_path = os.path.dirname(path) await _request_file_list(parent_path, search_depth=1) if len(file_list) > 0: _found = False for _f in file_list: if _f['path'] == path: file_list = [_f] _found = True break if not _found: return [], _failed_list_dirs else: for f_dir in failed_list_dirs: await _request_file_list(f_dir) if len(file_list) == 0 and len(_failed_list_dirs) == 0: return file_list, _failed_list_dirs file_list.sort(key=lambda _f: _f['path']) try: with open(buffer_file + '.ing', 'w') as pf: json.dump({ 'file_list': file_list, 'failed_list_dirs': _failed_list_dirs }, pf, indent=2) os.rename(buffer_file + '.ing', buffer_file) except: pass return file_list, _failed_list_dirs async def path_file_location(self, server_path, _logger=logger.log): file_loc_url_params = { 'devuid': '', 'channel': 'MAC_%s_MacBookPro15,1_netdisk_1099a' % get_mac_ver(), 'cuid': '', 'time': '', 'clienttype': '21', 'rand': '', 'logid': '', 'version': '2.1.0', 'vip': '0', 'app_id': '', 'err_ver': '1.0', 'ehps': '1', 'dtype': '1', 'ver': '4.0', 'dp-logid': '', 'check_blue': '1', 'esl': '1', 'method': 'locatedownload', } file_loc_url = self._compose_baidu_url(self._file_loc_url, **file_loc_url_params) _url = '%s&path=%s' % (file_loc_url, quote(server_path)) _logger(logging.DEBUG, "%s going to request location", server_path) j = await self.aio_request_with_retry(_url, _logger=_logger) return j['urls'] if j is not None and 'urls' in j else None def _compose_baidu_url(self, url, _logger=logger.log, **kwargs): baidu_url, params_from_confile = self._get_baidu_url_params(url) kwargs.update(params_from_confile) for idx, k in enumerate(kwargs): v = kwargs[k] baidu_url += ('?' if idx == 0 else '&') + '%s=%s' % (k, quote(v)) return baidu_url async def aio_request_with_retry(self, url, header=False, _logger=logger.log): return await self._aio_request_with_retry(url, header=header, _logger=_logger, **self.kwargs) async def _aio_request_with_retry(self, url, header=False, read_timeout=58, conn_timeout=5, limit_per_host=50, retry_count=10, retry_interval=2, _logger=logger.log, **kwargs): kwargs.pop('n', None) kwargs.pop('method', None) io_retry = 0 method = 'HEAD' if header else 'GET' while True: _logger(6, '%sing.#%d %s', method, io_retry, url) try: async with aiohttp.ClientSession(connector=aiohttp.TCPConnector(limit_per_host=limit_per_host, enable_cleanup_closed=True, force_close=True), conn_timeout=conn_timeout, raise_for_status=True, loop=self.loop) as session: with Timeout(conn_timeout + 1, loop=self.loop): resp = await session.request(method=method, url=url, **kwargs) if header: _logger(6, '%s headers: %s', url[:50], resp.headers) return resp.headers with Timeout(read_timeout, loop=self.loop): j = await resp.json(content_type='') return j except (asyncio.TimeoutError, aiohttp.ClientResponseError) as err: _logger(logging.WARNING, '%sing.#%d %s\n\terror: %s', method, io_retry, url, err) if isinstance(err, aiohttp.ClientResponseError): if 400 <= err.code < 500: break io_retry += 1 if io_retry < retry_count: await asyncio.sleep(retry_interval * io_retry, loop=self.loop) else: break return None def _get_baidu_url_params(self, url: str) -> (str, dict): """ 从json配置文件中读取baidu访问的url的和参数 json文件格式格式: { "headers": { "params": { "<key1>": "<value1>", "<key2>": "<value2>", ... } } "<URL-1>": { "url": "<alternative_url>", "params": { "devuid": "<alternative_devuid>", ... 
                }
            },
            "<URL-2>": {
                "url": ...,
                "params": ...
            }
        }

        <URL-*> can be:
          1. https://pan.baidu.com/api/list (URL for listing a directory)
          2. https://d.pcs.baidu.com/rest/2.0/pcs/file (URL for getting a file's download location)
        :param url:
        :return:
        """
        if url in self.conf:
            conf = self.conf[url]
            if 'url' in conf:
                url = conf['url']
            if 'params' in conf:
                return url, conf['params'].copy()
        return url, {}
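

# A minimal, illustrative sketch of the JSON layout that _get_baidu_url_params() above expects
# in ~/pyclda.baidu.url.params.json. All values are placeholders (assumed, not real credentials);
# only the 'headers'/'params'/'url' keys documented in the docstring are used, and the snippet is
# guarded by __main__ so importing this module is unaffected.
if __name__ == '__main__':
    import json as _json
    _example_conf = {
        'headers': {
            'params': {
                # hypothetical header values a user would fill in themselves
                'Cookie': '<your-session-cookie>',
                'Referer': 'https://pan.baidu.com/disk/home',
            }
        },
        'https://pan.baidu.com/api/list': {
            'params': {'devuid': '<device-uuid>', 'logid': '<log-id>'},
        },
        'https://d.pcs.baidu.com/rest/2.0/pcs/file': {
            'params': {'app_id': '<app-id>'},
        },
    }
    print(_json.dumps(_example_conf, indent=2))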
apache-2.0
8,793,628,935,922,613,000
42.745536
198
0.520002
false
google/ctfscoreboard
scoreboard/tests/base.py
1
8885
# Copyright 2016 Google LLC. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Base test module, MUST be imported first.""" import contextlib import copy import functools import json import logging import os import os.path import pbkdf2 import time import unittest import flask from flask import testing import flask_testing from sqlalchemy import event from scoreboard import attachments from scoreboard import cache from scoreboard import main from scoreboard import models from scoreboard import utils class BaseTestCase(flask_testing.TestCase): """Base TestCase for scoreboard. Monkey-patches the app and db objects. """ TEST_CONFIG = dict( PRESERVE_CONTEXT_ON_EXCEPTION=False, SECRET_KEY='testing-session-key', SQLALCHEMY_DATABASE_URI="sqlite://", TEAMS=True, TEAM_SECRET_KEY='different-secret', TESTING=True, DEBUG=False, ATTACHMENT_BACKEND='test://volatile', ) def create_app(self): """Called by flask_testing.""" app = main.get_app() app.config.update(self.TEST_CONFIG) attachments.patch("test") main.setup_logging(app) return app def setUp(self): """Re-setup the DB to ensure a fresh instance.""" super(BaseTestCase, self).setUp() # Reset config on each call try: app = main.get_app() app.config = copy.deepcopy(self.app._SAVED_CONFIG) except AttributeError: self.app._SAVED_CONFIG = copy.deepcopy(app.config) models.db.init_app(app) models.db.create_all() cache.global_cache = cache.cache.NullCache() # Reset cache def tearDown(self): models.db.session.remove() models.db.drop_all() super(BaseTestCase, self).tearDown() def queryLimit(self, limit=None): return MaxQueryBlock(self, limit) def assertItemsEqual(self, a, b, msg=None): a = list(a) b = list(b) a.sort() b.sort() if len(a) == len(b): success = True for c, d in zip(a, b): if c != d: success = False break if success: return None if msg is not None: raise AssertionError(msg) raise AssertionError('Items not equal: %r != %r', a, b) class RestTestCase(BaseTestCase): """Special features for testing rest handlers.""" def setUp(self): super(RestTestCase, self).setUp() # Monkey patch pbkdf2 for speed self._orig_pbkdf2 = pbkdf2.crypt pbkdf2.crypt = self._pbkdf2_dummy # Setup some special clients self.admin_client = AdminClient( self.app, self.app.response_class) self.authenticated_client = AuthenticatedClient( self.app, self.app.response_class) def tearDown(self): super(RestTestCase, self).tearDown() pbkdf2.crypt = self._orig_pbkdf2 def postJSON(self, path, data, client=None): client = client or self.client return client.post( path, data=json.dumps(data), content_type='application/json') def putJSON(self, path, data, client=None): client = client or self.client return client.put( path, data=json.dumps(data), content_type='application/json') @contextlib.contextmanager def swapClient(self, client): old_client = self.client self.client = client yield self.client = old_client @staticmethod def _pbkdf2_dummy(value, *unused_args): return value class AuthenticatedClient(testing.FlaskClient): """Like TestClient, but authenticated.""" def __init__(self, *args, 
**kwargs): super(AuthenticatedClient, self).__init__(*args, **kwargs) self.team = models.Team.create('team') self.password = 'hunter2' self.user = models.User.create( '[email protected]', 'Authenticated', self.password, team=self.team) models.db.session.commit() self.uid = self.user.uid self.tid = self.team.tid def open(self, *args, **kwargs): with self.session_transaction() as sess: sess['user'] = self.uid sess['team'] = self.tid sess['expires'] = time.time() + 3600 return super(AuthenticatedClient, self).open(*args, **kwargs) class AdminClient(testing.FlaskClient): """Like TestClient, but admin.""" def __init__(self, *args, **kwargs): super(AdminClient, self).__init__(*args, **kwargs) self.user = models.User.create('[email protected]', 'Admin', 'hunter2') self.user.admin = True models.db.session.commit() self.uid = self.user.uid def open(self, *args, **kwargs): with self.session_transaction() as sess: sess['user'] = self.uid sess['admin'] = True sess['expires'] = time.time() + 3600 return super(AdminClient, self).open(*args, **kwargs) class MaxQueryBlock(object): """Run a certain block with a maximum number of queries.""" def __init__(self, test=None, max_count=None): self.max_count = max_count self.queries = [] self._sql_listen_args = ( models.db.engine, 'before_cursor_execute', self._count_query) self.test_id = test.id() if test else '' def __enter__(self): event.listen(*self._sql_listen_args) return self def __exit__(self, exc_type, exc_value, exc_traceback): event.remove(*self._sql_listen_args) if exc_type is not None: return False if self.test_id: limit_msg = ((' Limit: %d.' % self.max_count) if self.max_count is not None else '') logging.info('%s executed %d queries.%s', self.test_id, len(self.queries), limit_msg) if self.max_count is None: return if len(self.queries) > self.max_count: message = ('Maximum query count exceeded: limit %d, executed %d.\n' '----QUERIES----\n%s\n----END----') % ( self.max_count, len(self.queries), '\n'.join(self.queries)) raise AssertionError(message) @property def query_count(self): return len(self.queries) def _count_query(self, unused_conn, unused_cursor, statement, parameters, unused_context, unused_executemany): statement = '%s (%s)' % ( statement, ', '.join(str(x) for x in parameters)) self.queries.append(statement) logging.debug('SQLAlchemy: %s', statement) def authenticated_test(f): """Swaps out the client for an authenticated client.""" @functools.wraps(f) def wrapped_test(self): with self.swapClient(self.authenticated_client): return f(self) return wrapped_test def admin_test(f): """Swaps out the client for an admin client.""" @functools.wraps(f) def wrapped_test(self): with self.swapClient(self.admin_client): return f(self) return wrapped_test def run_all_tests(pattern='*_test.py'): """This loads and runs all tests in scoreboard.tests.""" if os.getenv("DEBUG_TESTS"): logging.getLogger().setLevel(logging.DEBUG) else: logging.getLogger().setLevel(logging.INFO) test_dir = os.path.dirname(os.path.realpath(__file__)) top_dir = os.path.abspath(os.path.join(test_dir, '..')) suite = unittest.defaultTestLoader.discover( test_dir, pattern=pattern, top_level_dir=top_dir) result = unittest.TextTestRunner().run(suite) return result.wasSuccessful() def json_monkeypatch(): """Automatically strip our XSSI header.""" def new_loads(data, *args, **kwargs): try: prefix = utils.to_bytes(")]}',\n") if data.startswith(prefix): data = data[len(prefix):] return json.loads(data, *args, **kwargs) except Exception as exc: logging.exception('JSON monkeypatch failed: %s', 
exc) flask.json.loads = new_loads json_monkeypatch()
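A brief usage sketch (not part of the scoreboard test module above): it shows how a derived test could combine RestTestCase, the authenticated_test decorator and queryLimit; the /api/teams endpoint and the limit of 5 queries are illustrative assumptions, not values taken from the scoreboard code.

class ExampleRestTest(RestTestCase):  # hypothetical test case for illustration

    @authenticated_test
    def testListSomething(self):
        # Fail the test if the request issues more than 5 SQL queries (assumed limit).
        with self.queryLimit(5):
            resp = self.client.get('/api/teams')  # assumed endpoint
        self.assertEqual(200, resp.status_code)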
apache-2.0
-9,067,678,353,243,556,000
30.960432
79
0.597299
false
appcove/FileStruct
Python/FileStruct/test/test_core.py
1
36712
# vim:fileencoding=utf-8:ts=2:sw=2:expandtab # # Copyright 2013 AppCove, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from os.path import ( join, exists, isfile, isdir, islink, dirname, basename, normpath, sameopenfile, samefile ) import unittest import os import io import pwd import grp import tempfile import shutil import json import hashlib import random import string import contextlib try: import FileStruct except ImportError: # Make sure "python -m unittest discover" will work from source checkout import sys src_dir = join(dirname(__file__), '..', '..') assert isdir(normpath(join(src_dir, 'FileStruct'))), normpath(join(src_dir, 'FileStruct')) sys.path.insert(0, src_dir) try: import FileStruct finally: sys.path.pop(0) class TestClientBase(unittest.TestCase): def setUp(self): self.ValidConfig = '{"Version":1}' self.ValidConfigVersion = 1 self.Path = tempfile.mkdtemp(suffix='_FileStruct_Test') self.DataPath = join(self.Path, 'Data') # expected to be there by e.g. httpd self.PathConfig = join(self.Path, 'FileStruct.json') def tearDown(self): try: shutil.rmtree(self.Path) except OSError: pass def write_config(self, config, json_encode=True): if json_encode: config = json.dumps(config, indent=2) with open(self.PathConfig, 'w', encoding='utf-8') as fp: fp.write(config) def client_from_valid_config(self): return self.client_from_config(self.ValidConfig, False) def client_from_config(self, config, json_encode=True): self.write_config(config, json_encode=json_encode) return FileStruct.Client(self.Path) def client_from_config_err(self, config, json_encode=True): with self.assertRaises(FileStruct.ConfigError): self.client_from_config(config, json_encode=json_encode) class TestClientBasic(TestClientBase): def test_Minimal(self): self.assertTrue(self.client_from_valid_config()) # to make sure that "if client:" will work self.client_from_config({'Version': 1}) self.client_from_config({'Version': '1'}) self.client_from_config('{"Version": "1"}', False) def test_Comments(self): self.client_from_config('''{ # Some test comment "Version": 1}''', False) self.client_from_config('''{ # Some test comment "Version": 1 #Waka waka }''', False) self.client_from_config('{"Version": 1\r#test}\n}', False) def test_ExtraData(self): self.client_from_config({'Version': 1, 'WhateverKey': 2, 3: 'SomeValue'}) self.client_from_config({'Version': 1, 'User': 'whoever', 'Group': 'whatever'}) def test_UserGroup(self): self.client_from_config({'Version': 1, 'User': 123, 'Group': 456}) self.client_from_config({'Version': 1, 'User': 'one', 'Group': 'two'}) self.client_from_config({'Version': 1, 'User': None, 'Group': None}) self.client_from_config({'Version': 1, 'User': [1, 'asd'], 'Group': {'a': [42]}}) class TestClientInvalidConfig(TestClientBase): def test_EmptyOrGibberish(self): self.client_from_config_err('', False) self.client_from_config_err('some non-json text {{{', False) self.client_from_config_err('{"Version": 1', False) self.client_from_config_err("Version: 1", False) self.client_from_config_err('{"Version": 1}') 
self.client_from_config_err('{"Version": 1\n#comment }', False) self.client_from_config_err('{"Version": 1#comment }', False) self.client_from_config_err('{#"Version": 1}', False) self.client_from_config_err('#{"Version": 1}', False) self.client_from_config_err(self.ValidConfig.rstrip() + '#test', False) def test_Signature(self): self.write_config(self.ValidConfig, False) FileStruct.Client(Path=self.Path, InternalLocation='/Whatever') FileStruct.Client(self.Path, '/Whatever') with self.assertRaises(TypeError): FileStruct.Client() with self.assertRaises(TypeError): FileStruct.Client(InternalLocation='/Whatever') def test_Types(self): self.write_config(self.ValidConfig, False) with self.assertRaises(Exception): FileStruct.Client(None) with self.assertRaises(Exception): FileStruct.Client(object()) FileStruct.Client(self.Path, None) FileStruct.Client(self.Path, object()) with self.assertRaises(TypeError): FileStruct.Client(self.Path.encode('utf-8')) FileStruct.Client(self.Path, b'/Whatever') def test_RandomJSON(self): self.client_from_config_err({}) self.client_from_config_err(None) self.client_from_config_err({'RandomKey': 'WhateverValue'}) self.client_from_config_err({'Verson': 1}) self.client_from_config_err([1, 2, {'test': 'data'}]) def test_Version(self): # Allow for any positive-int one for ver in [-1,0,'1beta','beta1','-1','a','1a','123b',[]]: self.client_from_config_err({'Version': ver}) def test_NoConfig(self): self.assertFalse(exists(self.PathConfig)) with self.assertRaises(FileStruct.ConfigError): FileStruct.Client(self.Path) self.assertFalse(exists(self.PathConfig)) # make sure it wasn't auto-created self.write_config(self.ValidConfig, False) os.chmod(self.PathConfig, 0) with self.assertRaises(FileStruct.ConfigError): FileStruct.Client(self.Path) self.assertTrue(exists(self.PathConfig)) class TestClientDBDir(TestClientBase): def find_xid(self, getent, skip_check=None, nx=False): xid = 1 while xid < 65535: xid += 1 try: struct = getent(xid) except KeyError: if not nx: continue else: struct = xid else: if nx: continue if skip_check and skip_check(struct): continue return xid def mangle_stat(self, stat_result, gid=False, uid=False): self.assertTrue(uid or gid) uid_real_pwd_struct = pwd.getpwuid(os.geteuid()) dir_grp_struct = grp.getgrgid(stat_result.st_gid) # Make sure "original" data passes *expected* check stat_result = type( 'mock_stat', (object,), dict((k, getattr(stat_result, k)) for k in dir(stat_result) if not k.startswith('_')) ) self.assertFalse( uid_real_pwd_struct.pw_gid != dir_grp_struct.gr_gid and uid_real_pwd_struct.pw_name not in dir_grp_struct.gr_mem ) if uid: if uid is not True: stat_result.st_uid = uid else: stat_result.st_uid = self.find_xid( pwd.getpwuid, lambda s: s.pw_uid == stat_result.st_uid\ or s.pw_name in dir_grp_struct.gr_mem ) if gid: if gid is not True: stat_result.st_gid = gid else: stat_result.st_gid = self.find_xid( grp.getgrgid, lambda s: s.gr_gid == stat_result.st_gid\ or uid_real_pwd_struct.pw_name in s.gr_mem ) return stat_result def test_MissingDir(self): os.rmdir(self.Path) self.assertFalse(exists(self.Path)) with self.assertRaises(FileStruct.ConfigError): FileStruct.Client(self.Path) self.assertFalse(exists(self.Path)) # make sure it wasn't auto-created # Valid config file in place of dir with open(self.Path, 'w', encoding='utf-8') as fp: fp.write(self.ValidConfig) with self.assertRaises(FileStruct.ConfigError): FileStruct.Client(self.Path) self.assertTrue(isfile(self.Path)) def test_DataDirCreated(self): # Expected to be there, according to 
frontend httpd setup self.assertFalse(isdir(self.DataPath)) self.client_from_valid_config() self.assertTrue(isdir(self.DataPath)) def test_GidMismatch(self): geteuid = os.geteuid os.geteuid = lambda: self.find_xid( pwd.getpwuid, lambda s: s.pw_uid == stat_result.st_uid\ or s.pw_gid == os.stat(self.Path).gr_gid\ or s.pw_name in dir_grp_struct.gr_mem ) try: with self.assertRaises(FileStruct.ConfigError): self.client_from_valid_config() finally: os.geteuid = geteuid def test_NoPasswdGroupEntries(self): geteuid, os.geteuid = os.geteuid, lambda: self.find_xid(pwd.getpwuid, nx=True) try: with self.assertRaises(KeyError): pwd.getpwuid(os.geteuid()) with self.assertRaises(FileStruct.ConfigError): self.client_from_valid_config() finally: os.geteuid = geteuid getegid, os.getegid = os.getegid, lambda: self.find_xid(grp.getgrgid, nx=True) try: with self.assertRaises(KeyError): grp.getgrgid(os.getegid()) with self.assertRaises(FileStruct.ConfigError): self.client_from_valid_config() finally: os.getegid = getegid def test_InvalidDirGid(self): stat, os.stat = os.stat, lambda path: self.mangle_stat(stat(path), gid=True) try: with self.assertRaises(FileStruct.ConfigError): self.client_from_valid_config() finally: os.stat = stat def test_AnyDirUidInPasswdWorks(self): stat, os.stat = os.stat, lambda path: self.mangle_stat(stat(path), uid=True) try: self.client_from_valid_config() finally: os.stat = stat def test_NoDirGidEntry(self): stat, os.stat = os.stat, lambda path:\ self.mangle_stat(stat(path), gid=self.find_xid(grp.getgrgid, nx=True)) try: with self.assertRaises(FileStruct.ConfigError): self.client_from_valid_config() finally: os.stat = stat class TestClientOps(TestClientBase): class UnhandledTestException(Exception): pass def setUp(self): super(TestClientOps, self).setUp() self.write_config(self.ValidConfig, False) self.InternalLocation = '/Whatever' self.Client = FileStruct.Client(self.Path, self.InternalLocation) self.FileContents = b'abcd' self.FileHash = self.Client.PutData(self.FileContents) self.FileHashEmpty = self.Client.PutData(b'') os.unlink(self.Client[self.FileHashEmpty].Path) self.FileContentsNX = b'abcde' self.FileHashNX = self.Client.PutData(self.FileContentsNX) self.FilePathNX = self.Client[self.FileHashNX].Path os.unlink(self.Client[self.FileHashNX].Path) random.seed(42) self.FileHashInvalidList = [ self.FileHash[:-1] + '\0', 'x' + self.FileHash[1:], '123', 'варвр', ''.join( random.choice(string.hexdigits.lower()) for i in range(len(self.FileHash) - 2) ), self.FileHash[2:], self.FileHash + 'a123', self.FileHash[:-2] + 'AB' ] self.FileHashInvalidType = [ None, object(), str, type, True, self.FileHash.encode('ascii') ] class TestClientAttrs(TestClientOps): def test_PublicAttrs(self): self.assertEqual(self.Path, self.Client.Path) self.assertEqual(self.DataPath, self.Client.DataPath) def test_PrivateAttrs(self): self.assertEqual(self.PathConfig, self.Client.ConfPath) self.assertEqual(self.ValidConfigVersion, self.Client.Version) self.assertEqual(self.InternalLocation, self.Client.InternalLocation) class TestClientHashes(TestClientOps): def test_Get(self): self.assertTrue(self.Client[self.FileHash]) with self.assertRaises(KeyError): self.Client[self.FileHashNX] def test_GetInvalidHash(self): for bad_hash in self.FileHashInvalidList: with self.assertRaises(ValueError): self.Client[bad_hash] def test_GetInvalidType(self): for bad_type in self.FileHashInvalidType: with self.assertRaises(TypeError): self.Client[bad_type] def test_Contains(self): self.assertIs(self.FileHash in self.Client, 
True) self.assertIs(self.FileHashNX in self.Client, False) def test_ContainsInvalidType(self): for bad_type in self.FileHashInvalidType: with self.assertRaises(TypeError): bad_type in self.Client def test_ContainsInvalidHash(self): for bad_hash in self.FileHashInvalidList: self.assertFalse(bad_hash in self.Client) def test_InternalURI(self): self.assertTrue(self.Client.HashToInternalURI(self.FileHash)) self.assertTrue(self.Client.HashToInternalURI(self.FileHashNX)) self.assertTrue(self.Client.HashToInternalURI(self.FileHashEmpty)) self.assertIsInstance(self.Client.HashToInternalURI(self.FileHash), str) for bad_hash in self.FileHashInvalidList: with self.assertRaises(ValueError): self.Client.HashToInternalURI(bad_hash) for bad_type in self.FileHashInvalidType: with self.assertRaises(TypeError): self.Client.HashToInternalURI(bad_type) self.assertTrue( self.Client.HashToInternalURI(self.FileHash).startswith(self.InternalLocation + '/') ) self.assertFalse('//' in self.Client.HashToInternalURI(self.FileHash)) self.assertTrue( self.Client.HashToInternalURI(self.FileHashNX).startswith(self.InternalLocation + '/') ) def test_InternalURISlashes(self): # Make sure paths are *not* auto-fixed (if fails, update docs) bad_path = '//some///broken/path/../whatever//./' client = FileStruct.Client(self.Path, bad_path) file_hash = client.PutData(self.FileContents) self.assertTrue(client.HashToInternalURI(file_hash).startswith(bad_path)) self.assertFalse(client.HashToInternalURI(file_hash).startswith(bad_path + '/')) def test_InternalURIRecode(self): # Make sure path encoding is *not* auto-fixed (if fails, update docs) bad_path = '//some\0///broken\n\n/path/../фывапр//./' client = FileStruct.Client(self.Path, bad_path) file_hash = client.PutData(self.FileContents) self.assertTrue(client.HashToInternalURI(file_hash).startswith(bad_path)) def test_Path(self): self.assertTrue(self.Client.HashToPath(self.FileHash)) self.assertTrue(self.Client.HashToPath(self.FileHashNX)) self.assertIsInstance(self.Client.HashToPath(self.FileHash), str) for bad_hash in self.FileHashInvalidList: with self.assertRaises(ValueError): self.Client.HashToPath(bad_hash) for bad_type in self.FileHashInvalidType: with self.assertRaises(TypeError): self.Client.HashToPath(bad_type) self.assertTrue( self.Client.HashToPath(self.FileHash).startswith(self.Path) ) self.assertTrue( self.Client.HashToPath(self.FileHashNX).startswith(self.Path) ) def test_GetTempDir(self): self.assertTrue(self.Client.TempDir) class ClientGetTestsMixin: # re-used for HashFile and TempFile tests def get_client_file_obj(self): raise NotImplementedError def get_client_file_data(self): raise NotImplementedError def test_GetData(self): self.assertEqual(self.get_client_file_obj().GetData(), self.get_client_file_data()) def test_GetStream(self): file_obj = self.get_client_file_obj() stream = file_obj.GetStream() try: self.assertEqual(stream.tell(), 0) self.assertEqual(stream.read(), self.get_client_file_data()) self.assertEqual(stream.tell(), len(self.get_client_file_data())) stream.seek(0) self.assertEqual(stream.tell(), 0) self.assertTrue(stream.name) finally: stream.close() stream.close() # should not raise errors on double-close self.assertTrue(stream.closed) def test_GetStreamContext(self): with self.get_client_file_obj().GetStream() as stream: self.assertEqual(stream.read(), self.get_client_file_data()) def test_GetStreamFD(self): stream = self.get_client_file_obj().GetStream() try: self.assertIsInstance(stream.fileno(), int) with open(stream.fileno(), 'rb', 
closefd=False) as stream_clone: self.assertEqual(stream_clone.read(), self.get_client_file_data()) self.assertEqual(stream.tell(), len(self.get_client_file_data())) self.assertFalse(stream.closed) finally: stream.close() def test_GetFailNX(self): file_obj = self.get_client_file_obj() os.unlink(file_obj.Path) with self.assertRaises(FileNotFoundError): file_obj.GetData() with self.assertRaises(FileNotFoundError): file_obj.GetStream() def test_GetFailPerm(self): file_obj = self.get_client_file_obj() os.chmod(file_obj.Path, 0) with self.assertRaises(PermissionError): file_obj.GetData() with self.assertRaises(PermissionError): file_obj.GetStream() def test_GetFailContext(self): with self.assertRaises(self.UnhandledTestException): with self.get_client_file_obj().GetStream() as stream: raise self.UnhandledTestException() self.assertTrue(stream.closed) def test_GetStreamNoCache(self): # Make sure that GetStream doesn't create temporary # file and refers to the same fs object as BaseFile.Path file_obj = self.get_client_file_obj() stream = file_obj.GetStream() self.assertTrue(samefile(stream.name, file_obj.Path)) with open(file_obj.Path, 'rb') as tmp: self.assertTrue(sameopenfile(stream.fileno(), tmp.fileno())) try: os.unlink(file_obj.Path) self.assertFalse(stream.closed) self.assertFalse(exists(stream.name)) with self.assertRaises(FileNotFoundError): file_obj.GetStream() finally: stream.close() class TestClientFile(TestClientOps, ClientGetTestsMixin): def test_Removal(self): self.assertFalse(self.FileHashNX in self.Client) with self.assertRaises(KeyError): self.Client[self.FileHashNX] file_hash = self.Client.PutData(b'dsdfjlkjasdjkasd') file_path = self.Client[file_hash].Path os.unlink(file_path) self.assertFalse(file_hash in self.Client) with self.assertRaises(KeyError): self.Client[file_hash] self.assertFalse(exists(file_path)) def test_StreamFile(self): with tempfile.TemporaryFile() as tmp: tmp.write(self.FileContentsNX) tmp.seek(0) file_hash = self.Client.PutStream(tmp) self.assertFalse(tmp.closed) self.assertTrue(tmp.tell() == len(self.FileContentsNX)) tmp.seek(0) self.assertEqual(tmp.read(), self.FileContentsNX) self.assertEqual(file_hash, self.FileHashNX) self.assertTrue(isfile(self.Client[self.FileHashNX].Path)) with open(self.Client[self.FileHashNX].Path, 'rb') as src: self.assertEqual(src.read(), self.FileContentsNX) def test_StreamBuffer(self): tmp = io.BytesIO(self.FileContentsNX) file_hash = self.Client.PutStream(tmp) self.assertEqual(file_hash, self.FileHashNX) self.assertTrue(isfile(self.Client[self.FileHashNX].Path)) with open(self.Client[self.FileHashNX].Path, 'rb') as src: self.assertEqual(src.read(), self.FileContentsNX) def test_LargeStream(self): with tempfile.TemporaryFile() as tmp: tmp_hash = hashlib.sha384() null_chunk = bytearray(2**20) # 1 MiB for i in range(10): # 10 MiB tmp.write(null_chunk) tmp_hash.update(null_chunk) tmp.seek(0) file_hash = self.Client.PutStream(tmp) with open(self.Client[file_hash].Path, 'rb') as tmp2: tmp2_hash = hashlib.sha384() for chunk in iter(lambda: tmp2.read(2**20), b''): tmp2_hash.update(chunk) self.assertEqual(tmp_hash.digest(), tmp2_hash.digest()) def test_StreamCustom(self): test = self class FileLikeObject: # bare-minimum filelike object data = self.FileContentsNX def read(self, n): test.assertIsInstance(n, int) if self.data: data, self.data = self.data, None return data else: return b'' file_hash = self.Client.PutStream(FileLikeObject()) self.assertEqual(file_hash, self.FileHashNX) 
self.assertTrue(isfile(self.Client[self.FileHashNX].Path)) with open(self.Client[self.FileHashNX].Path, 'rb') as src: self.assertEqual(src.read(), self.FileContentsNX) def test_StreamFail(self): with self.assertRaises(AttributeError): self.Client.PutStream(object()) with self.assertRaises(AttributeError): self.Client.PutStream(None) with self.assertRaises(AttributeError): self.Client.PutStream(b'') def test_File(self): with tempfile.NamedTemporaryFile() as tmp: tmp.write(self.FileContentsNX) tmp.seek(0) file_hash = self.Client.PutFile(tmp.name) tmp.seek(0) self.assertEqual(tmp.read(), self.FileContentsNX) self.assertTrue(isfile(tmp.name)) self.assertEqual(file_hash, self.FileHashNX) self.assertTrue(isfile(self.Client[self.FileHashNX].Path)) with open(self.Client[self.FileHashNX].Path, 'rb') as src: self.assertEqual(src.read(), self.FileContentsNX) def test_FileFD(self): tmp_fd, tmp_name = tempfile.mkstemp() try: with open(tmp_fd, 'ab+', closefd=False) as tmp: tmp.write(self.FileContentsNX) tmp.seek(0) file_hash = self.Client.PutFile(tmp_fd) finally: os.unlink(tmp_name) self.assertEqual(file_hash, self.FileHashNX) self.assertTrue(isfile(self.Client[self.FileHashNX].Path)) def test_FileFail(self): with self.assertRaises(TypeError): self.Client.PutFile(object()) self.assertFalse(exists(self.FilePathNX)) with self.assertRaises(FileNotFoundError): self.Client.PutFile(self.FilePathNX) with open(self.FilePathNX, 'w'): try: os.chmod(self.FilePathNX, 0) with self.assertRaises(PermissionError): self.Client.PutFile(self.FilePathNX) finally: os.unlink(self.FilePathNX) def test_Data(self): file_hash = self.Client.PutData(self.FileContentsNX) self.assertEqual(file_hash, self.FileHashNX) self.assertTrue(isfile(self.Client[self.FileHashNX].Path)) with open(self.Client[self.FileHashNX].Path, 'rb') as src: self.assertEqual(src.read(), self.FileContentsNX) def test_DataNone(self): file_hash = self.Client.PutData(None) self.assertEqual(self.FileHashEmpty, file_hash) def test_DataFail(self): with self.assertRaises(TypeError): self.Client.PutData(object()) with self.assertRaises(TypeError): self.Client.PutData(True) with self.assertRaises(TypeError): self.Client.PutData('asdx') def test_HashConsistency(self): file_hash = self.Client.PutData(self.FileContentsNX) self.assertEqual(file_hash, self.FileHashNX) def test_HashOverwrite(self): # Shouldn't raise any errors file_hash = self.Client.PutData(self.FileContents) self.assertEqual(file_hash, self.FileHash) def test_InternalURI(self): self.Client[self.FileHash].InternalURI self.assertIsInstance(self.Client[self.FileHash].InternalURI, str) self.assertTrue(self.Client[self.FileHash].InternalURI.startswith(self.InternalLocation + '/')) self.assertFalse('//' in self.Client[self.FileHash].InternalURI) def test_InternalURISlashes(self): # Make sure paths are *not* auto-fixed (if fails, update docs) bad_path = '//some///broken/path/../whatever//./' client = FileStruct.Client(self.Path, bad_path) file_hash = client.PutData(self.FileContents) self.assertTrue(client[file_hash].InternalURI.startswith(bad_path)) self.assertFalse(client[file_hash].InternalURI.startswith(bad_path + '/')) def test_InternalURIRecode(self): # Make sure path encoding is *not* auto-fixed (if fails, update docs) bad_path = '//some\0///broken\n\n/path/../фывапр//./' client = FileStruct.Client(self.Path, bad_path) file_hash = client.PutData(self.FileContents) self.assertTrue(client[file_hash].InternalURI.startswith(bad_path)) def test_GetAttrs(self): self.assertTrue(self.Client[self.FileHash].Path) 
self.assertIsInstance(self.Client[self.FileHash].Path, str) self.assertTrue(self.Client[self.FileHash].Path.startswith(self.Path)) self.assertEqual(self.Client[self.FileHash].Hash, self.FileHash) self.assertTrue(self.Client[self.FileHash].InternalURI) def test_GetMulticlient(self): # Make sure there's no locking involved (if added - update docs) client2 = FileStruct.Client(self.Path) file_obj1 = self.get_client_file_obj() file_obj2 = client2[self.FileHash] with file_obj1.GetStream() as stream1: self.assertEqual(file_obj1.GetData(), file_obj2.GetData()) with file_obj2.GetStream() as stream2: self.assertTrue(sameopenfile(stream1.fileno(), stream2.fileno())) self.assertFalse(stream1.closed) # For ClientGetTestsMixin def get_client_file_obj(self): return self.Client[self.FileHash] def get_client_file_data(self): return self.FileContents class TestClientTempDir(TestClientOps): def test_Context(self): with self.Client.TempDir() as tmpdir: self.assertTrue(tmpdir) def test_PublicAttrs(self): with self.Client.TempDir() as tmpdir: self.assertTrue(tmpdir.Path) self.assertTrue(tmpdir.Path) def test_Lifecycle(self): with self.Client.TempDir() as tmpdir: tmpdir_path = tmpdir.Path self.assertTrue(isdir(tmpdir_path)) self.assertFalse(exists(tmpdir_path)) def test_CleanupTempFiles(self): with self.Client.TempDir() as tmpdir: tmpfile = tmpdir['file'] tmpfile.PutData(b'asd') self.assertTrue(exists(tmpfile.Path)) self.assertFalse(exists(tmpfile.Path)) self.assertFalse(exists(tmpdir.Path)) def test_CleanupClutter(self): with self.Client.TempDir() as tmpdir: with open(join(tmpdir.Path, 'clutter'), 'w'): pass os.mkdir(join(tmpdir.Path, 'clutter2')) with open(join(tmpdir.Path, 'clutter2', 'clutter3'), 'w'): pass self.assertFalse(exists(tmpdir.Path)) def test_CleanupException(self): with self.assertRaises(self.UnhandledTestException): with self.Client.TempDir() as tmpdir: raise self.UnhandledTestException() self.assertFalse(exists(tmpdir.Path)) def test_CleanupFail(self): with self.assertRaises(FileNotFoundError): with self.Client.TempDir() as tmpdir: shutil.rmtree(tmpdir.Path) def test_GetFile(self): with self.Client.TempDir() as tmpdir: self.assertTrue(tmpdir['file']) self.assertTrue(tmpdir['1']) def test_GetFileInvalid(self): with self.Client.TempDir() as tmpdir: for badname in ['file\0123', '1`23', '123#', 'asd:', 'asd/sdf', 'варвр', 'a'*256]: with self.assertRaises(ValueError): tmpfile = tmpdir[badname] self.assertTrue(tmpdir['file']) def test_GetFileNoDir(self): success = False try: with self.Client.TempDir() as tmpdir: shutil.rmtree(tmpdir.Path) tmpdir['file'] # no error success = True except FileNotFoundError: # should be raised during cleanup if not success: raise class TestClientTempOps(TestClientOps): def setUp(self): super(TestClientTempOps, self).setUp() self.Contexts = contextlib.ExitStack() self.TempDir = self.Client.TempDir() self.Contexts.enter_context(self.TempDir) self.FileContentsTemp = self.FileContentsNX self.TempFileName = 'file' self.TempFile = self.TempDir[self.TempFileName] self.TempFile.PutData(self.FileContentsTemp) self.FileContentsTempNX = self.FileContents self.TempFileNameNX = 'fileNX' self.TempFileNX = self.TempDir[self.TempFileNameNX] def tearDown(self): self.Contexts.close() super(TestClientTempOps, self).tearDown() class TestClientTempConvert(TestClientTempOps): def setUp(self): super(TestClientTempConvert, self).setUp() skip = not exists(self.Client.bin_convert) self.skip = unittest.skip( 'No ImageMagick' ' "convert" binary found at {}'.format(self.Client.bin_convert) )\ if 
skip else False if self.skip: return self.skip self.TempImageSource = join(dirname(__file__), 'image.jpg') self.TempImageName = 'imagefile' self.TempImage = self.TempDir[self.TempImageName] self.TempImage.PutFile(self.TempImageSource) self.TempImageHash = self.Client.PutFile(self.TempImageSource) self.ValidSizeSpec = '100x100' def test_Basic(self): if self.skip: return self.skip self.TempDir.convert_resize(self.TempImageName, self.TempImageName+'2', self.ValidSizeSpec) def test_Linked(self): if self.skip: return self.skip self.TempFileNX.Link(self.TempImageHash) self.TempDir.convert_resize(self.TempFileNameNX, self.TempImageName+'2', self.ValidSizeSpec) def test_Overwrite(self): if self.skip: return self.skip self.TempDir.convert_resize(self.TempImageName, self.TempFileName, self.ValidSizeSpec) self.assertNotEqual(self.TempFile.GetData(), self.FileContentsTemp) def test_FailTypes(self): if self.skip: return self.skip with self.assertRaises(TypeError): self.TempDir.convert_resize(self.TempImageName, self.TempImageName+'2') with self.assertRaises(TypeError): self.TempDir.convert_resize(self.TempImageName) for bad_srcdst_type in [object(), None, True, b'file']: with self.assertRaises(TypeError): self.TempDir.convert_resize(bad_srcdst_type, self.TempImageName+'2', self.ValidSizeSpec) with self.assertRaises(TypeError): self.TempDir.convert_resize(self.TempImageName, bad_srcdst_type, self.ValidSizeSpec) def test_FailNX(self): if self.skip: return self.skip with self.assertRaises(FileStruct.Error): self.TempDir.convert_resize( self.TempFileNameNX, self.TempImageName+'2', self.ValidSizeSpec ) class TestClientTempFile(TestClientTempOps, ClientGetTestsMixin): def test_Ingest(self): self.assertTrue(isdir(self.TempDir.Path)) tmp_hash = self.TempFile.Ingest() self.assertEqual(self.FileHashNX, tmp_hash) self.assertTrue(tmp_hash in self.Client) self.assertTrue(self.Client[tmp_hash]) def test_NoCreate(self): self.assertFalse(exists(self.TempFileNX.Path)) # Put tests are a bit different from these for HashFile # due to abscence of hashes and related checks def test_StreamFile(self): with tempfile.TemporaryFile() as tmp: tmp.write(self.FileContentsTempNX) tmp.seek(0) self.TempFileNX.PutStream(tmp) self.assertFalse(tmp.closed) self.assertTrue(tmp.tell() == len(self.FileContentsTempNX)) tmp.seek(0) self.assertEqual(tmp.read(), self.FileContentsTempNX) self.assertTrue(isfile(self.TempFileNX.Path)) with open(self.TempFileNX.Path, 'rb') as src: self.assertEqual(src.read(), self.FileContentsTempNX) def test_StreamBuffer(self): tmp = io.BytesIO(self.FileContentsTempNX) self.TempFileNX.PutStream(tmp) self.assertTrue(isfile(self.TempFileNX.Path)) with open(self.TempFileNX.Path, 'rb') as src: self.assertEqual(src.read(), self.FileContentsTempNX) def test_LargeStream(self): with tempfile.TemporaryFile() as tmp: tmp_hash = hashlib.sha384() null_chunk = bytearray(2**20) # 1 MiB for i in range(10): # 10 MiB tmp.write(null_chunk) tmp_hash.update(null_chunk) tmp.seek(0) self.TempFileNX.PutStream(tmp) with open(self.TempFileNX.Path, 'rb') as tmp2: tmp2_hash = hashlib.sha384() for chunk in iter(lambda: tmp2.read(2**20), b''): tmp2_hash.update(chunk) self.assertEqual(tmp_hash.digest(), tmp2_hash.digest()) def test_StreamCustom(self): test = self class FileLikeObject: # bare-minimum filelike object data = self.FileContentsTempNX def read(self, n): test.assertIsInstance(n, int) if self.data: data, self.data = self.data, None return data else: return b'' self.TempFileNX.PutStream(FileLikeObject()) 
self.assertTrue(isfile(self.TempFileNX.Path)) with open(self.TempFileNX.Path, 'rb') as src: self.assertEqual(src.read(), self.FileContentsTempNX) def test_StreamFail(self): with self.assertRaises(AttributeError): self.TempFileNX.PutStream(object()) with self.assertRaises(AttributeError): self.TempFileNX.PutStream(None) with self.assertRaises(AttributeError): self.TempFileNX.PutStream(b'') def test_File(self): with tempfile.NamedTemporaryFile() as tmp: tmp.write(self.FileContentsTempNX) tmp.seek(0) self.TempFileNX.PutFile(tmp.name) tmp.seek(0) self.assertEqual(tmp.read(), self.FileContentsTempNX) self.assertTrue(isfile(tmp.name)) self.assertTrue(isfile(self.TempFileNX.Path)) with open(self.TempFileNX.Path, 'rb') as src: self.assertEqual(src.read(), self.FileContentsTempNX) def test_FileFD(self): tmp_fd, tmp_name = tempfile.mkstemp() try: with open(tmp_fd, 'ab+', closefd=False) as tmp: tmp.write(self.FileContentsTempNX) tmp.seek(0) self.TempFileNX.PutFile(tmp_fd) finally: os.unlink(tmp_name) self.assertTrue(isfile(self.TempFileNX.Path)) def test_FileFail(self): with self.assertRaises(TypeError): self.TempFileNX.PutFile(object()) self.assertFalse(exists(self.FilePathNX)) with self.assertRaises(FileNotFoundError): self.TempFileNX.PutFile(self.FilePathNX) with open(self.FilePathNX, 'w'): try: os.chmod(self.FilePathNX, 0) with self.assertRaises(PermissionError): self.Client.PutFile(self.FilePathNX) finally: os.unlink(self.FilePathNX) def test_Data(self): self.TempFileNX.PutData(self.FileContentsTempNX) self.assertTrue(isfile(self.TempFileNX.Path)) with open(self.TempFileNX.Path, 'rb') as src: self.assertEqual(src.read(), self.FileContentsTempNX) def test_DataNone(self): self.TempFileNX.PutData(None) with open(self.TempFileNX.Path, 'rb') as src: self.assertEqual(src.read(), b'') def test_DataFail(self): with self.assertRaises(TypeError): self.TempFileNX.PutData(object()) with self.assertRaises(TypeError): self.TempFileNX.PutData(True) with self.assertRaises(TypeError): self.TempFileNX.PutData('asdx') def test_GetAttrs(self): self.assertTrue(self.TempFile.Path) self.assertIsInstance(self.TempFile.Path, str) self.assertTrue(self.TempFile.Path.startswith(self.TempDir.Path)) self.assertIs(self.TempFile.TempDir, self.TempDir) def test_GetMultiFile(self): file_obj1 = self.get_client_file_obj() file_obj2 = self.TempDir[self.TempFileName] file_obj3 = self.TempDir[self.TempFileName + 'a'] file_obj3.PutData(self.FileContentsNX) with file_obj1.GetStream() as stream1: self.assertEqual(file_obj1.GetData(), file_obj2.GetData()) with file_obj2.GetStream() as stream2: self.assertTrue(sameopenfile(stream1.fileno(), stream2.fileno())) self.assertFalse(stream1.closed) with file_obj3.GetStream() as stream2: self.assertFalse(samefile(stream1.name, stream2.name)) self.assertFalse(sameopenfile(stream1.fileno(), stream2.fileno())) # For ClientGetTestsMixin def get_client_file_obj(self): return self.TempFile def get_client_file_data(self): return self.FileContentsTemp def test_Link(self): self.TempFileNX.Link(self.FileHash) self.assertFalse(islink(self.Client[self.FileHash].Path)) self.assertTrue(islink(self.TempFileNX.Path)) self.assertTrue(samefile(self.TempFileNX.Path, self.Client[self.FileHash].Path)) with self.TempFileNX.GetStream() as stream1,\ self.Client[self.FileHash].GetStream() as stream2: self.assertTrue(sameopenfile(stream1.fileno(), stream2.fileno())) def test_LinkFailPaths(self): with self.assertRaises(KeyError): self.TempFile.Link(self.FileHashNX) with self.assertRaises(FileExistsError): 
self.TempFile.Link(self.FileHash) def test_LinkFail(self): with self.assertRaises(TypeError): self.TempFile.Link(object()) with self.assertRaises(TypeError): self.TempFile.Link() with self.assertRaises(TypeError): self.TempFile.Link(None) with self.assertRaises(TypeError): self.TempFile.Link(True) with self.assertRaises(TypeError): self.TempFileNX.Link(self.FileHash.encode('utf-8')) def test_Delete(self): self.TempFile.Delete() self.assertFalse(exists(self.TempFile.Path)) def test_DeleteRobust(self): os.chmod(self.TempFile.Path, 0) self.TempFile.Delete() self.assertFalse(exists(self.TempFile.Path)) def test_DeleteFail(self): with self.assertRaises(FileNotFoundError): self.TempFileNX.Delete() os.mkdir(self.TempFileNX.Path) with self.assertRaises(IsADirectoryError): self.TempFileNX.Delete() if __name__ == '__main__': unittest.main()
apache-2.0
-7,144,550,251,756,274,000
33.711447
99
0.692859
false
ios-xr/iosxr-ansible
local/library/iosxr_reload.py
1
3482
#!/usr/bin/python
#------------------------------------------------------------------------------
#
# Copyright (C) 2016 Cisco Systems, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
#------------------------------------------------------------------------------

import re

from ansible.module_utils.basic import *
from ansible.module_utils.shell import *
from ansible.module_utils.netcfg import *
from iosxr_common import *
from iosxr import *

DOCUMENTATION = """
---
module: iosxr_reload
author: Adisorn Ermongkonchai
short_description: Reload IOS-XR device
description:
  - Restart the specified IOS-XR device
options:
  host:
    description:
      - IP address or hostname (resolvable by Ansible control host) of the target IOS-XR node.
    required: true
  username:
    description:
      - username used to login to IOS-XR
    required: true
    default: none
  password:
    description:
      - password used to login to IOS-XR
    required: true
    default: none
  confirm:
    description:
      - make sure the user really wants to reload
    required: true
    value: "yes" or other string
  location:
    description:
      - location of the node that needs to be rebooted, e.g. 0/RP0/CPU0
    required: false
    default: None
  force:
    description:
      - force reload without doing any cleanup
    required: false
    default: false
"""

EXAMPLES = """
- iosxr_reload:
    host: '{{ ansible_ssh_host }}'
    username: cisco
    password: cisco
    confirm: yes
"""

RETURN = """
stdout:
  description: raw response
  returned: always
stdout_lines:
  description: list of response lines
  returned: always
"""

CLI_PROMPTS_RE.append(re.compile(r'[\r\n]?[a-zA-Z]{1}[a-zA-Z0-9-]*[confirm]]'))

def main():
    module = get_module(
        argument_spec = dict(
            username = dict(required=False, default=None),
            password = dict(required=False, default=None),
            confirm  = dict(required=True),
            location = dict(required=False, default=None),
            force    = dict(required=False, type='bool', default=False)
        ),
        supports_check_mode = False
    )
    args = module.params
    force = args['force']
    location = args['location']

    result = dict(changed=False)
    if args['confirm'] != 'yes':
        result['stdout'] = "reload aborted"
        module.exit_json(**result)

    reload_command = 'reload '
    if location != None:
        reload_command = reload_command + 'location %s ' % location
    if force is True:
        reload_command = reload_command + 'force '

    commands = [reload_command]
    commands.append('\r')
    commands.append('\r')
    response = execute_command(module, commands)

    result['stdout'] = response
    result['stdout_lines'] = str(result['stdout']).split(r'\n')
    module.exit_json(**result)

if __name__ == "__main__":
    main()
gpl-3.0
-8,870,944,362,057,056,000
27.308943
79
0.621482
false
lueschem/edi
edi/lib/helpers.py
1
4018
# -*- coding: utf-8 -*-
# Copyright (C) 2016 Matthias Luescher
#
# Authors:
#  Matthias Luescher
#
# This file is part of edi.
#
# edi is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# edi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with edi.  If not, see <http://www.gnu.org/licenses/>.

import sys
import os
from pwd import getpwnam
from grp import getgrgid
import socket
import logging
import shutil


class Error(Exception):
    """Base class for edi exceptions."""
    pass


class FatalError(Error):
    """Exception raised for fatal errors needing corrective actions from user.

    Attributes:
        message -- explanation of the error
    """

    def __init__(self, message):
        self.message = message


def print_error_and_exit(*args, **kwargs):
    print('\033[91m', end="", file=sys.stderr)
    print('Error: ', end="", file=sys.stderr)
    print(*args, end="", file=sys.stderr, **kwargs)
    print('\033[0m', file=sys.stderr)
    sys.exit(1)


def print_success(*args, **kwargs):
    print('\033[92m', end="")
    print('Success: ', end="")
    print(*args, end="", **kwargs)
    print('\033[0m')


def get_user():
    try:
        if 'SUDO_USER' in os.environ:
            return os.environ['SUDO_USER']
        else:
            return os.environ['USER']
    except KeyError:
        # Hint: there is no $USER during debuild
        logging.warning("Unable to get user from environment variable.")
        return "root"


def get_user_uid():
    return getpwnam(get_user()).pw_uid


def get_user_gid():
    return getpwnam(get_user()).pw_gid


def get_user_group():
    return getgrgid(get_user_gid()).gr_name


def get_hostname():
    return socket.gethostname()


def get_edi_plugin_directory():
    return os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "plugins"))


def copy_tree(src, dst):
    for item in os.listdir(src):
        s = os.path.join(src, item)
        d = os.path.join(dst, item)
        if os.path.islink(s):
            linkto = os.readlink(s)
            os.symlink(linkto, d)
        elif os.path.isdir(s):
            shutil.copytree(s, d, symlinks=True)
        else:
            shutil.copy2(s, d)
    return dst


def which(executable):
    def is_exe(abs_path):
        return os.path.isfile(abs_path) and os.access(abs_path, os.X_OK)

    def get_path_list():
        paths = os.environ["PATH"].split(os.pathsep)
        # paths not visible for unprivileged user on Debian
        paths.append(os.path.join(os.sep, 'usr', 'local', 'sbin'))
        paths.append(os.path.join(os.sep, 'usr', 'sbin'))
        paths.append(os.path.join(os.sep, 'sbin'))
        # path not visible for super user on Debian
        paths.append(os.path.join(os.sep, 'snap', 'bin'))
        return paths

    exe_dir, _ = os.path.split(executable)
    if exe_dir:
        if is_exe(executable):
            return executable
    else:
        for path in get_path_list():
            path = path.strip('"')
            exe_file_path = os.path.join(path, executable)
            if is_exe(exe_file_path):
                return exe_file_path

    return None


def chown_to_user(path):
    shutil.chown(path, get_user_uid(), get_user_gid())


def get_workdir():
    return os.getcwd()


def get_artifact_dir():
    return os.path.join(get_workdir(), 'artifacts')


def create_artifact_dir():
    directory = get_artifact_dir()
    if not os.path.isdir(directory):
        logging.info('''Creating artifact directory '{}'.'''.format(directory))
        os.mkdir(directory)
        chown_to_user(directory)
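A minimal, hypothetical usage sketch of the helpers above; the 'lxc' executable name and the FatalError wording are assumptions made for illustration only, not part of edi.

def require_tool(executable):  # hypothetical helper built on which()
    path = which(executable)
    if path is None:
        raise FatalError("required tool '{}' not found".format(executable))
    return path

if __name__ == "__main__":
    create_artifact_dir()  # creates ./artifacts below the current working directory
    print_success("{} on {} uses {}".format(get_user(), get_hostname(), require_tool('lxc')))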
lgpl-3.0
8,270,724,673,714,100,000
25.090909
84
0.627924
false
nuclio/nuclio
pkg/processor/runtime/python/test/outputter/outputter.py
1
2513
# Copyright 2017 The Nuclio Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


def handler(context, event):
    """Given a certain body, returns a response. Used by an integration test"""

    # if the method is other than POST, return it as the body
    if event.method != 'POST':
        return event.method

    body_str = event.body.decode('utf-8')

    if body_str == 'return_string':
        return 'a string'
    elif body_str == 'return_status_and_string':
        return 201, 'a string after status'
    elif body_str == 'return_dict':
        return {'a': 'dict', 'b': 'foo'}
    elif body_str == 'return_list':
        return [{'a': 1}, {'b': 2}]
    elif body_str == 'return_status_and_dict':
        return 201, {'a': 'dict after status', 'b': 'foo'}
    elif body_str == 'log':
        context.logger.debug('Debug message')
        context.logger.info('Info message')
        context.logger.warn('Warn message')
        context.logger.error('Error message')
        return 201, 'returned logs'
    elif body_str == 'log_with':
        context.logger.error_with('Error message', source='rabbit', weight=7)
        return 201, 'returned logs with'
    elif body_str == 'return_response':
        # echo back the headers, plus add two (TODO)
        headers = event.headers
        headers['h1'] = 'v1'
        headers['h2'] = 'v2'
        return context.Response(body='response body',
                                headers=headers,
                                content_type='text/plain',
                                status_code=201)
    elif body_str == 'return_fields':
        # We use sorted to get predictable output
        kvs = ['{}={}'.format(k, v) for k, v in sorted(event.fields.items())]
        return ','.join(kvs)
    elif body_str == 'return_path':
        return event.path
    elif body_str == 'return_binary':
        return b'hello'
    elif body_str == 'return_error':
        raise ValueError('some error')
    else:
        raise RuntimeError('Unknown return mode: {0}'.format(body_str))
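A hedged sketch of how the handler above could be driven outside nuclio with stub context/event objects; the stub classes only mimic the attributes the handler reads and are assumptions, not the real nuclio SDK types.

class _StubLogger(object):  # hypothetical stand-in for context.logger
    def debug(self, msg): print('DEBUG', msg)
    def info(self, msg): print('INFO', msg)
    def warn(self, msg): print('WARN', msg)
    def error(self, msg): print('ERROR', msg)
    def error_with(self, msg, **kwargs): print('ERROR', msg, kwargs)

class _StubContext(object):
    logger = _StubLogger()

class _StubEvent(object):
    method = 'POST'
    path = '/'
    headers = {}
    fields = {}
    body = b'return_dict'

print(handler(_StubContext(), _StubEvent()))  # expected: {'a': 'dict', 'b': 'foo'}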
apache-2.0
3,214,789,561,698,694,000
32.065789
79
0.617589
false
NeowithU/Trajectory
Outdated/PreProcess.py
1
5448
__author__ = 'Fang'

import os
import math
import json
import datetime

import utilities as util

INIT_DATA_DIR = "Raw"
RAW_NODE_DATA = "3n.json"
RAW_WAY_DATA = "3w.json"
INER_DATA_DIR = "Intermediate"
GEO_RANGE = '116.318,27.147,122.481,35.178'
LOG_FILE = "Logs/PreProcess.log"
WAY_DATA = "ways_dict"
STEP = 0.02


class PreProcess:
    __step = 0.02
    __nodes_dict = {}

    def __init__(self):
        s_time = datetime.datetime.now()
        self.__nodes_dict = self.__get_nodes()
        with open(LOG_FILE, 'a') as log_file:
            e_time = datetime.datetime.now()
            cost_time = e_time - s_time
            log = "nodes_dict create cost %s\n" % str(cost_time)
            log_file.write(log)

        s_time = datetime.datetime.now()
        util.write_json("nodes_dict", INER_DATA_DIR, self.__nodes_dict)
        with open(LOG_FILE, 'a') as log_file:
            e_time = datetime.datetime.now()
            cost_time = e_time - s_time
            log = "nodes_dict.json save cost %s\n" % str(cost_time)
            log_file.write(log)

        s_time = datetime.datetime.now()
        ways_dict = self.__get_ways()
        with open(LOG_FILE, 'a') as log_file:
            e_time = datetime.datetime.now()
            cost_time = e_time - s_time
            log = "ways_dict create cost %s\n" % str(cost_time)
            log_file.write(log)

        s_time = datetime.datetime.now()
        util.write_json("ways_dict", INER_DATA_DIR, ways_dict)
        with open(LOG_FILE, 'a') as log_file:
            e_time = datetime.datetime.now()
            cost_time = e_time - s_time
            log = "ways_dict.json save cost %s\n" % str(cost_time)
            log_file.write(log)

    def __get_nodes(self):
        os.chdir(INER_DATA_DIR)
        if not os.path.exists(RAW_NODE_DATA):
            util.download_map(GEO_RANGE, True)
        with open(RAW_NODE_DATA) as data:
            nodes_data = json.load(data)
        os.chdir("..")

        nodes_list = nodes_data[u"elements"]
        nodes_dict = dict()
        for item in nodes_list:
            if item[u"type"] == u"node":
                tmp_id = item[u"id"]
                tmp_lat = item[u"lat"]
                tmp_lon = item[u"lon"]
                nodes_dict[tmp_id] = {u"lat": tmp_lat, u"lon": tmp_lon}
        return nodes_dict

    def __get_ways(self):
        os.chdir(INIT_DATA_DIR)
        if not os.path.exists(RAW_WAY_DATA):
            util.download_map(GEO_RANGE, False)
        with open(RAW_WAY_DATA) as data:
            ways_data = json.load(data)
        os.chdir("..")

        ways_list = ways_data[u"elements"]
        ways_dict = dict()
        for item in ways_list:
            if (item[u"type"] == u"way") and (u"tags" in item) and (u"highway" in item[u"tags"]):
                tmp_id = item[u"id"]
                nodes_list = list()
                for node_id in item[u"nodes"]:
                    if self.__nodes_dict.has_key(node_id):
                        tmp_node = self.__nodes_dict[node_id]
                        tmp_lat = tmp_node[u"lat"]
                        tmp_lon = tmp_node[u"lon"]
                        nodes_list.append({u"lat": tmp_lat, u"lon": tmp_lon})
                if len(nodes_list) > 1:
                    ways_dict[tmp_id] = {u"highway": item[u"tags"][u"highway"], u"nodes": nodes_list}
        return ways_dict


def get_geo_range():
    min_lat = min_lon = 1000000
    max_lat = max_lon = -1000000
    for item in ways_dirt.values():
        for node in item[u"nodes"]:
            tmp_lat = float(node[u"lat"])
            tmp_lon = float(node[u"lon"])
            min_lat = min(min_lat, tmp_lat)
            min_lon = min(min_lon, tmp_lon)
            max_lat = max(max_lat, tmp_lat)
            max_lon = max(max_lon, tmp_lon)
    return min_lat, max_lat, min_lon, max_lon


def find_grid_id(x, y):
    loc_x = int((x - min_lat) / STEP)
    if loc_x == num_lat:
        loc_x -= 1
    loc_y = int((y - min_lon) / STEP)
    if loc_y == num_lon:
        loc_y -= 1
    loc = loc_x * num_lon + loc_y
    return loc


def gen_segment(seg_id, sx, sy, ex, ey, highway):
    return {seg_id: {u"highway": highway, u"snode": (sx, sy), u"enode": (ex, ey)}}


def get_grids():
    grids_dict = dict()
    for i in range(num_grids):
        grids_dict[i] = list()
    for (way_id, item) in ways_dirt.items():
        nodes = item[u"nodes"]
        length = len(nodes)
        highway = item[u"highway"]
        last_lat = float(nodes[0][u"lat"])
        last_lon = float(nodes[0][u"lon"])
        last_loc = find_grid_id(last_lat, last_lon)
        for i in range(1, length):
            tmp_lat = float(nodes[i][u"lat"])
            tmp_lon = float(nodes[i][u"lon"])
            tmp_loc = find_grid_id(tmp_lat, tmp_lon)
            grids_dict[last_loc].append(gen_segment(unicode(last_lon) + u"_" + unicode(way_id) + u"_" + unicode(i - 1), last_lat, last_lon, tmp_lat, tmp_lon, highway))
            if tmp_loc != last_loc:
                grids_dict[tmp_loc].append(gen_segment(unicode(tmp_loc) + u"_" + unicode(way_id) + u"_" + unicode(i - 1), last_lat, last_lon, tmp_lat, tmp_lon, highway))
            last_lat = tmp_lat
            last_lon = tmp_lon
            last_loc = tmp_loc
    return grids_dict
mit
-4,385,657,036,776,436,700
36.840278
173
0.505874
false
k1643/StratagusAI
tools/the-right-strategy-basegen.py
1
3963
#!/bin/env python # # generate a production base. # player_config = """ -- player configuration SetStartView(0, 28, 3) SetPlayerData(0, "Resources", "gold", 40000) SetPlayerData(0, "Resources", "wood", 40000) SetPlayerData(0, "Resources", "oil", 40000) SetPlayerData(0, "RaceName", "human") SetAiType(0, "wc2-passive") SetStartView(1, 50, 53) SetPlayerData(1, "Resources", "gold", 40000) SetPlayerData(1, "Resources", "wood", 40000) SetPlayerData(1, "Resources", "oil", 40000) SetPlayerData(1, "RaceName", "human") SetAiType(1, "{0}") SetPlayerData(15, "RaceName", "neutral") """ player_config_switched = """ -- player configuration SetStartView(0, 50, 53) SetPlayerData(0, "Resources", "gold", 40000) SetPlayerData(0, "Resources", "wood", 40000) SetPlayerData(0, "Resources", "oil", 40000) SetPlayerData(0, "RaceName", "human") SetAiType(0, "wc2-passive") SetStartView(1, 28, 3) SetPlayerData(1, "Resources", "gold", 40000) SetPlayerData(1, "Resources", "wood", 40000) SetPlayerData(1, "Resources", "oil", 40000) SetPlayerData(1, "RaceName", "human") SetAiType(1, "{0}") SetPlayerData(15, "RaceName", "neutral") """ def gen_base(playerId,pos): th = pos[0] # townhall ml = pos[1] # mill bk = pos[2] # barracks bk2 = pos[3] # barracks2 gp = pos[4] # farm group gp2 = pos[5] # farm group 2 ps = pos[6] # peasant base = """ unit = CreateUnit("unit-town-hall", {0}, {{{1}, {2}}}) unit = CreateUnit("unit-elven-lumber-mill", {0}, {{{3}, {4}}}) unit = CreateUnit("unit-human-barracks", {0}, {{{5}, {6}}}) unit = CreateUnit("unit-human-barracks", {0}, {{{7}, {8}}}) unit = CreateUnit("unit-peasant", {0}, {{{7}, {8}}}) """.format(playerId,th[0],th[1],ml[0],ml[1],bk[0],bk[1],bk2[0],bk2[1],ps[0],ps[1]) farmgrp = """ unit = CreateUnit("unit-farm", {0}, {{{3}, {1}}}) unit = CreateUnit("unit-farm", {0}, {{{4}, {1}}}) unit = CreateUnit("unit-farm", {0}, {{{5}, {1}}}) unit = CreateUnit("unit-farm", {0}, {{{3}, {2}}}) unit = CreateUnit("unit-farm", {0}, {{{4}, {2}}}) unit = CreateUnit("unit-farm", {0}, {{{5}, {2}}}) """.format(playerId,gp[1],gp[1]+2,gp[0],gp[0]+2,gp[0]+4) farmgrp2 = """ unit = CreateUnit("unit-farm", {0}, {{{1}, {2}}}) unit = CreateUnit("unit-farm", {0}, {{{1}, {3}}}) unit = CreateUnit("unit-farm", {0}, {{{4}, {2}}}) unit = CreateUnit("unit-farm", {0}, {{{4}, {3}}}) """.format(playerId,gp2[0],gp2[1],gp2[1]+2,gp2[0]+2) return base + farmgrp + farmgrp2 def write_map(filename,switched,AI1): out = open(filename,'wb') tiles = open('the-right-strategy-game-tiles.txt','rb') if switched: out.write(player_config_switched.format(AI1)) else: out.write(player_config.format(AI1)) for line in tiles.readlines(): out.write(line) tiles.close() top_player = 0 bottom_player = 1 if switched: top_player = 1 bottom_player = 0 # bases in top half of map out.write(gen_base(top_player,tp)) # playerId, positions, farmgroup horizontal # bases in bottom half of map out.write(gen_base(bottom_player,bt)) out.close() # building positions. 
townhall,mill,barracks,barracks2,farmgroup, farmgroup2,peasant tp = [(32,11),(36,3),(31,7),(23,6),(23,0),(37,8),(30,11)] # top bt = [(35,54),(40,55),(29,57),(34,50),(27,51),(44,53),(38,52)] # bottom ################################################################################ # # main # ################################################################################ if __name__ == '__main__': # # bases # write_map('../maps/the-right-strategy.sms',False,'wc2-passive') write_map('../maps/the-right-strategy_PvC.sms',False,'wc2-land-attack') # # switched bases # write_map('../maps/the-right-strategy_switched.sms',True,'wc2-passive') write_map('../maps/the-right-strategy_switched_PvC.sms',True,'wc2-land-attack')
apache-2.0
-2,548,080,232,074,289,700
32.763158
84
0.568004
false
dmnfarrell/smallrnaseq
smallrnaseq/tests.py
2
4083
#!/usr/bin/env python """ mirnaseq unit tests Created September 2016 Copyright (C) Damien Farrell """ from __future__ import absolute_import, print_function import sys, os import pandas as pd import unittest from . import config, base, utils, analysis, mirdeep2, aligners, novel class BasicTests(unittest.TestCase): """Basic tests for mirnaseq""" def setUp(self): self.df = None self.testdir = 'testing' if not os.path.exists(self.testdir): os.mkdir(self.testdir) aligners.BOWTIE_INDEXES = 'testing/indexes' return def test_collapse_reads(self): """collapse reads test""" f = os.path.join(base.datadir, 'bovine_serum_sample.fastq') base.collapse_reads(f) return def test_build_index(self): filename = os.path.join(base.datadir, 'bosTau8-tRNAs.fa') aligners.build_bowtie_index(filename, 'testing/indexes') #aligners.build_subread_index(filename, 'testing/indexes') return def test_htseq(self): """htseq basic test for sam file reading""" import HTSeq samfile = os.path.join(base.datadir, 'test.sam') sam = HTSeq.SAM_Reader(samfile) f=[] for a in sam: if a.aligned == True: seq = a.read.seq.decode() f.append((seq,a.read.name,a.iv.chrom)) df = pd.DataFrame(f, columns=['seq','read','name']) #print (df) return def test_pandas(self): """dataframe-fasta tests""" f = os.path.join(base.datadir, 'test_collapsed.fa') df = utils.fasta_to_dataframe(f) #utils.dataframe_to_fasta(df) return def test_read_aligned(self): """read in alignment/counts test""" samfile = os.path.join(base.datadir, 'test.sam') cfile = os.path.join(base.datadir, 'test_collapsed.fa') reads = utils.get_aligned_reads(samfile, cfile) return def test_count_aligned(self): """count aligned test""" samfile = os.path.join(base.datadir, 'test.sam') cfile = os.path.join(base.datadir, 'test_collapsed.fa') counts = base.count_aligned(samfile, cfile) #print (counts) return def test_count_features(self): """feature counting""" #countfile = os.path.join(outpath, '%s.csv' %label) #readcounts = pd.read_csv(countfile, index_col=0) #hits = count_features(samfile, features=exons, truecounts=readcounts) return def test_map_rnas(self): """mapping to libraries""" aligners.BOWTIE_PARAMS = '-v 0 --best' fastafile = os.path.join(base.datadir, 'bosTau8-tRNAs.fa') aligners.build_bowtie_index(fastafile, path='testing/indexes') path = os.path.join(self.testdir, 'ncrna_map') f = os.path.join(base.datadir, 'bovine_serum_sample.fastq') res = base.map_rnas([f], ['bosTau8-tRNAs'], path, overwrite=True, aligner='bowtie') return def test_map_mirnas(self): """mirna counting test""" f = os.path.join(base.datadir, 'bovine_plasma_sample.fastq') path = os.path.join(self.testdir, 'ncrna_map') #res = base.map_mirbase(files=[f], overwrite=True, outpath=path, # aligner='bowtie', species='bta') return def test_map_features(self): """Genomic feature mapping/counting""" aligners.BOWTIE_PARAMS = '-v 0 --best' #fastafile = os.path.join(base.datadir, 'bostau.fa') #base.build_bowtie_index(fastafile) path = os.path.join(self.testdir, 'ncrna_map') f = os.path.join(base.datadir, 'bovine_serum_sample.fastq') return def test_mirdeep(self): """mirdeep2 script test""" conffile = 'testing/test_mdp.conf' cp = config.parse_config(conffile) options = config.get_options(cp) #print (options) #mirdeep2.run_multiple(**options) return def run(): unittest.main() if __name__ == '__main__': unittest.main()
gpl-3.0
-2,935,913,632,993,654,000
30.167939
91
0.597355
false
paylogic/halogen
tests/vnd/test_error.py
1
1761
# coding=utf8
"""Test vnd.error."""

import uuid

import pytest

import halogen
from halogen.vnd.error import Error, VNDError


class APIError(Error):

    def __init__(self, status_code, **kwargs):
        super(APIError, self).__init__(**kwargs)
        self.status_code = status_code
        self.logref = uuid.uuid4().hex


class AuthorSchema(halogen.Schema):
    name = halogen.Attr(required=True)


class PublisherSchema(halogen.Schema):
    name = halogen.Attr(required=True)
    address = halogen.Attr()


class BookSchema(halogen.Schema):
    title = halogen.Attr(required=True)
    year = halogen.Attr(halogen.types.Int(), required=True)
    authors = halogen.Attr(halogen.types.List(AuthorSchema), required=True)
    publisher = halogen.Attr(PublisherSchema)


def test_validation():

    try:
        BookSchema.deserialize(
            dict(
                # title is skipped
                year=u"☃bc",  # Not an integer
                authors=[dict(name="John Smith"), dict()],  # Second author has no name
                publisher=dict(address="Chasey Lane 42, Los Angeles, US"),  # No name
            ),
        )
    except halogen.exceptions.ValidationError as e:
        error = APIError.from_validation_exception(e, status_code=400)

        assert error.status_code == 400

        data = VNDError.serialize(error)
        assert uuid.UUID(data["logref"])
        assert data["message"] == "Validation error."

        expected_errors = [
            dict(path="/authors/1/name", message="Missing attribute."),
            dict(path="/publisher/name", message="Missing attribute."),
            dict(path="/title", message="Missing attribute."),
            dict(path="/year", message=u"'☃bc' is not an integer"),
        ]
        assert data["_embedded"]["errors"] == expected_errors
    else:
        # Without this, the test would pass silently if deserialize never raised.
        pytest.fail("ValidationError was not raised.")
mit
1,149,635,078,988,311,800
26.888889
87
0.632897
false
zrafa/ev
python+tk+opencv/mipsx_tk_gui.py
1
8316
#!/usr/bin/python # -*- coding: utf-8 -*- """ Autor original del ejemplo de una aplicacion Tk: Jan Bodnar last modified: December 2010 website: www.zetcode.com Modificado y ampliado para ser una GUI de GDB para MIPS. (C) 2014 - Rafael Ignacio Zurita <[email protected]> Lea el archivo README.md para conocer la licencia de este programa. """ import time import sys import random from subprocess import Popen, PIPE, STDOUT from Tkinter import * from ttk import Frame, Button, Label, Style # Para extrar el nombre de archivo sin ruta import ntpath from ScrolledText import * import tkFileDialog import tkMessageBox class MipsxTkGui(Frame): def __init__(self, parent, control): Frame.__init__(self, parent) self.parent = parent self.parent.title("Mipsx - GUI for gdb multiarch") self.style = Style() self.style.theme_use("default") self.pack(fill=BOTH, expand=1) # Para expandir cuando las ventanas cambian de tamao for i in range(3): self.columnconfigure(i, weight=1) for i in range(20): self.rowconfigure(i, weight=1) lbl = Label(self, text="Foto") lbl.grid(row=3,column=0, sticky=W, pady=4, padx=5) self.registros = Text(self,height=12,width=40) self.registros.grid(row=4, column=0, columnspan=1, rowspan=5, sticky=E+W+S+N) lbl = Label(self, text="Foto 2") lbl.grid(row=3,column=1, sticky=W, pady=4, padx=5) self.foto2 = Text(self,height=12,width=40) self.foto2.grid(row=4, column=1, sticky=E+W+S+N) lbl = Label(self, text="Numero de la Video Camara") lbl.grid(row=2,column=0, sticky=W, pady=1, padx=5) self.camara = Entry(self) self.camara.grid(row=2, column=1, columnspan=1, padx=1, sticky=E+W+S+N) lbl = Label(self, text="Limite de grosor") lbl.grid(row=1,column=0, sticky=W, pady=4, padx=5) self.editor = Entry(self) self.editor.grid(row=1, column=1, columnspan=1, padx=1, sticky=E+W+S+N) menu = Menu(root) root.config(menu=menu) filemenu = Menu(menu) menu.add_command(label="Tomar Foto", command=control.ejecutar) helpmenu = Menu(menu) menu.add_cascade(label="Ayuda", menu=helpmenu) helpmenu.add_command(label="Acerca de...", command=control.acercade) menu.add_command(label="Salir", command=control.salir) def limpiar_panel(self, panel): panel.delete('1.0',END) def panel_agregar(self, panel, contenido): panel.insert(END, contenido) def panel_leer(self, panel): return panel.get('1.0', END+'-1c') def mostrar_en_area(self, area): print "hola" # Al abrir un archivo deseamos tener un area de trabajo cero def limpiar_paneles(self): self.mensajes.delete('1.0',END) self.memoria.delete('1.0',END) self.programa.delete('1.0',END) self.registros.delete('1.0',END) class MipsxControl(Frame): def __init__(self, parent): self.paneles = MipsxTkGui(parent, self) self.ejecucion = False # Variables globales self.archivoactual = "hello.s" self.archivoacompilar = "hello.s" # ip_mips = "10.0.15.232" self.ip_mips = "10.0.15.50" # ip_mips = "192.168.0.71" # Abrimos el archivo base # Si se finaliza el programa con click en el boton X llamamos a salir root.protocol("WM_DELETE_WINDOW", self.salir) def prox_instruccion(self): gdb.stdin.write('step 1\n') self.mostrar_en(self.paneles.mensajes, "proximo") self.estado() if self.ejecucion: self.memoria() self.registros() self.listado() def ejecutar(self): while self.ejecucion: self.prox_instruccion() def salida(self, w, findelinea): self.paneles.limpiar_panel(w) a = gdb.stdout.readline() while not findelinea in a: # Esto es para saber si la ejecucion termino'. # TODO: Hay que quitarlo de este metodo. Donde ponerlo? 
if "No stack" in a: self.ejecucion = False # w.insert(END,'\n\nEjecucion FINALIZADA\n\n') self.paneles.panel_agregar(w,'\n\nEjecucion FINALIZADA\n\n') a = a.replace('(gdb) ', '') # w.insert(END,a) self.paneles.panel_agregar(w, a) a = gdb.stdout.readline() def mostrar_en(self, w, findelinea): gdb.stdin.write(findelinea) gdb.stdin.write('\r\n') self.salida(w, findelinea) def mostrar_en_depuracion(self): print "hola 3" def memoria(self): # Para mostrar el segmento de datos, la etiqueta memoria debe estar al principio gdb.stdin.write('info address memoria\n') gdb.stdin.write('infomemoria\n') a = gdb.stdout.readline() solicitar_seg_de_datos = "" while not "infomemoria" in a: print "a : "+a if "Symbol " in a: a = a.replace('(gdb) Symbol "memoria" is at ', '') a = a.replace(' in a file compiled without debugging.','') solicitar_seg_de_datos = "x/40xw "+a+"\n" a = gdb.stdout.readline() if solicitar_seg_de_datos == "": gdb.stdin.write('x/40xw $pc\n') else: gdb.stdin.write(solicitar_seg_de_datos) gdb.stdin.write('x/50xw main\n') gdb.stdin.write('x/128xw $sp - 128\n') self.mostrar_en(self.paneles.memoria, "memoria") def estado(self): gdb.stdin.write('info frame\n') self.mostrar_en(self.paneles.mensajes, "estado") def registros(self): gdb.stdin.write('info register\n') self.mostrar_en(self.paneles.registros, "registros") def listado(self): gdb.stdin.write('list 1,100\n') # gdb.stdin.write('disas main\n') gdb.stdin.write('disas \n') self.mostrar_en(self.paneles.programa, "listado") def compilarycargar(self): self.paneles.limpiar_panel(self.paneles.mensajes) self.paneles.panel_agregar(self.paneles.mensajes, "Compilando y Cargando ...\r\n") root.update_idletasks() # Nos liberamos del debugging actual gdb.stdin.write('detach \n') self.guardar_archivo_a_compilar() def abrir_en_editor(self, archivo): self.paneles.limpiar_panel(self.paneles.editor) self.archivoactual = archivo def abrir(self): FILEOPENOPTIONS = dict(defaultextension='*.s', filetypes=[('Archivo assembler','*.s'), ('Todos los archivos','*.*')]) file = tkFileDialog.askopenfile(parent=root,mode='rb',title='Select a file', **FILEOPENOPTIONS) if file != None: self.paneles.limpiar_paneles() self.abrir_en_editor(file.name) def guardar_archivo_a_compilar(self): print "hola 3" def guardar(self): file = tkFileDialog.asksaveasfile(mode='w') if file != None: # slice off the last character from get, as an extra return is added # data = editor.get('1.0', END+'-1c') data = self.paneles.panel_leer(self.paneles.editor) file.write(data) file.close() self.archivoactual = file.name print self.archivoactual def exit_command(self): if tkMessageBox.askokcancel("Quit", "Do you really want to quit?"): root.destroy() def acercade(self): label = tkMessageBox.showinfo("Acerca de", "MIPSX - GUI for gdb multiarch\n\nEntorno de desarrollo en lenguaje assembler arquitectura MIPS\nEste programa ensabla, genera el programa ejecutable, y lo ejecuta en modo debug en una maquina MIPS real\n\nCopyright 2014 Rafael Ignacio Zurita\n\nFacultad de Informatica\nUniversidad Nacional del Comahue\n\nThis program is free software; you can redistribute it and/or modify it under the terms of the GPL v2") def nuevo(self): self.paneles.limpiar_panel(self.paneles.editor) def no_hacer_nada(self): print "nada por hacer" def archivo_sin_guardar(self): data = self.paneles.panel_leer(self.paneles.editor) res = tkMessageBox.askquestion("Confirmar", "Archivo sin guardar\nEsta seguro de finalizar el programa?", icon='warning') if res == 'yes': return False return True def salir(self): # ip_mips = 
"10.0.15.50" # tub = Popen(['mipsx_finalizar_gdbserver.sh', ip_mips, self.PUERTOyPS], stdout=PIPE, stdin=PIPE, stderr=STDOUT) # streamdata = tub.communicate()[0] # Borrar todos los temporales quit() def main(): root.mainloop() if __name__ == '__main__': root = Tk() # Para expandir cuando las ventanas cambian de tamao root.columnconfigure(0,weight=1) root.rowconfigure(0, weight=1) app = MipsxControl(root) main()
gpl-2.0
-8,050,226,486,350,915,000
25.4
458
0.660895
false
aginzberg/crowdsource-platform
csp/urls.py
1
4436
from django.conf.urls import patterns, include, url from django.contrib.staticfiles.urls import staticfiles_urlpatterns from crowdsourcing import views from mturk import views as mturk_views from crowdsourcing.viewsets.project import * from crowdsourcing.viewsets.user import UserViewSet, UserProfileViewSet, UserPreferencesViewSet from crowdsourcing.viewsets.requester import RequesterViewSet, QualificationViewSet from crowdsourcing.viewsets.rating import WorkerRequesterRatingViewset, RatingViewset from crowdsourcing.viewsets.worker import * from crowdsourcing.viewsets.task import TaskViewSet, TaskWorkerResultViewSet, TaskWorkerViewSet, \ ExternalSubmit from crowdsourcing.viewsets.template import TemplateViewSet, TemplateItemViewSet, TemplateItemPropertiesViewSet from crowdsourcing.viewsets.drive import * from crowdsourcing.viewsets.google_drive import GoogleDriveOauth, GoogleDriveViewSet from crowdsourcing.viewsets.message import ConversationViewSet, MessageViewSet, RedisMessageViewSet, \ ConversationRecipientViewSet from crowdsourcing.viewsets.file import FileViewSet from crowdsourcing.viewsets.payment import PayPalFlowViewSet, FinancialAccountViewSet from rest_framework.routers import SimpleRouter from mturk.viewsets import MTurkAssignmentViewSet, MTurkConfig, MTurkAccountViewSet router = SimpleRouter(trailing_slash=True) router.register(r'api/profile', UserProfileViewSet) router.register(r'api/user', UserViewSet) router.register(r'api/preferences', UserPreferencesViewSet) router.register(r'api/worker-requester-rating', WorkerRequesterRatingViewset) router.register(r'api/rating', RatingViewset) router.register(r'api/requester', RequesterViewSet) router.register(r'api/project', ProjectViewSet) router.register(r'api/category', CategoryViewSet) router.register(r'api/worker-skill', WorkerSkillViewSet) router.register(r'api/worker', WorkerViewSet) router.register(r'api/skill', SkillViewSet) router.register(r'api/task', TaskViewSet) router.register(r'api/task-worker', TaskWorkerViewSet) router.register(r'api/task-worker-result', TaskWorkerResultViewSet) router.register(r'api/qualification', QualificationViewSet) router.register(r'api/template', TemplateViewSet) router.register(r'api/template-item', TemplateItemViewSet) router.register(r'api/template-item-properties', TemplateItemPropertiesViewSet) router.register(r'api/drive-account', AccountModelViewSet) router.register(r'api/conversation', ConversationViewSet) router.register(r'api/conversation-recipients', ConversationRecipientViewSet) router.register(r'api/message', MessageViewSet) router.register(r'api/inbox', RedisMessageViewSet, base_name='redis-message') # router.register(r'api/google-drive', GoogleDriveOauth) router.register(r'api/payment-paypal', PayPalFlowViewSet) router.register(r'api/financial-accounts', FinancialAccountViewSet) router.register(r'^api/file', FileViewSet) mturk_router = SimpleRouter(trailing_slash=False) mturk_router.register(r'^api/mturk', MTurkAssignmentViewSet) mturk_router.register(r'^api/mturk-account', MTurkAccountViewSet) urlpatterns = patterns('', url(r'^api/v1/auth/registration-successful', views.registration_successful), url(r'^api/auth/login/$', views.Login.as_view()), url(r'^api/auth/logout/$', views.Logout.as_view()), url(r'^api/oauth2/', include('oauth2_provider.urls', namespace='oauth2_provider')), url(r'^api/oauth2-ng/token', views.Oauth2TokenView.as_view()), url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')), url(r'^api/google-drive/init', 
GoogleDriveOauth.as_view({'post': 'auth_init'})), url(r'^api/google-drive/finish', GoogleDriveOauth.as_view({'post': 'auth_end'})), url(r'^api/google-drive/list-files', GoogleDriveViewSet.as_view({'get': 'query'})), url(r'^api/done/$', ExternalSubmit.as_view()), url(r'', include(router.urls)), url(r'^mturk/task', mturk_views.mturk_index), url(r'', include(mturk_router.urls)), url(r'^api/mturk/url', MTurkConfig.as_view({'get': 'get_mturk_url'})), url('^.*$', views.home, name='home'), ) urlpatterns += staticfiles_urlpatterns()
mit
8,778,232,125,503,826,000
57.368421
111
0.74211
false
kfoltman/useq
pyuseq.py
1
3064
import ctypes class EventEncoding: @staticmethod def note_off(channel, note, vel): return 0x8003 + (channel - 1) * 256 + (note << 16) + (vel << 24) @staticmethod def note_on(channel, note, vel): return 0x9003 + (channel - 1) * 256 + (note << 16) + (vel << 24) @staticmethod def polyphonic_aftertouch(channel, note, vel): return 0xa003 + (channel - 1) * 256 + (note << 16) + (vel << 24) @staticmethod def cc(channel, note, vel): return 0xb003 + (channel - 1) * 256 + (note << 16) + (vel << 24) @staticmethod def program_change(channel, program): return 0xc002 + (channel - 1) * 256 + (program << 16) @staticmethod def channel_aftertouch(channel, pressure): return 0xd002 + (channel - 1) * 256 + (pressure << 16) @staticmethod def pitch_wheel(channel, pitch): # note: pitch = 0..16383 (centre is 8192) return 0xe003 + (channel - 1) * 256 + ((pitch & 127) << 16) + ((pitch // 128) << 24) class EncodeAsAscii(object): @classmethod def from_param(klass, value): if isinstance(value, str): return value.encode('ascii') elif isinstance(value, bytes): return value else: return str(value).encode('ascii') def init(): def __fn(name, res, argtypes): func = getattr(dll, name) func.restype = res func.argtypes = argtypes globals()[name] = func dll = ctypes.CDLL("./useq.so") __fn('useq_create', ctypes.c_void_p, []) __fn('useq_destroy', None, [ctypes.c_void_p]) __fn('useq_load_smf', ctypes.c_int, [ctypes.c_void_p, EncodeAsAscii]) __fn('useq_test', None, [ctypes.c_void_p]) __fn('useq_destroy_song', None, [ctypes.c_void_p]) __fn('useq_state_add_output', None, [ctypes.c_void_p, ctypes.c_void_p]) __fn('useq_state_set_tempo_ppqn', None, [ctypes.c_void_p, ctypes.c_float, ctypes.c_int]) __fn('useq_state_set_length', None, [ctypes.c_void_p, ctypes.c_int]) __fn('useq_jack_create', ctypes.c_int, [ctypes.c_void_p, EncodeAsAscii]) __fn('useq_jack_activate', None, [ctypes.c_void_p]) __fn('useq_jack_get_client', ctypes.c_void_p, [ctypes.c_void_p]) __fn('useq_jack_get_client_name', ctypes.c_char_p, [ctypes.c_void_p]) __fn('useq_jack_deactivate', None, [ctypes.c_void_p]) __fn('useq_jack_destroy', None, [ctypes.c_void_p]) __fn('useq_track_new', ctypes.c_void_p, [ctypes.c_int]) __fn('useq_track_set_event', None, [ctypes.c_void_p, ctypes.c_int, ctypes.c_int, ctypes.c_int]) __fn('useq_track_destroy', None, [ctypes.c_void_p]) __fn('useq_output_new', ctypes.c_void_p, [ctypes.c_void_p, EncodeAsAscii]) __fn('useq_output_add_track', None, [ctypes.c_void_p, ctypes.c_void_p]) __fn('useq_output_replace_track', None, [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]) __fn('useq_output_destroy', None, [ctypes.c_void_p]) dll = ctypes.CDLL("libjack.so.0") __fn("jack_connect", None, [ctypes.c_void_p, EncodeAsAscii, EncodeAsAscii]) init()
lgpl-3.0
-959,800,428,551,267,500
39.315789
99
0.596932
false
kvh/ramp
ramp/shortcuts.py
1
6990
import pandas as pd
try:
    import pylab as pl
except ImportError:
    pl = None

import sklearn
from sklearn.grid_search import ParameterSampler, ParameterGrid

from ramp.model_definition import ModelDefinition, model_definition_factory
from ramp import metrics, modeling, reporters
from ramp.reporters import MetricReporter, colors


class CVResult(object):
    def __init__(self, results, reporters=None, metrics=None):
        self.results = results
        self.reporters = reporters
        self.metrics = metrics
        self.model_def = self.results[0].model_def
        for r in metrics + reporters:
            if not r.processed:
                r.process_results(self.results)

    def __repr__(self):
        return repr(self.summary_df())

    def _repr_html_(self):
        return self.summary_df()._repr_html_()

    def summary_df(self):
        df = pd.concat([r.summary_df for r in self.metrics])
        df.index = [m.name for m in self.metrics]
        return df

    def summary(self):
        return self.summary_df()

    def plot(self):
        fig, axes = pl.subplots(1, len(self.metrics))
        for i, m in enumerate(self.metrics):
            m.plot(fig=fig, ax=axes[i])

    def classification_curve(self, x_metric, y_metric):
        x_metric = metrics.as_ramp_metric(x_metric)
        y_metric = metrics.as_ramp_metric(y_metric)
        dtmr = reporters.DualThresholdMetricReporter(x_metric, y_metric)
        dtmr.process_results(self.results)
        return dtmr

    def feature_importances(self):
        reporter = reporters.RFFeatureImportances()
        return self.build_report(reporter)

    def build_report(self, report):
        report.process_results(self.results)
        return report


class CVComparisonResult(object):
    def __init__(self, model_defs, cvresults):
        self.cvresults = cvresults
        self.model_defs = model_defs
        self.metrics = self.cvresults[0].metrics
        self.reporters = self.cvresults[0].reporters
        self.model_abbrs = ["Model %d" % (i+1) for i, md in enumerate(self.model_defs)]
        self.n = len(self.cvresults)

    def __repr__(self):
        # Pair each model abbreviation with its cross-validation summary.
        return '\n'.join("%s\n%s" % (k, repr(v.summary_df()))
                         for k, v in zip(self.model_abbrs, self.cvresults))

    def _repr_html_(self):
        return self.summary_df()._repr_html_()

    def summary_df(self):
        df = pd.concat([r.summary_df() for r in self.cvresults])
        df.index = pd.MultiIndex.from_product([self.model_abbrs,
                                               [m.name for m in self.metrics]])
        return df

    def summary(self):
        return self.summary_df()

    def model_legend(self):
        df = pd.DataFrame([cvr.model_def.describe() for cvr in self.cvresults])
        df.index = self.model_abbrs
        return df

    def plot(self):
        fig, axes = pl.subplots(1, len(self.metrics))
        fig.set_size_inches(12, 6)
        fig.tight_layout()
        for i, result in enumerate(self.cvresults):
            for m, ax in zip(result.metrics, axes):
                clr = colors[i % len(colors)]
                m.plot(fig, ax, index=i, color=clr)
        for m, ax in zip(self.metrics, axes):
            ax.set_xlim(-0.5, self.n - 0.5)
            ax.set_xticks(range(self.n))
            ax.set_title(m.metric.name)
            ax.set_xticklabels(self.model_abbrs, rotation=45 + min(1, self.n / 10) * 35)
            ax.autoscale(True, 'y')


def cross_validate(data=None, folds=5, repeat=1,
                   metrics=None, reporters=None,
                   model_def=None,
                   **kwargs):
    """Shortcut to cross-validate a single configuration.

    ModelDefinition variables are passed in as keyword
    args, along with the cross-validation parameters.
""" md_kwargs = {} if model_def is None: for arg in ModelDefinition.params: if arg in kwargs: md_kwargs[arg] = kwargs.pop(arg) model_def = ModelDefinition(**md_kwargs) if metrics is None: metrics = [] if reporters is None: reporters = [] metrics = [MetricReporter(metric) for metric in metrics] results = modeling.cross_validate(model_def, data, folds, repeat=repeat, **kwargs) for r in reporters + metrics: r.process_results(results) return CVResult(results, reporters, metrics) def cv_factory(data=None, folds=5, repeat=1, reporters=[], metrics=None, cv_runner=None, **kwargs): """Shortcut to iterate and cross-validate models. All ModelDefinition kwargs should be iterables that can be passed to model_definition_factory. Parameters: ___________ data: Raw DataFrame folds: If an int, than basic k-fold cross-validation will be done. Otherwise must be an iterable of tuples of pandas Indexes [(train_index, test_index), ...] repeat: How many times to repeat each cross-validation run of each model. Only makes sense if cross-validation folds are randomized. kwargs: Can be any keyword accepted by `ModelDefinition`. Values should be iterables. """ cv_runner = cv_runner or cross_validate md_kwargs = {} for arg in ModelDefinition.params: if arg in kwargs: md_kwargs[arg] = kwargs.pop(arg) model_def_fact = model_definition_factory(ModelDefinition(), **md_kwargs) results = [] model_defs = list(model_def_fact) for model_def in model_defs: reporters = [reporter.copy() for reporter in reporters] cvr = cv_runner(model_def=model_def, data=data, folds=folds, repeat=repeat, reporters=reporters, metrics=metrics, **kwargs) results.append(cvr) return CVComparisonResult(model_defs, results) def param_search(estimator, param_dict, n_iter=None, seed=None): """ Generator for cloned copies of `estimator` set with parameters as specified by `param_dict`. `param_dict` can contain either lists of parameter values (grid search) or a scipy distribution function to be sampled from. If distributions, you must specify `n_iter`. Parameters: ___________ estimator: sklearn-like estimator param_dict: dict of parameter name: values, where values can be an iterable or a distribution function n_iter: number of draws to take from parameter distributions """ if n_iter is None: param_iter = ParameterGrid(param_dict) else: param_iter = ParameterSampler(param_dict, n_iter, random_state=seed) estimators = [] for params in param_iter: new_estimator = sklearn.clone(estimator) new_estimator.set_params(**params) estimators.append(new_estimator) return estimators
mit
1,052,647,481,931,699,700
31.663551
87
0.601288
false
canaryhealth/armor
setup.py
1
2740
#!/usr/bin/env python # -*- coding: utf-8 -*- #------------------------------------------------------------------------------ # file: $Id$ # auth: Philip J Grabner <[email protected]> # date: 2015/11/02 # copy: (C) Copyright 2015-EOT Canary Health, Inc., All Rights Reserved. #------------------------------------------------------------------------------ import os, sys, setuptools from setuptools import setup, find_packages # require python 2.7+ if sys.hexversion < 0x02070000: raise RuntimeError('This package requires python 2.7 or better') heredir = os.path.abspath(os.path.dirname(__file__)) def read(*parts, **kw): try: return open(os.path.join(heredir, *parts)).read() except: return kw.get('default', '') test_dependencies = [ 'nose >= 1.3.0', 'coverage >= 3.5.3', ] dependencies = [ 'six >= 1.6.1', 'FormEncode >= 1.2.5', ] extras_dependencies = { } classifiers = [ 'Development Status :: 1 - Planning', # 'Development Status :: 2 - Pre-Alpha', # 'Development Status :: 3 - Alpha', # 'Development Status :: 4 - Beta', # 'Development Status :: 5 - Production/Stable', # 'Development Status :: 6 - Mature', # 'Development Status :: 7 - Inactive', 'Intended Audience :: Developers', 'Programming Language :: Python', 'Environment :: Console', 'Operating System :: OS Independent', 'Topic :: Internet', 'Topic :: Security', 'Topic :: Software Development', 'Topic :: Software Development :: Object Brokering', 'Topic :: Utilities', 'Natural Language :: English', 'License :: OSI Approved :: MIT License', 'License :: Public Domain', ] setup( name = 'armor', version = read('VERSION.txt', default='0.0.1').strip(), description = 'A data validation and sanitization library', long_description = read('README.rst'), classifiers = classifiers, author = 'Canary Health Inc', author_email = '[email protected]', url = 'http://github.com/canaryhealth/armor', keywords = 'data normalize validate validation sanitize sanitization', packages = find_packages(), include_package_data = True, zip_safe = True, install_requires = dependencies, extras_require = extras_dependencies, tests_require = test_dependencies, test_suite = 'armor', entry_points = '', license = 'MIT (http://opensource.org/licenses/MIT)', ) #------------------------------------------------------------------------------ # end of $Id$ # $ChangeLog$ #------------------------------------------------------------------------------
mit
-3,236,055,758,123,467,300
32.82716
85
0.525182
false
iannesbitt/iannesbitt.org
livehome/views.py
1
1152
from __future__ import unicode_literals

from django.shortcuts import render
from django.utils import timezone
from django.http import HttpResponse

from .models import Outdoor, Measurement


def livehome(request):
    m = Measurement.objects.all()[:96]
    context = {
        'm': m,
        'page': 'Indoor Conditions',
    }
    return render(request, 'livehome/livehome.html', context)


def upload(request):
    if 'NA' == request.GET.get('t', 'NA') or 'NA' == request.GET.get('h', 'NA'):
        html = "<html><body>Error: no data sent</body></html>"
        return HttpResponse(html)
    elif '-999' == request.GET['t'] or '-999' == request.GET['h']:
        # GET parameters are strings, so compare against '-999'; the record must
        # also be fetched (or created) before its fields can be set.
        o, e = Outdoor.objects.get_or_create(id=1)
        o.time = timezone.now()
        o.temperature = request.GET['t']
        o.humidity = request.GET['h']
        o.save()
        html = "<html><body>Error: invalid data</body></html>"
        return HttpResponse(html)
    else:
        o, e = Outdoor.objects.get_or_create(id=1)
        o.time = timezone.now()
        o.temperature = request.GET['t']
        o.humidity = request.GET['h']
        o.save()
        html = "<html><body>Success</body></html>"
        return HttpResponse(html)
mpl-2.0
1,059,811,628,350,712,000
31.914286
80
0.598958
false
sbnoemi/notorhot
notorhot/admin.py
1
1546
from django.contrib import admin from sorl.thumbnail.admin import AdminImageMixin from notorhot.models import Competition, Candidate, CandidateCategory class CompetitionAdmin(admin.ModelAdmin): list_display = ('__unicode__', 'category', 'date_presented', 'date_voted', 'winner',) date_hierarchy = 'date_voted' list_filter = ('category',) search_fields = ('left__name', 'right__name',) raw_id_fields = ('left', 'right', 'winner', 'category',) class CompetitionInline(admin.TabularInline): model = Competition class LeftCompetitionInline(CompetitionInline): fk_name = 'left' extra = 0 class RightCompetitionInline(CompetitionInline): fk_name = 'right' extra = 0 class CandidateAdmin(AdminImageMixin, admin.ModelAdmin): list_display = ('__unicode__', 'pic', 'category', 'is_enabled', 'challenges', 'votes', 'wins',) date_hierarchy = 'added' search_fields = ('name',) list_filter = ('is_enabled', 'category',) raw_id_fields = ('category',) class CandidateInline(AdminImageMixin, admin.StackedInline): model = Candidate inline_classes = ('grp-collapse grp-open',) class CategoryAdmin(admin.ModelAdmin): inlines = [CandidateInline,] list_display = ('__unicode__', 'is_public', 'num_voted_competitions') list_filter = ('is_public',) search_fields = ('name',) admin.site.register(Competition, CompetitionAdmin) admin.site.register(Candidate, CandidateAdmin) admin.site.register(CandidateCategory, CategoryAdmin)
bsd-3-clause
-300,966,302,791,973,800
28.188679
79
0.679819
false
kaday/cylc
lib/cylc/passphrase.py
1
9283
#!/usr/bin/env python # THIS FILE IS PART OF THE CYLC SUITE ENGINE. # Copyright (C) 2008-2015 NIWA # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. import os import re import sys import random import string from stat import * import cylc.flags from cylc.mkdir_p import mkdir_p from cylc.suite_host import get_hostname, is_remote_host from cylc.owner import user, is_remote_user # TODO - Pyro passphrase handling could do with a complete overhaul, but # it will soon be made obsolete by the upcoming communications refactor. class PassphraseError(Exception): """ Attributes: message - what the problem is. """ def __init__(self, msg): self.msg = msg def __str__(self): return repr(self.msg) class passphrase(object): """Pyro passphrase file utility.""" def __init__(self, suite, owner=None, host=None): self.suite = suite self.owner = owner self.host = host if self.owner is None: self.owner = user if self.host is None: self.host = get_hostname() self.location = None def get_passphrase_file(self, pfile=None, suitedir=None): """ Passphrase location, order of preference: 1/ The pfile argument - used for passphrase creation by "cylc register". 2/ The suite definition directory, because suites may be automatically installed (e.g. by Rose) to remote task hosts, and remote tasks know this location from their execution environment. Local user command invocations can use the suite registration database to find the suite definition directory. HOWEVER, remote user command invocations cannot do this even if the local and remote hosts share a common filesystem, because we cannot be sure if finding the expected suite registration implies a common filesystem or a different remote suite that happens to be registered under the same name. User accounts used for remote control must therefore install the passphrase in the secondary standard locations (below) or use the command line option to explicitly reveal the location. Remote tasks with 'ssh messaging = True' look first in the suite definition directory of the suite host, which they know through the variable CYLC_SUITE_DEF_PATH_ON_SUITE_HOST in the task execution environment. 3/ Secondary locations: (i) $HOME/.cylc/SUITE_HOST/SUITE_OWNER/SUITE_NAME/passphrase (ii) $HOME/.cylc/SUITE_HOST/SUITE_NAME/passphrase (iii) $HOME/.cylc/SUITE_NAME/passphrase These are more sensible locations for remote suite control from accounts that do not actually need the suite definition directory to be installed. """ # 1/ Explicit suite definition directory given on the command line. if pfile: if os.path.isdir(pfile): pfile = os.path.join(pfile, 'passphrase') if os.path.isfile(pfile): self.set_location(pfile) else: # If an explicit location is given, the file must exist. raise PassphraseError( 'ERROR, file not found on %s@%s: %s' % ( user, get_hostname(), pfile)) # 2/ Cylc commands with suite definition directory from local reg. 
if not self.location and suitedir: pfile = os.path.join(suitedir, 'passphrase') if os.path.isfile(pfile): self.set_location(pfile) # (2 before 3 else sub-suites load their parent suite's # passphrase on start-up because the "cylc run" command runs in # a parent suite task execution environment). # 3/ Running tasks: suite def dir from the task execution environment. if not self.location: try: # Test for presence of task execution environment suite_host = os.environ['CYLC_SUITE_HOST'] suite_owner = os.environ['CYLC_SUITE_OWNER'] except KeyError: # not called by a task pass else: # called by a task if is_remote_host(suite_host) or is_remote_user(suite_owner): # 2(i)/ Task messaging call on a remote account. # First look in the remote suite run directory than suite # definition directory ($CYLC_SUITE_DEF_PATH is modified # for remote tasks): for key in ['CYLC_SUITE_RUN_DIR', 'CYLC_SUITE_DEF_PATH']: if key in os.environ: pfile = os.path.join(os.environ[key], 'passphrase') if os.path.isfile(pfile): self.set_location(pfile) break else: # 2(ii)/ Task messaging call on the suite host account. # Could be a local task or a remote task with 'ssh # messaging = True'. In either case use # $CYLC_SUITE_DEF_PATH_ON_SUITE_HOST which never # changes, not $CYLC_SUITE_DEF_PATH which gets # modified for remote tasks as described above. try: pfile = os.path.join( os.environ['CYLC_SUITE_DEF_PATH_ON_SUITE_HOST'], 'passphrase') except KeyError: pass else: if os.path.isfile(pfile): self.set_location(pfile) # 4/ Other allowed locations, as documented above. if not self.location: locations = [] # For remote control commands, self.host here will be fully # qualified or not depending on what's given on the command line. short_host = re.sub('\..*', '', self.host) prefix = os.path.join(os.environ['HOME'], '.cylc') locations.append( os.path.join( prefix, self.host, self.owner, self.suite, 'passphrase')) if short_host != self.host: locations.append(os.path.join( prefix, short_host, self.owner, self.suite, 'passphrase')) locations.append( os.path.join(prefix, self.host, self.suite, 'passphrase')) if short_host != self.host: locations.append(os.path.join( prefix, short_host, self.suite, 'passphrase')) locations.append(os.path.join(prefix, self.suite, 'passphrase')) for pfile in locations: if os.path.isfile(pfile): self.set_location(pfile) break if not self.location: raise PassphraseError( 'ERROR: passphrase for suite %s not found on %s@%s' % ( self.suite, user, get_hostname())) return self.location def set_location(self, pfile): if cylc.flags.debug: print '%s (%s@%s)' % (pfile, user, get_hostname()) self.location = pfile def generate(self, dir): pfile = os.path.join(dir, 'passphrase') if os.path.isfile(pfile): try: self.get(pfile) return except PassphraseError: pass # Note: Perhaps a UUID might be better here? 
char_set = ( string.ascii_uppercase + string.ascii_lowercase + string.digits) self.passphrase = ''.join(random.sample(char_set, 20)) mkdir_p(dir) f = open(pfile, 'w') f.write(self.passphrase) f.close() # set passphrase file permissions to owner-only os.chmod(pfile, 0600) if cylc.flags.verbose: print 'Generated suite passphrase: %s@%s:%s' % ( user, get_hostname(), pfile) def get(self, pfile=None, suitedir=None): ppfile = self.get_passphrase_file(pfile, suitedir) psf = open(ppfile, 'r') lines = psf.readlines() psf.close() if len(lines) != 1: raise PassphraseError( 'ERROR, invalid passphrase file: %s@%s:%s' % ( user, get_hostname(), ppfile)) # chomp trailing whitespace and newline self.passphrase = lines[0].strip() return self.passphrase def get_passphrase(suite, owner, host, db): """Find a suite passphrase.""" try: # Local suite, retrieve suite definition directory location. suitedir = os.path.dirname(db.get_suiterc(suite)) except db.Error: suitedir = None return passphrase(suite, owner, host).get(None, suitedir)
gpl-3.0
3,063,340,828,829,731,000
39.537118
79
0.595605
false