content (string, 7-928k) | avg_line_length (float64, 3.5-33.8k) | max_line_length (int64, 6-139k) | alphanum_fraction (float64, 0.08-0.96) | licenses (sequence) | repository_name (string, 7-104) | path (string, 4-230) | size (int64, 7-928k) | lang (stringclasses, 1 value) |
---|---|---|---|---|---|---|---|---|
import numpy
from chainer import cuda
from chainer import function
from chainer.utils import type_check
class Concat(function.Function):

    """Concatenate multiple tensors along the specified axis."""

    # concat along the channel dimension by default
    def __init__(self, axis=1):
        self.axis = axis

    def check_type_forward(self, in_types):
        type_check.expect(in_types.size() > 0)
        type_check.expect(in_types[0].ndim >
                          type_check.Variable(self.axis, 'axis'))
        ndim = in_types[0].ndim.eval()
        for i in range(1, in_types.size().eval()):
            type_check.expect(
                in_types[0].dtype == in_types[i].dtype,
                in_types[0].ndim == in_types[i].ndim,
            )
            for d in range(0, ndim):
                if d == self.axis:
                    continue
                type_check.expect(in_types[0].shape[d] == in_types[i].shape[d])

    def forward(self, xs):
        xp = cuda.get_array_module(*xs)
        return xp.concatenate(xs, axis=self.axis),

    def backward(self, xs, gy):
        xp = cuda.get_array_module(*xs)
        sizes = numpy.array([x.shape[self.axis] for x in xs[:-1]]).cumsum()
        return xp.split(gy[0], sizes, axis=self.axis)


def concat(xs, axis=1):
    """Concatenates given variables along an axis.

    Args:
        xs (tuple of Variables): Variables to be concatenated.
        axis (int): Axis that the input arrays are concatenated along.

    Returns:
        ~chainer.Variable: Output variable.
    """
    return Concat(axis=axis)(*xs)
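
# --- Added usage sketch (not part of the original chainer source) ---
# A minimal, hedged example of calling the `concat` wrapper defined above;
# it assumes NumPy inputs wrapped in `chainer.Variable`, which is how Chainer
# functions are normally invoked.
if __name__ == '__main__':
    import numpy as np
    from chainer import Variable

    x1 = Variable(np.zeros((2, 3, 4), dtype=np.float32))
    x2 = Variable(np.ones((2, 5, 4), dtype=np.float32))
    y = concat((x1, x2), axis=1)  # concatenated along axis 1 -> shape (2, 8, 4)
    print(y.data.shape)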
| 29.425926 | 79 | 0.600378 | [
"MIT"
] | umitanuki/chainer | chainer/functions/concat.py | 1,589 | Python |
import cv2


# Note: despite its name, this applies a median blur (cv2.medianBlur),
# not an arithmetic-mean filter.
def mean(image):
    return cv2.medianBlur(image, 5)


def gaussian(image):
    return cv2.GaussianBlur(image, (5, 5), 0)
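
# --- Added usage sketch (not part of the original file) ---
# Hedged example of applying the blur helpers above; the file names are
# placeholders chosen for illustration, not files shipped with this project.
if __name__ == '__main__':
    img = cv2.imread('example.png')
    if img is not None:
        cv2.imwrite('example_median.png', mean(img))        # 5x5 median blur
        cv2.imwrite('example_gaussian.png', gaussian(img))  # 5x5 Gaussian blur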
| 13.5 | 45 | 0.674074 | [
"MIT"
] | Lukasz1928/DICOM-viewer | viewer/images/edits/blur.py | 135 | Python |
class BluePrint:
    def __init__(self, build_config, build_class, name="", run_end_of_year=False):
        self.run_end_of_year = run_end_of_year
        self.name = name
        self.build_config = build_config
        self.build_class = build_class
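
# --- Added usage sketch (not part of the original file) ---
# Minimal, hedged example of constructing a BluePrint; `DemoModule` and the
# config dict are hypothetical stand-ins, not classes from this repository.
if __name__ == "__main__":
    class DemoModule:
        pass

    blueprint = BluePrint(build_config={"start_value": 1000},
                          build_class=DemoModule,
                          name="demo",
                          run_end_of_year=True)
    print(blueprint.name, blueprint.build_class.__name__)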
| 36 | 82 | 0.698413 | [
"MIT"
] | tnikodem/futureplaner | fup/core/config.py | 252 | Python |
import faker
from unittest import mock
from unittest.mock import patch
from foundations_spec import *
from foundations_core_cli.command_line_interface import CommandLineInterface
from foundations_atlas_cli.sub_parsers.atlas.atlas_parser import AtlasParser
class TestAtlasParser(Spec):
class MockSleep(object):
_epsilon = 0.0001
def __init__(self):
self._time_elapsed = 0
self.time_to_wait = 0
self.callback = lambda: None
def __call__(self, wait_time):
self._time_elapsed += wait_time
if self._time_elapsed >= self.time_to_wait - self._epsilon:
self.callback()
run_file = let_patch_mock('importlib.import_module')
os_file_exists = let_patch_mock('os.path.isfile')
os_chdir = let_patch_mock('os.chdir')
os_kill = let_patch_mock('os.kill')
subprocess_popen = let_patch_mock('subprocess.Popen')
print_mock = let_patch_mock('builtins.print')
exit_mock = let_patch_mock('sys.exit')
open_mock = let_patch_mock('builtins.open')
server_process = let_mock()
requests_post_mock = let_patch_mock('requests.post')
config_manager_mock = let_patch_mock('foundations_contrib.global_state.config_manager')
environment_fetcher_mock = let_patch_mock('foundations_core_cli.environment_fetcher.EnvironmentFetcher.get_all_environments')
find_environment_mock = let_patch_mock('foundations_core_cli.environment_fetcher.EnvironmentFetcher.find_environment')
artifact_downloader_class_mock = let_patch_mock('foundations_contrib.archiving.artifact_downloader.ArtifactDownloader')
artifact_downloader_mock = let_mock()
get_pipeline_archiver_for_job_mock = let_patch_mock('foundations_contrib.archiving.get_pipeline_archiver_for_job')
pipeline_archiver_mock = let_mock()
mock_deploy_job = let_patch_mock('foundations_contrib.job_deployer.deploy_job')
@let
def fake_model_server_pid(self):
import random
return random.randint(1,65000)
@let
def mock_job_id(self):
return self.faker.uuid4()
@let
def mock_model_name(self):
return f'model-{self.faker.random.randint(1000, 9999)}'
@let
def mock_user_provided_model_name(self):
return self.faker.word()
@let_now
def os_cwd(self):
mock = self.patch('os.getcwd')
mock.return_value = '/path/to/where/ever/we/are'
return mock
def _get_mock_file(self):
mock_file_object = Mock()
mock_file_object.__enter__ = lambda x: mock_file_object
mock_file_object.__exit__ = Mock()
return mock_file_object
@let_now
def mock_pid_file(self):
return self._get_mock_file()
@let_now
def sleep_mock(self):
return self.patch('time.sleep', self.MockSleep())
@let
def fake_save_dir(self):
return self.faker.uri_path()
@let
def fake_source_dir(self):
return self.faker.uri_path()
@let
def fake_env(self):
return self.faker.word()
@let
def fake_job_status(self):
status = self.faker.word()
while status == 'queued':
status = self.faker.word()
return status
@let
def server_startup_time(self):
from random import random
between_zero_and_one = random()
return between_zero_and_one * 2.7 + 0.2
@let
def mock_job_deployment(self):
return Mock()
@let
def fake_job_logs(self):
return self.faker.sentence()
@let
def pipeline_context(self):
from foundations_internal.pipeline_context import PipelineContext
return PipelineContext()
@let
def fake_script_file_name(self):
return '{}.py'.format(self.faker.word())
@let
def fake_project_name(self):
return self.faker.word()
@let
def fake_directory(self):
return self.faker.file_path()
@let
def ram(self):
return self.faker.random.random() * 8 + 0.0001
@let
def num_gpus(self):
return self.faker.random_int(0, 8)
@let
def level_1_subparsers_mock(self):
return Mock()
@let
def level_2_subparsers_mock(self):
return Mock()
@let
def level_2_parser_mock(self):
return Mock()
@let
def level_3_parser_mock(self):
return Mock()
@let
def command(self):
return self.faker.word()
def fake_config_path(self, environment):
return 'home/foundations/lou/config/{}.config.yaml'.format(environment)
def test_sub_parser_retrieves_command_line_interface_as_parameter(self):
cli = CommandLineInterface([''])
atlas_sub_parser = AtlasParser(cli)
self.assertTrue(type(atlas_sub_parser._cli) is CommandLineInterface)
def test_sub_parser_setup_parser_on_cli_instantiation(self):
mock_add_parser = self.patch('foundations_atlas_cli.sub_parsers.atlas.atlas_parser.AtlasParser.add_sub_parser')
CommandLineInterface([''])
mock_add_parser.assert_called_once()
@patch('argparse.ArgumentParser')
def test_retrieve_artifact_has_correct_options(self, parser_class_mock):
parser_mock = Mock()
parser_class_mock.return_value = parser_mock
parser_mock.add_subparsers.return_value = self.level_1_subparsers_mock
self.level_1_subparsers_mock.add_parser.return_value = self.level_2_parser_mock
self.level_2_parser_mock.add_subparsers.return_value = self.level_2_subparsers_mock
self.level_2_subparsers_mock.add_parser.return_value = self.level_3_parser_mock
CommandLineInterface([])
parser_class_mock.assert_called_with(prog='foundations')
version_call = call('--version', action='store_true', help='Displays the current Foundations version')
debug_call = call('--debug', action='store_true', help='Sets debug mode for the CLI')
parser_mock.add_argument.assert_has_calls(
[
version_call,
debug_call
]
)
retrieve_call = call('get', help='Get file types from execution environments')
self.level_1_subparsers_mock.add_parser.assert_has_calls([retrieve_call])
retrieve_argument_call = call('job', help='Specify job to retrieve artifacts from')
job_id_call = call('job_id', type=str, help='Specify job uuid of already deployed job')
env_call = call('scheduler_config', type=str, help='Environment to get from')
save_directory_call = call('--save_dir', type=str, default=None, help='Specify local directory path for artifacts to save to. Defaults to directory within current working directory')
source_directory_call = call('--source_dir', type=str, default='', help='Specify relative directory path to download artifacts from. Default will download all artifacts from job')
self.level_2_subparsers_mock.add_parser.assert_has_calls([retrieve_argument_call])
self.level_3_parser_mock.add_argument.assert_has_calls(
[
job_id_call,
env_call,
save_directory_call,
source_directory_call
],
any_order=True
)
def test_retrieve_artifacts_fails_if_missing_environment(self):
self.find_environment_mock.return_value = []
CommandLineInterface(['get', 'job', self.fake_env, self.mock_job_id]).execute()
self.exit_mock.assert_called_with(1)
def test_retrieve_artifacts_prints_error_if_missing_environment(self):
self.find_environment_mock.return_value = []
CommandLineInterface(['get', 'job', self.fake_env, self.mock_job_id]).execute()
self.print_mock.assert_any_call('Could not find submission configuration with name: `{}`'.format(self.fake_env))
def test_submit_forwards_default_arguments_to_command_line_job_submission(self):
self.patch('foundations_core_cli.job_submission.submit_job.submit', MockCommandLineJobDeployer)
expected_arguments = Mock()
expected_arguments.scheduler_config = self.fake_env
expected_arguments.job_directory = self.fake_directory
expected_arguments.entrypoint = None
expected_arguments.project_name = None
expected_arguments.ram = None
expected_arguments.num_gpus = None
expected_arguments.stream_job_logs = True
expected_arguments.command = [self.command]
CommandLineInterface(['submit', self.fake_env, self.fake_directory, self.command]).execute()
arguments = MockCommandLineJobDeployer.arguments
self._assert_submit_arguments_equal(expected_arguments, arguments)
def test_submit_forwards_specified_arguments_to_command_line_job_submission(self):
self.patch('foundations_core_cli.job_submission.submit_job.submit', MockCommandLineJobDeployer)
expected_arguments = Mock()
expected_arguments.scheduler_config = self.fake_env
expected_arguments.job_directory = self.fake_directory
expected_arguments.entrypoint = self.fake_script_file_name
expected_arguments.project_name = self.fake_project_name
expected_arguments.ram = self.ram
expected_arguments.num_gpus = self.num_gpus
expected_arguments.stream_job_logs = False
expected_arguments.command = [self.command]
command_to_run = [
'submit',
f'--entrypoint={self.fake_script_file_name}',
f'--project-name={self.fake_project_name}',
f'--ram={self.ram}',
f'--num-gpus={self.num_gpus}',
f'--stream-job-logs=False',
self.fake_env,
self.fake_directory,
self.command
]
CommandLineInterface(command_to_run).execute()
arguments = MockCommandLineJobDeployer.arguments
self._assert_submit_arguments_equal(expected_arguments, arguments)
@patch('argparse.ArgumentParser')
def test_retrieve_logs_has_correct_options(self, parser_class_mock):
parser_mock = Mock()
parser_class_mock.return_value = parser_mock
parser_mock.add_subparsers.return_value = self.level_1_subparsers_mock
self.level_1_subparsers_mock.add_parser.return_value = self.level_2_parser_mock
self.level_2_parser_mock.add_subparsers.return_value = self.level_2_subparsers_mock
self.level_2_subparsers_mock.add_parser.return_value = self.level_3_parser_mock
CommandLineInterface([])
parser_class_mock.assert_called_with(prog='foundations')
version_call = call('--version', action='store_true', help='Displays the current Foundations version')
debug_call = call('--debug', action='store_true', help='Sets debug mode for the CLI')
parser_mock.add_argument.assert_has_calls(
[
version_call,
debug_call
]
)
retrieve_call = call('get', help='Get file types from execution environments')
self.level_1_subparsers_mock.add_parser.assert_has_calls([retrieve_call])
retrieve_argument_call = call('logs', help='Get logs for jobs')
job_id_call = call('scheduler_config', type=str, help='Environment to get from')
env_call = call('job_id', type=str, help='Specify job uuid of already deployed job')
self.level_2_subparsers_mock.add_parser.assert_has_calls([retrieve_argument_call])
self.level_3_parser_mock.add_argument.assert_has_calls(
[
job_id_call,
env_call
],
any_order=True
)
def test_get_job_logs_for_environment_that_does_not_exist_prints_error_message(self):
self.find_environment_mock.return_value = []
CommandLineInterface(['get', 'logs', self.fake_env, self.mock_job_id]).execute()
self.print_mock.assert_any_call('Could not find submission configuration with name: `{}`'.format(self.fake_env))
def test_get_job_logs_for_environment_that_does_not_exist_exits_with_code_1(self):
self.find_environment_mock.return_value = []
CommandLineInterface(['get', 'logs', self.fake_env, self.mock_job_id]).execute()
self.exit_mock.assert_called_with(1)
def test_get_job_logs_for_environment_that_exists_for_job_that_does_not_exist_prints_error_message(self):
self._set_job_status(None)
self.find_environment_mock.return_value = [self.fake_config_path(self.fake_env)]
CommandLineInterface(['get', 'logs', self.fake_env, self.mock_job_id]).execute()
self.print_mock.assert_called_with('Error: Job `{}` does not exist for environment `{}`'.format(self.mock_job_id, self.fake_env))
def test_get_job_logs_for_environment_that_exists_for_job_that_does_not_exist_exits_with_code_1(self):
self._set_job_status(None)
self.find_environment_mock.return_value = [self.fake_config_path(self.fake_env)]
CommandLineInterface(['get', 'logs', self.fake_env, self.mock_job_id]).execute()
self.exit_mock.assert_called_with(1)
def test_get_job_logs_for_queued_job_prints_error_message(self):
self._set_job_status('queued')
self.find_environment_mock.return_value = [self.fake_config_path(self.fake_env)]
CommandLineInterface(['get', 'logs', self.fake_env, self.mock_job_id]).execute()
self.print_mock.assert_called_with('Error: Job `{}` is queued and has not produced any logs'.format(self.mock_job_id))
def test_get_job_logs_for_queued_job_exits_with_code_1(self):
self._set_job_status('queued')
self.find_environment_mock.return_value = [self.fake_config_path(self.fake_env)]
CommandLineInterface(['get', 'logs', self.fake_env, self.mock_job_id]).execute()
self.exit_mock.assert_called_with(1)
def test_get_job_logs_for_job_that_exists_and_is_not_queued_prints_logs(self):
self._set_job_status(self.fake_job_status)
self.mock_job_deployment.get_job_logs.return_value = self.fake_job_logs
self.find_environment_mock.return_value = [self.fake_config_path(self.fake_env)]
CommandLineInterface(['get', 'logs', self.fake_env, self.mock_job_id]).execute()
self.print_mock.assert_called_with(self.fake_job_logs)
def test_get_job_logs_for_job_that_exists_and_is_not_queued_does_not_call_exit(self):
self._set_job_status(self.fake_job_status)
self.mock_job_deployment.get_job_logs.return_value = self.fake_job_logs
load_mock = Mock()
self.patch('foundations_core_cli.job_submission.config.load', load_mock)
load_mock.return_value = None
CommandLineInterface(['get', 'logs', self.fake_env, self.mock_job_id]).execute()
self.exit_mock.assert_not_called()
def _assert_deploy_arguments_equal(self, expected_arguments, actual_arguments):
for attribute_name in ['env', 'job_directory', 'entrypoint', 'project_name', 'ram', 'num_gpus']:
self.assertEqual(getattr(expected_arguments, attribute_name), getattr(actual_arguments, attribute_name))
def _assert_submit_arguments_equal(self, expected_arguments, actual_arguments):
for attribute_name in ['scheduler_config', 'job_directory', 'entrypoint', 'project_name', 'ram', 'num_gpus', 'stream_job_logs', 'command']:
self.assertEqual(getattr(expected_arguments, attribute_name), getattr(actual_arguments, attribute_name))
def _set_run_script_environment(self, environment_to_set):
self.config_manager_mock.__getitem__ = ConditionalReturn()
self.config_manager_mock.__getitem__.return_when(environment_to_set, 'run_script_environment')
def _set_job_status(self, status):
self.mock_job_deployment.get_job_status.return_value = status
mock_job_deployment_class = ConditionalReturn()
mock_job_deployment_class.return_when(self.mock_job_deployment, self.mock_job_id, None, None)
mock_get_item = ConditionalReturn()
mock_get_item.return_when({'deployment_type': mock_job_deployment_class}, 'deployment_implementation')
self.config_manager_mock.__getitem__ = mock_get_item
class MockCommandLineJobDeployer(object):
arguments = None
deploy_called = False
def __init__(self, arguments):
MockCommandLineJobDeployer.arguments = arguments
def deploy(self):
MockCommandLineJobDeployer.deploy_called = True
| 40.398034 | 190 | 0.713478 | [
"Apache-2.0"
] | DeepLearnI/atlas | atlas/foundations_atlas_cli/src/test/sub_parsers/test_atlas_parser.py | 16,442 | Python |
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Constructs a variable encoder based on an encoder spec."""
from typing import Dict, List, Optional
import numpy as np
import tensorflow as tf
from covid_epidemiology.src.models.encoders import gam_encoder
from covid_epidemiology.src.models.encoders import variable_encoders
from covid_epidemiology.src.models.shared import typedefs
def encoder_from_encoder_spec(
encoder_spec,
chosen_locations,
num_known_timesteps,
forecast_window_size,
output_window_size,
static_features = None,
static_overrides = None,
covariates = None,
forecasted_covariates = None,
covariate_overrides = None,
ts_categorical_features = None,
random_seed = 0,
static_scalers = None,
ts_scalers = None,
trainable = True,
):
"""Returns a `FeatureEncoder` built as specified in the `encoder_spec`."""
encoder_kwargs = encoder_spec.encoder_kwargs
if encoder_spec.encoder_type == "gam":
gam_kwargs = {}
for kwarg in encoder_kwargs:
if kwarg == "link_fn":
gam_kwargs["link_fn"] = encoder_kwargs["link_fn"]
elif kwarg == "distribution":
gam_kwargs["distribution"] = encoder_kwargs["distribution"]
elif kwarg == "initial_bias":
gam_kwargs["initial_bias"] = encoder_kwargs["initial_bias"]
elif kwarg == "location_dependent_bias":
gam_kwargs["location_dependent_bias"] = encoder_kwargs[
"location_dependent_bias"]
elif kwarg == "lower_bound":
gam_kwargs["lower_bound"] = encoder_kwargs["lower_bound"]
elif kwarg == "upper_bound":
gam_kwargs["upper_bound"] = encoder_kwargs["upper_bound"]
elif kwarg == "use_fixed_covariate_mask":
gam_kwargs["use_fixed_covariate_mask"] = encoder_kwargs[
"use_fixed_covariate_mask"]
else:
raise ValueError(f"Unexpected kwarg: {kwarg} passed to encoder of type "
f"{encoder_spec.encoder_type}")
return gam_encoder.GamEncoder(
chosen_locations,
num_known_timesteps,
forecast_window_size=forecast_window_size,
output_window_size=output_window_size,
static_features=static_features,
static_scalers=static_scalers,
static_overrides=static_overrides,
covariates=covariates,
ts_scalers=ts_scalers,
forecasted_covariates=forecasted_covariates,
covariate_overrides=covariate_overrides,
static_feature_specs=encoder_spec.static_feature_specs,
covariate_feature_specs=encoder_spec.covariate_feature_specs,
ts_categorical_features=ts_categorical_features,
covariate_feature_time_offset=encoder_spec
.covariate_feature_time_offset,
covariate_feature_window=encoder_spec.covariate_feature_window,
random_seed=random_seed,
name=encoder_spec.encoder_name,
trainable=trainable,
**gam_kwargs)
elif encoder_spec.encoder_type == "static":
return variable_encoders.StaticEncoder()
elif encoder_spec.encoder_type == "passthrough":
return variable_encoders.PassThroughEncoder(
chosen_locations,
num_known_timesteps,
forecast_window_size,
covariates=covariates,
forecasted_covariates=forecasted_covariates,
covariate_overrides=covariate_overrides,
covariate_feature_specs=encoder_spec.covariate_feature_specs,
ts_categorical_features=ts_categorical_features,
name=encoder_spec.encoder_name)
elif encoder_spec.encoder_type == "vaccine":
return variable_encoders.VaccineEncoder(
chosen_locations,
num_known_timesteps,
forecast_window_size,
covariates=covariates,
forecasted_covariates=forecasted_covariates,
covariate_overrides=covariate_overrides,
covariate_feature_specs=encoder_spec.covariate_feature_specs,
ts_categorical_features=ts_categorical_features,
name=encoder_spec.encoder_name,
vaccine_type=encoder_spec.vaccine_type)
else:
raise ValueError(f"encoder_spec passed in with invalid encoder_type: "
f"{encoder_spec.encoder_type}")
| 38.634146 | 80 | 0.728325 | [
"Apache-2.0"
] | 04mayukh/google-research | covid_epidemiology/src/models/encoders/variable_encoder_builder.py | 4,752 | Python |
from typing import List


class Solution:
    def singleNumber(self, nums: List[int]) -> int:
        a = set()
        for i in nums:
            if i in a:
                a.remove(i)
            else:
                a.add(i)
        for i in a:
            return i
| 23.636364 | 51 | 0.373077 | [
"MIT"
] | FAPractice/leetcode | amazonSingleNumber.py | 260 | Python |
#! /usr/bin/env python
# -*- coding: utf8 -*-
'''
Copyright 2018 University of Liège
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
def test(res, tol):
import numpy as np
from cupydo.testing import *
# Read results from file
with open("AerodynamicCoeff.ascii", 'rb') as f:
lines = f.readlines()
resultA = np.genfromtxt(lines[-1:], delimiter=None)
# Check convergence and results
if (res > tol):
print "\n\n" + "FSI residual = " + str(res) + ", FSI tolerance = " + str(tol)
raise Exception(ccolors.ANSI_RED + "FSI algo failed to converge!" + ccolors.ANSI_RESET)
tests = CTests()
tests.add(CTest('Lift coefficient', resultA[2], 0.245, 1e-1, False)) # rel. tol. of 10%
tests.add(CTest('Drag coefficient', resultA[3], 0.0015, 1e-1, False)) # rel. tol. of 10%
tests.run()
def getFsiP():
"""Fsi parameters"""
import os
filePath = os.path.abspath(os.path.dirname(__file__))
p = {}
# Solvers and config files
p['fluidSolver'] = 'SU2'
p['solidSolver'] = 'RBMI'
p['cfdFile'] = os.path.join(filePath, 'PitchPlungeAirfoil_BGS_parallel_SU2Conf.cfg')
p['csdFile'] = os.path.join(filePath, 'PitchPlungeAirfoil_BGS_parallel_RBMConf.cfg')
# FSI objects
p['interpolator'] = 'Matching'
p['criterion'] = 'Displacements'
p['algorithm'] = 'StaticBGS'
# FSI parameters
p['compType'] = 'unsteady'
p['nDim'] = 2
p['dt'] = 0.001
p['tTot'] = 0.005
p['timeItTresh'] = 0
p['tol'] = 1e-8
p['maxIt'] = 6
p['omega'] = 1.0
p['nSteps'] = 0
p['firstItTgtMat'] = False
p['nodalLoadsType'] = 'force'
return p
def main():
import cupydo.interfaces.Cupydo as cupy
p = getFsiP() # get parameters
cupydo = cupy.CUPyDO(p) # create fsi driver
cupydo.run() # run fsi process
test(cupydo.algorithm.errValue, p['tol']) # check the results
# eof
print('')
# --- This is only accessed if running from command prompt --- #
if __name__ == '__main__':
main()
| 31.3375 | 95 | 0.64659 | [
"Apache-2.0"
] | AxelDechamps/CUPyDO | tests/SU2_RBM/PitchPlungeAirfoil_BGS_parallel_fsi.py | 2,508 | Python |
import functools
import jinja2
import json
import threading
from hashlib import sha256
from ..caserunconfiguration import CaseRunConfiguration, ConfigurationsList, CaseRunConfigurationsList
#from ..exceptions import UnknownEventSubTypeExpression
from .functions import dotted_startswith
from .structures.factory import EventStructuresFactory
class Event():
"""
Base class of an event. It stores the event type and its event structures
(automatically providing converted structures) and decides which testplans
and caserunconfigurations will be executed based on the event.
This base class can be used directly by providing settings, an event type
and, optionally, definitions of event_structures, which are constructed
using EventStructuresFactory. An event created this way uses the default
selection of testplans with testcases and exposes only the supplied event
structures (along with any automatically converted ones).
When defining a new event type, create a child class that inherits from
this class and provides additional methods and/or properties.
"""
def __init__(self, settings, event_type, **event_structures):
self.settings = settings
self.type = event_type
self.structures = {}
for structure_name, fields in event_structures.items():
self.structures[structure_name] = EventStructuresFactory.make(settings, structure_name, fields)
self.structures_convert_lock = threading.RLock()
self.id = sha256(f'{self.type}-{json.dumps(event_structures, sort_keys=True)}'.encode()).hexdigest()
def format_branch_spec(self, fmt):
return jinja2.Template(fmt).render(event=self)
def generate_caseRunConfigurations(self, library):
""" Generates caseRunConfigurations for testcases in library relevant to this event
:param library: Library
:type library: tplib.Library
:return: CaseRunConfigurations
:rtype: CaseRunConfigurationsList
"""
caseruns = CaseRunConfigurationsList()
for testplan in self.filter_testPlans(library):
# Init testplan configurations as ConfigurationsList
testplan_configurations = ConfigurationsList(testplan.configurations,
merge_method=self.settings.get('library', 'defaultCaseConfigMergeMethod'))
for testcase in testplan.verificationTestCases:
# Merge testplan configurations with testcase configurations
caserun_configurations = testplan_configurations.merge(testcase.configurations)
for configuration in caserun_configurations:
# Create CaseRunConfiguration
caseruns.append(CaseRunConfiguration(testcase, configuration, [testplan]))
return caseruns
def handles_testplan_artifact_type(self, artifact_type):
"""
Decide if this event is relevant to the provided artifact_type (which
is found in test plan).
"""
return dotted_startswith(self.type, artifact_type)
def filter_testPlans(self, library):
""" Filters testplan from library based on:
- event type and testplan.artifact_type
- testplan execute_on filter
:param library: pipeline library
:type library: tplib.Library
:return: Filtered testplans
:rtype: list of tplib.TestPlan
"""
return library.getTestPlansByQuery('event.handles_testplan_artifact_type(tp.artifact_type) and tp.eval_execute_on(event=event)', event=self)
@property
def additional_testplans_data(self):
""" Event can provide additional testplans. Returns python
dicts, as if they were tplib files read by yaml.safe_load.
:return: list of testplan data
:rtype: tuple
"""
return None
@property
def additional_requrements_data(self):
""" Event can provide additional requrements. Returns python
dicts, as if they were tplib files read by yaml.safe_load.
:return: list of requirements data
:rtype: tuple
"""
return None
@property
def additional_testcases_data(self):
""" Event can provide additional testcases. Returns python
dicts, as if they were tplib files read by yaml.safe_load.
:return: list of testcases data
:rtype: tuple
"""
return None
def __getattr__(self, attrname):
if attrname not in EventStructuresFactory.known():
return super().__getattribute__(attrname)
with self.structures_convert_lock:
try:
return self.structures[attrname]
except KeyError:
pass
structure = EventStructuresFactory.convert(attrname, self.structures)
if structure is NotImplemented:
# Return None if the requested structure is not compatible to
# allow jinja templates to not crash on expressions like
# event.nonexisting_structure.foo but to consider them as None
return None
self.structures[attrname] = structure
return structure
def payload_override(payload_name):
def decorator(method):
@functools.wraps(method)
def decorated(self, *args, **kwargs):
try:
return self.payload[payload_name]
except KeyError:
return method(self, *args, **kwargs)
return decorated
return decorator
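
# --- Added usage sketch (not part of the original file) ---
# Hedged illustration of how payload_override behaves: if the object's
# `payload` dict contains the named key, the decorated method is bypassed and
# the payload value is returned instead. `DemoEvent` is a hypothetical class,
# not something defined in this module.
if __name__ == "__main__":
    class DemoEvent:
        def __init__(self, payload):
            self.payload = payload

        @payload_override("compose_id")
        def compose_id(self):
            return "computed-default"

    print(DemoEvent({"compose_id": "override-123"}).compose_id())  # override-123
    print(DemoEvent({}).compose_id())  # computed-default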
| 40.456522 | 148 | 0.679742 | [
"MIT"
] | rhinstaller/permian | libpermian/events/base.py | 5,583 | Python |
# coding: utf-8
"""
FlashArray REST API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flasharray.FA_2_1 import models
class RetentionPolicy(object):
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'all_for_sec': 'int',
'days': 'int',
'per_day': 'int'
}
attribute_map = {
'all_for_sec': 'all_for_sec',
'days': 'days',
'per_day': 'per_day'
}
required_args = {
}
def __init__(
self,
all_for_sec=None, # type: int
days=None, # type: int
per_day=None, # type: int
):
"""
Keyword args:
all_for_sec (int): The length of time to keep the specified snapshots. Measured in seconds.
days (int): The number of days to keep the snapshots after the `all_for_sec` period has passed.
per_day (int): The number of snapshots to keep per day after the `all_for_sec` period has passed.
"""
if all_for_sec is not None:
self.all_for_sec = all_for_sec
if days is not None:
self.days = days
if per_day is not None:
self.per_day = per_day
def __setattr__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `RetentionPolicy`".format(key))
self.__dict__[key] = value
def __getattribute__(self, item):
value = object.__getattribute__(self, item)
if isinstance(value, Property):
raise AttributeError
else:
return value
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
if hasattr(self, attr):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(RetentionPolicy, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, RetentionPolicy):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
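
# --- Added usage sketch (not part of the original generated file) ---
# A small, hedged example of constructing the model defined above and
# serializing it; it only uses the attributes declared in `swagger_types`,
# and the numeric values are illustrative placeholders.
if __name__ == "__main__":
    policy = RetentionPolicy(all_for_sec=86400, days=7, per_day=4)
    print(policy.to_dict())  # {'all_for_sec': 86400, 'days': 7, 'per_day': 4}
    print(policy)            # pretty-printed via __repr__ / to_str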
| 29.895161 | 109 | 0.551929 | [
"BSD-2-Clause"
] | Flav-STOR-WL/py-pure-client | pypureclient/flasharray/FA_2_1/models/retention_policy.py | 3,707 | Python |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class TasksOperations:
"""TasksOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.datamigration.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
group_name: str,
service_name: str,
project_name: str,
task_type: Optional[str] = None,
**kwargs
) -> AsyncIterable["models.TaskList"]:
"""Get tasks in a service.
The services resource is the top-level resource that represents the Database Migration Service.
This method returns a list of tasks owned by a service resource. Some tasks may have a status
of Unknown, which indicates that an error occurred while querying the status of that task.
:param group_name: Name of the resource group.
:type group_name: str
:param service_name: Name of the service.
:type service_name: str
:param project_name: Name of the project.
:type project_name: str
:param task_type: Filter tasks by task type.
:type task_type: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either TaskList or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.datamigration.models.TaskList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.TaskList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-10-30-preview"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'groupName': self._serialize.url("group_name", group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str'),
'projectName': self._serialize.url("project_name", project_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if task_type is not None:
query_parameters['taskType'] = self._serialize.query("task_type", task_type, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('TaskList', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize(models.ApiError, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{groupName}/providers/Microsoft.DataMigration/services/{serviceName}/projects/{projectName}/tasks'} # type: ignore
async def create_or_update(
self,
group_name: str,
service_name: str,
project_name: str,
task_name: str,
parameters: "models.ProjectTask",
**kwargs
) -> "models.ProjectTask":
"""Create or update task.
The tasks resource is a nested, proxy-only resource representing work performed by a DMS
instance. The PUT method creates a new task or updates an existing one, although since tasks
have no mutable custom properties, there is little reason to update an existing one.
:param group_name: Name of the resource group.
:type group_name: str
:param service_name: Name of the service.
:type service_name: str
:param project_name: Name of the project.
:type project_name: str
:param task_name: Name of the Task.
:type task_name: str
:param parameters: Information about the task.
:type parameters: ~azure.mgmt.datamigration.models.ProjectTask
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ProjectTask, or the result of cls(response)
:rtype: ~azure.mgmt.datamigration.models.ProjectTask
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.ProjectTask"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-10-30-preview"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.create_or_update.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'groupName': self._serialize.url("group_name", group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str'),
'projectName': self._serialize.url("project_name", project_name, 'str'),
'taskName': self._serialize.url("task_name", task_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'ProjectTask')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.ApiError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ProjectTask', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('ProjectTask', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{groupName}/providers/Microsoft.DataMigration/services/{serviceName}/projects/{projectName}/tasks/{taskName}'} # type: ignore
async def get(
self,
group_name: str,
service_name: str,
project_name: str,
task_name: str,
expand: Optional[str] = None,
**kwargs
) -> "models.ProjectTask":
"""Get task information.
The tasks resource is a nested, proxy-only resource representing work performed by a DMS
instance. The GET method retrieves information about a task.
:param group_name: Name of the resource group.
:type group_name: str
:param service_name: Name of the service.
:type service_name: str
:param project_name: Name of the project.
:type project_name: str
:param task_name: Name of the Task.
:type task_name: str
:param expand: Expand the response.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ProjectTask, or the result of cls(response)
:rtype: ~azure.mgmt.datamigration.models.ProjectTask
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.ProjectTask"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-10-30-preview"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'groupName': self._serialize.url("group_name", group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str'),
'projectName': self._serialize.url("project_name", project_name, 'str'),
'taskName': self._serialize.url("task_name", task_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.ApiError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('ProjectTask', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{groupName}/providers/Microsoft.DataMigration/services/{serviceName}/projects/{projectName}/tasks/{taskName}'} # type: ignore
async def delete(
self,
group_name: str,
service_name: str,
project_name: str,
task_name: str,
delete_running_tasks: Optional[bool] = None,
**kwargs
) -> None:
"""Delete task.
The tasks resource is a nested, proxy-only resource representing work performed by a DMS
instance. The DELETE method deletes a task, canceling it first if it's running.
:param group_name: Name of the resource group.
:type group_name: str
:param service_name: Name of the service.
:type service_name: str
:param project_name: Name of the project.
:type project_name: str
:param task_name: Name of the Task.
:type task_name: str
:param delete_running_tasks: Delete the resource even if it contains running tasks.
:type delete_running_tasks: bool
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-10-30-preview"
accept = "application/json"
# Construct URL
url = self.delete.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'groupName': self._serialize.url("group_name", group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str'),
'projectName': self._serialize.url("project_name", project_name, 'str'),
'taskName': self._serialize.url("task_name", task_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if delete_running_tasks is not None:
query_parameters['deleteRunningTasks'] = self._serialize.query("delete_running_tasks", delete_running_tasks, 'bool')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.ApiError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{groupName}/providers/Microsoft.DataMigration/services/{serviceName}/projects/{projectName}/tasks/{taskName}'} # type: ignore
async def update(
self,
group_name: str,
service_name: str,
project_name: str,
task_name: str,
parameters: "models.ProjectTask",
**kwargs
) -> "models.ProjectTask":
"""Create or update task.
The tasks resource is a nested, proxy-only resource representing work performed by a DMS
instance. The PATCH method updates an existing task, but since tasks have no mutable custom
properties, there is little reason to do so.
:param group_name: Name of the resource group.
:type group_name: str
:param service_name: Name of the service.
:type service_name: str
:param project_name: Name of the project.
:type project_name: str
:param task_name: Name of the Task.
:type task_name: str
:param parameters: Information about the task.
:type parameters: ~azure.mgmt.datamigration.models.ProjectTask
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ProjectTask, or the result of cls(response)
:rtype: ~azure.mgmt.datamigration.models.ProjectTask
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.ProjectTask"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-10-30-preview"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'groupName': self._serialize.url("group_name", group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str'),
'projectName': self._serialize.url("project_name", project_name, 'str'),
'taskName': self._serialize.url("task_name", task_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'ProjectTask')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.ApiError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('ProjectTask', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{groupName}/providers/Microsoft.DataMigration/services/{serviceName}/projects/{projectName}/tasks/{taskName}'} # type: ignore
async def cancel(
self,
group_name: str,
service_name: str,
project_name: str,
task_name: str,
**kwargs
) -> "models.ProjectTask":
"""Cancel a task.
The tasks resource is a nested, proxy-only resource representing work performed by a DMS
instance. This method cancels a task if it's currently queued or running.
:param group_name: Name of the resource group.
:type group_name: str
:param service_name: Name of the service.
:type service_name: str
:param project_name: Name of the project.
:type project_name: str
:param task_name: Name of the Task.
:type task_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ProjectTask, or the result of cls(response)
:rtype: ~azure.mgmt.datamigration.models.ProjectTask
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.ProjectTask"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-10-30-preview"
accept = "application/json"
# Construct URL
url = self.cancel.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'groupName': self._serialize.url("group_name", group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str'),
'projectName': self._serialize.url("project_name", project_name, 'str'),
'taskName': self._serialize.url("task_name", task_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.ApiError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('ProjectTask', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
cancel.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{groupName}/providers/Microsoft.DataMigration/services/{serviceName}/projects/{projectName}/tasks/{taskName}/cancel'} # type: ignore
async def command(
self,
group_name: str,
service_name: str,
project_name: str,
task_name: str,
parameters: "models.CommandProperties",
**kwargs
) -> "models.CommandProperties":
"""Execute a command on a task.
The tasks resource is a nested, proxy-only resource representing work performed by a DMS
instance. This method executes a command on a running task.
:param group_name: Name of the resource group.
:type group_name: str
:param service_name: Name of the service.
:type service_name: str
:param project_name: Name of the project.
:type project_name: str
:param task_name: Name of the Task.
:type task_name: str
:param parameters: Command to execute.
:type parameters: ~azure.mgmt.datamigration.models.CommandProperties
:keyword callable cls: A custom type or function that will be passed the direct response
:return: CommandProperties, or the result of cls(response)
:rtype: ~azure.mgmt.datamigration.models.CommandProperties
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.CommandProperties"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-10-30-preview"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.command.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'groupName': self._serialize.url("group_name", group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str'),
'projectName': self._serialize.url("project_name", project_name, 'str'),
'taskName': self._serialize.url("task_name", task_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'CommandProperties')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.ApiError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('CommandProperties', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
command.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{groupName}/providers/Microsoft.DataMigration/services/{serviceName}/projects/{projectName}/tasks/{taskName}/command'} # type: ignore
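
# --- Added usage sketch (not part of the original generated file) ---
# A hedged illustration of how this operation group is typically reached. It
# assumes the enclosing service client is named `DataMigrationManagementClient`
# and exposes this class as its `tasks` attribute, which is the usual pattern
# for generated Azure SDK clients; the import path, resource group, service and
# project names below are placeholders, not values taken from this repository.
#
#     import asyncio
#     from azure.identity.aio import DefaultAzureCredential
#     from azure.mgmt.datamigration.aio import DataMigrationManagementClient
#
#     async def show_tasks():
#         async with DefaultAzureCredential() as credential:
#             async with DataMigrationManagementClient(credential, "<subscription-id>") as client:
#                 async for task in client.tasks.list("my-rg", "my-dms-service", "my-project"):
#                     print(task)
#
#     asyncio.run(show_tasks())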
| 48.032313 | 214 | 0.659455 | [
"MIT"
] | Hamster-Huey/azure-cli-extensions | src/datamigration/azext_datamigration/vendored_sdks/datamigration/aio/operations/_tasks_operations.py | 28,243 | Python |
from django.apps import AppConfig
class ComputecalcConfig(AppConfig):
name = 'computecalc'
| 16.166667 | 35 | 0.773196 | [
"MIT"
] | mojosan7/cloudcalc | src/computecalc/apps.py | 97 | Python |
# -*- coding: utf-8 -*-
"""
Created on Sun Dec 29 16:02:11 2019
@author: Christian Zwinkels-Valero
"""
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
# Activation functions
def sigmoid(z):
    # Standard logistic sigmoid; the exponent must be negated.
    return 1 / (1 + np.exp(-z))
def d_sigmoid(z):
return sigmoid(z)*(1 - sigmoid(z))
def relu(z, d=False):
if d == False:
f = np.maximum(0.001*z, z)
else:
f = np.where(z >= 0, 1, 0.001)
return f
# Data processing
data = pd.read_csv("IRISS.csv", header=None, skiprows=1)
data = data.sample(frac=1)
X = data[data.columns[0:data.shape[-1] - 1]].to_numpy()
X = (X - np.mean(X, axis=0)) / (np.max(X, axis=0) - np.min(X, axis=0))
X = X.T
y = np.array([data[data.shape[-1] - 1].to_numpy()])
# Initialization
layer_sizes = (X.shape[0], 12, 8, 4, y.shape[0])
weight_sizes = [(y, x) for y, x in zip(layer_sizes[1:], layer_sizes[0:])]
weights = [np.random.rand(l[0], l[1])*np.sqrt(1/l[1]) for l in weight_sizes]
biases = [np.zeros((i, 1)) for i in layer_sizes[1:]]
# Foward propagation
def feedforward(data_in, Ws, Bs):
Z = []
A = [data_in] # First activation layer is the inputs
# Hidden layer computation
for i in range(len(Ws) - 1):
z = np.dot(Ws[i], A[-1]) + Bs[i]
a = relu(z, d=False)
Z.append(z)
A.append(a)
# Ouput layer computation
z = np.dot(Ws[-1], A[-1]) + Bs[-1]
Z.append(z)
a = sigmoid(z)
A.append(a)
return Z, A
# Calculating the costs
def costs(data_in, outputs, Ws, Bs):
Z, pred = feedforward(data_in, Ws, Bs)
delta = []
dj_dw = []
# Loss computation
loss = -1*(outputs*np.log(pred[-1]) + (1-outputs)*np.log(1 - pred[-1]))
loss = np.mean(loss)
# Final layer derivatives
    dj_da = -1*(outputs/pred[-1] - (1 - outputs)/(1 - pred[-1]))
da_dz = d_sigmoid(Z[-1])
delta.append(dj_da*da_dz)
# Deltas calculation
for i in range(1, len(Ws)):
d = np.dot(Ws[-i].T, delta[-i]) * relu(Z[-i - 1], d=True)
delta.insert(0, np.mean(d, axis=1, keepdims=True))
delta[-1] = np.mean(delta[-1])
# dj_dw calculation
for i in range(1, len(pred)):
b = []
A = pred[-i - 1].T
for a in A:
b.append(np.dot(delta[-i], [a]))
d = np.zeros(weight_sizes[-i])
for s in b:
d += s
d /= len(d)
dj_dw.insert(0, d)
return loss, delta, dj_dw
def train(data_in, outputs, Ws, Bs, iters, alpha):
c_his = []
i_his = []
for i in range(iters):
c, Bu, Wu = costs(data_in, outputs, Ws, Bs)
for j in range(len(Ws)):
Bs[j] = np.add(Bs[j], np.multiply(-alpha, Bu[j]))
Ws[j] = np.add(Ws[j], np.multiply(-alpha, Wu[j]))
c_his.append(c)
i_his.append(i)
plt.plot(i_his, c_his)
plt.show()
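# Usage sketch (not part of the original script): run training on the prepared
# iris data. The iteration count and learning rate are illustrative assumptions.
if __name__ == "__main__":
    train(X, y, weights, biases, iters=1000, alpha=0.01)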
| 26.327103 | 76 | 0.555911 | [
"MIT"
] | Christian-Zwinkels-CAS/Machine-Learning-Attempt | Supervised_Learning/Neural_Network/NN.py | 2,817 | Python |
"""Get example scripts, notebooks, and data files."""
import argparse
from datetime import datetime, timedelta
from glob import glob
import json
import os
import pkg_resources
from progressbar import ProgressBar
try:
# For Python 3.0 and later
from urllib.request import urlopen
except ImportError:
# Fall back to Python 2's urllib2
from urllib2 import urlopen
import shutil
import sys
example_data_files = (
["MovingEddies_data/" + fn for fn in [
"moving_eddiesP.nc", "moving_eddiesU.nc", "moving_eddiesV.nc"]]
+ ["OFAM_example_data/" + fn for fn in [
"OFAM_simple_U.nc", "OFAM_simple_V.nc"]]
+ ["Peninsula_data/" + fn for fn in [
"peninsulaU.nc", "peninsulaV.nc", "peninsulaP.nc"]]
+ ["GlobCurrent_example_data/" + fn for fn in [
"%s000000-GLOBCURRENT-L4-CUReul_hs-ALT_SUM-v02.0-fv01.0.nc" % (
date.strftime("%Y%m%d"))
for date in ([datetime(2002, 1, 1) + timedelta(days=x)
for x in range(0, 365)] + [datetime(2003, 1, 1)])]]
+ ["DecayingMovingEddy_data/" + fn for fn in [
"decaying_moving_eddyU.nc", "decaying_moving_eddyV.nc"]]
+ ["NemoCurvilinear_data/" + fn for fn in [
"U_purely_zonal-ORCA025_grid_U.nc4", "V_purely_zonal-ORCA025_grid_V.nc4",
"mesh_mask.nc4"]]
+ ["NemoNorthSeaORCA025-N006_data/" + fn for fn in [
"ORCA025-N06_20000104d05U.nc", "ORCA025-N06_20000109d05U.nc",
"ORCA025-N06_20000104d05V.nc", "ORCA025-N06_20000109d05V.nc",
"ORCA025-N06_20000104d05W.nc", "ORCA025-N06_20000109d05W.nc",
"coordinates.nc"]])
example_data_url = "http://oceanparcels.org/examples-data"
def _maybe_create_dir(path):
"""Create directory (and parents) if they don't exist."""
try:
os.makedirs(path)
except OSError:
if not os.path.isdir(path):
raise
def copy_data_and_examples_from_package_to(target_path):
"""Copy example data from Parcels directory.
Return thos parths of the list `file_names` that were not found in the
package.
"""
examples_in_package = pkg_resources.resource_filename("parcels", "examples")
try:
shutil.copytree(examples_in_package, target_path)
except Exception as e:
print(e)
pass
def set_jupyter_kernel_to_python_version(path, python_version=2):
"""Set notebook kernelspec to desired python version.
This also drops all other meta data from the notebook.
"""
for file_name in glob(os.path.join(path, "*.ipynb")):
with open(file_name, 'r') as f:
notebook_data = json.load(f)
notebook_data['metadata'] = {"kernelspec": {
"display_name": "Python {}".format(python_version),
"language": "python",
"name": "python{}".format(python_version)}}
with open(file_name, 'w') as f:
json.dump(notebook_data, f, indent=2)
def _still_to_download(file_names, target_path):
"""Only return the files that are not yet present on disk."""
for fn in list(file_names):
if os.path.exists(os.path.join(target_path, fn)):
file_names.remove(fn)
return file_names
def download_files(source_url, file_names, target_path):
"""Mirror file_names from source_url to target_path."""
_maybe_create_dir(target_path)
pbar = ProgressBar()
print("Downloading %s ..." % (source_url.split("/")[-1]))
for filename in pbar(file_names):
_maybe_create_dir(os.path.join(target_path, os.path.dirname(filename)))
if not os.path.exists(os.path.join(target_path, filename)):
download_url = source_url + "/" + filename
src = urlopen(download_url)
with open(os.path.join(target_path, filename), 'wb') as dst:
dst.write(src.read())
def main(target_path=None):
"""Get example scripts, example notebooks, and example data.
Copy the examples from the package directory and get the example data either
from the package directory or from the Parcels website.
"""
if target_path is None:
        # get target directory
parser = argparse.ArgumentParser(
description="Get Parcels example data.")
parser.add_argument(
"target_path",
help="Where to put the tutorials? (This path will be created.)")
args = parser.parse_args()
target_path = args.target_path
if os.path.exists(target_path):
print("Error: {} already exists.".format(target_path))
return
# copy data and examples
copy_data_and_examples_from_package_to(target_path)
# make sure the notebooks use the correct python version
set_jupyter_kernel_to_python_version(
target_path,
python_version=sys.version_info[0])
# try downloading remaining files
remaining_example_data_files = _still_to_download(
example_data_files, target_path)
download_files(example_data_url, remaining_example_data_files, target_path)
if __name__ == "__main__":
main()
| 34.486301 | 81 | 0.664548 | [
"MIT"
] | becgorton/parcels | parcels/scripts/get_examples.py | 5,035 | Python |
# -*- coding: utf-8 -*-
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module provides the notification command to gsutil."""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import getopt
import re
import time
import uuid
from datetime import datetime
from gslib import metrics
from gslib.cloud_api import AccessDeniedException
from gslib.cloud_api import BadRequestException
from gslib.cloud_api import NotFoundException
from gslib.cloud_api import PublishPermissionDeniedException
from gslib.command import Command
from gslib.command import NO_MAX
from gslib.command_argument import CommandArgument
from gslib.cs_api_map import ApiSelector
from gslib.exception import CommandException
from gslib.help_provider import CreateHelpText
from gslib.project_id import PopulateProjectId
from gslib.pubsub_api import PubsubApi
from gslib.storage_url import StorageUrlFromString
from gslib.third_party.pubsub_apitools.pubsub_v1_messages import Binding
from gslib.utils import copy_helper
# Cloud Pub/Sub commands
_LIST_SYNOPSIS = """
gsutil notification list bucket_url...
"""
_DELETE_SYNOPSIS = """
gsutil notification delete (notificationConfigName|bucket_url)...
"""
_CREATE_SYNOPSIS = """
gsutil notification create -f (json|none) [-p prefix] [-t topic] \\
[-m key:value]... [-e eventType]... bucket_url
"""
# Object Change Notification commands
_WATCHBUCKET_SYNOPSIS = """
gsutil notification watchbucket [-i id] [-t token] app_url bucket_url
"""
_STOPCHANNEL_SYNOPSIS = """
gsutil notification stopchannel channel_id resource_id
"""
_SYNOPSIS = (
_CREATE_SYNOPSIS +
_DELETE_SYNOPSIS.lstrip('\n') +
_LIST_SYNOPSIS.lstrip('\n') +
_WATCHBUCKET_SYNOPSIS +
_STOPCHANNEL_SYNOPSIS.lstrip('\n') + '\n') # yapf: disable
_LIST_DESCRIPTION = """
<B>LIST</B>
The list sub-command provides a list of notification configs belonging to a
given bucket. The listed name of each notification config can be used with
the delete sub-command to delete that specific notification config.
For listing Object Change Notifications instead of Cloud Pub/Sub notification
subscription configs, add a -o flag.
<B>LIST EXAMPLES</B>
Fetch the list of notification configs for the bucket example-bucket:
gsutil notification list gs://example-bucket
The same as above, but for Object Change Notifications instead of Cloud
Pub/Sub notification subscription configs:
gsutil notification list -o gs://example-bucket
Fetch the notification configs in all buckets matching a wildcard:
gsutil notification list gs://example-*
Fetch all of the notification configs for buckets in the default project:
gsutil notification list gs://*
"""
_DELETE_DESCRIPTION = """
<B>DELETE</B>
The delete sub-command deletes notification configs from a bucket. If a
notification config name is passed as a parameter, that notification config
alone will be deleted. If a bucket name is passed, all notification configs
associated with that bucket will be deleted.
Cloud Pub/Sub topics associated with this notification config will not be
deleted by this command. Those must be deleted separately, for example with
the gcloud command `gcloud beta pubsub topics delete`.
Object Change Notification subscriptions cannot be deleted with this command.
For that, see the command `gsutil notification stopchannel`.
<B>DELETE EXAMPLES</B>
Delete a single notification config (with ID 3) in the bucket example-bucket:
gsutil notification delete projects/_/buckets/example-bucket/notificationConfigs/3
Delete all notification configs in the bucket example-bucket:
gsutil notification delete gs://example-bucket
"""
_CREATE_DESCRIPTION = """
<B>CREATE</B>
The create sub-command creates a notification config on a bucket, establishing
a flow of event notifications from Cloud Storage to a Cloud Pub/Sub topic. As
part of creating this flow, the create command also verifies that the
destination Cloud Pub/Sub topic exists, creating it if necessary, and verifies
that the Cloud Storage bucket has permission to publish events to that topic,
granting the permission if necessary.
If a destination Cloud Pub/Sub topic is not specified with the -t flag, Cloud
Storage will by default choose a topic name in the default project whose ID is
  the same as the bucket name. For example, if the default project ID specified is
'default-project' and the bucket being configured is gs://example-bucket, the
create command will use the Cloud Pub/Sub topic
"projects/default-project/topics/example-bucket".
In order to enable notifications, a `special Cloud Storage service account
<https://cloud.google.com/storage/docs/projects#service-accounts>`_ unique to
each project must have the IAM permission "projects.topics.publish". This
command will check to see if that permission exists and, if not, will attempt
to grant it.
You can create multiple notification configurations for a bucket, but their
triggers cannot overlap such that a single event could send multiple
notifications. Attempting to create a notification configuration that
overlaps with an existing notification configuration results in an error.
<B>CREATE EXAMPLES</B>
Begin sending notifications of all changes to the bucket example-bucket
to the Cloud Pub/Sub topic projects/default-project/topics/example-bucket:
gsutil notification create -f json gs://example-bucket
The same as above, but specifies the destination topic ID 'files-to-process'
in the default project:
gsutil notification create -f json \\
-t files-to-process gs://example-bucket
The same as above, but specifies a Cloud Pub/Sub topic belonging to the
specific cloud project 'example-project':
gsutil notification create -f json \\
-t projects/example-project/topics/files-to-process gs://example-bucket
Create a notification config that will only send an event when a new object
has been created:
gsutil notification create -f json -e OBJECT_FINALIZE gs://example-bucket
Create a topic and notification config that will only send an event when
an object beginning with "photos/" is affected:
gsutil notification create -p photos/ gs://example-bucket
List all of the notificationConfigs in bucket example-bucket:
gsutil notification list gs://example-bucket
  Delete all notificationConfigs for bucket example-bucket:
gsutil notification delete gs://example-bucket
Delete one specific notificationConfig for bucket example-bucket:
gsutil notification delete \\
projects/_/buckets/example-bucket/notificationConfigs/1
<B>OPTIONS</B>
The create sub-command has the following options
-e Specify an event type filter for this notification config. Cloud
Storage will only send notifications of this type. You may specify
this parameter multiple times to allow multiple event types. If not
specified, Cloud Storage will send notifications for all event
types. The valid types are:
OBJECT_FINALIZE - An object has been created.
OBJECT_METADATA_UPDATE - The metadata of an object has changed.
OBJECT_DELETE - An object has been permanently deleted.
OBJECT_ARCHIVE - A live Cloud Storage object has been archived.
-f Specifies the payload format of notification messages. Must be
either "json" for a payload matches the object metadata for the
JSON API, or "none" to specify no payload at all. In either case,
notification details are available in the message attributes.
-m Specifies a key:value attribute that will be appended to the set
of attributes sent to Cloud Pub/Sub for all events associated with
this notification config. You may specify this parameter multiple
times to set multiple attributes.
-p Specifies a prefix path filter for this notification config. Cloud
Storage will only send notifications for objects in this bucket
whose names begin with the specified prefix.
-s Skips creation and permission assignment of the Cloud Pub/Sub topic.
This is useful if the caller does not have permission to access
the topic in question, or if the topic already exists and has the
appropriate publish permission assigned.
-t The Cloud Pub/Sub topic to which notifications should be sent. If
not specified, this command will choose a topic whose project is
your default project and whose ID is the same as the Cloud Storage
bucket name.
<B>NEXT STEPS</B>
Once the create command has succeeded, Cloud Storage will publish a message to
the specified Cloud Pub/Sub topic when eligible changes occur. In order to
receive these messages, you must create a Pub/Sub subscription for your
Pub/Sub topic. To learn more about creating Pub/Sub subscriptions, see `the
Pub/Sub Subscriber Overview <https://cloud.google.com/pubsub/docs/subscriber>`_.
You can create a simple Pub/Sub subscription using the ``gcloud`` command-line
tool. For example, to create a new subscription on the topic "myNewTopic" and
attempt to pull messages from it, you could run:
gcloud beta pubsub subscriptions create --topic myNewTopic testSubscription
gcloud beta pubsub subscriptions pull --auto-ack testSubscription
"""
_WATCHBUCKET_DESCRIPTION = """
<B>WATCHBUCKET</B>
The watchbucket sub-command can be used to watch a bucket for object changes.
A service account must be used when running this command.
The app_url parameter must be an HTTPS URL to an application that will be
notified of changes to any object in the bucket. The URL endpoint must be
a verified domain on your project. See `Notification Authorization
<https://cloud.google.com/storage/docs/object-change-notification#_Authorization>`_
for details.
The optional id parameter can be used to assign a unique identifier to the
created notification channel. If not provided, a random UUID string will be
generated.
The optional token parameter can be used to validate notifications events.
To do this, set this custom token and store it to later verify that
notification events contain the client token you expect.
<B>WATCHBUCKET EXAMPLES</B>
Watch the bucket example-bucket for changes and send notifications to an
application server running at example.com:
gsutil notification watchbucket https://example.com/notify \\
gs://example-bucket
Assign identifier my-channel-id to the created notification channel:
gsutil notification watchbucket -i my-channel-id \\
https://example.com/notify gs://example-bucket
Set a custom client token that will be included with each notification event:
gsutil notification watchbucket -t my-client-token \\
https://example.com/notify gs://example-bucket
"""
_STOPCHANNEL_DESCRIPTION = """
<B>STOPCHANNEL</B>
The stopchannel sub-command can be used to stop sending change events to a
notification channel.
The channel_id and resource_id parameters should match the values from the
response of a bucket watch request.
<B>STOPCHANNEL EXAMPLES</B>
Stop the notification event channel with channel identifier channel1 and
resource identifier SoGqan08XDIFWr1Fv_nGpRJBHh8:
gsutil notification stopchannel channel1 SoGqan08XDIFWr1Fv_nGpRJBHh8
"""
_DESCRIPTION = """
The notification command is used to configure Google Cloud Storage support for
sending notifications to Cloud Pub/Sub as well as to configure the object
change notification feature.
<B>CLOUD PUB/SUB</B>
The "create", "list", and "delete" sub-commands deal with configuring Cloud
Storage integration with Google Cloud Pub/Sub.
""" + _CREATE_DESCRIPTION + _LIST_DESCRIPTION + _DELETE_DESCRIPTION + """
<B>OBJECT CHANGE NOTIFICATIONS</B>
For more information on the Object Change Notification feature, please see
`the Object Change Notification docs
<https://cloud.google.com/storage/docs/object-change-notification>`_.
The "watchbucket" and "stopchannel" sub-commands enable and disable Object
Change Notifications.
""" + _WATCHBUCKET_DESCRIPTION + _STOPCHANNEL_DESCRIPTION + """
<B>NOTIFICATIONS AND PARALLEL COMPOSITE UPLOADS</B>
By default, gsutil enables parallel composite uploads for large files (see
"gsutil help cp"), which means that an upload of a large object can result
in multiple temporary component objects being uploaded before the actual
intended object is created. Any subscriber to notifications for this bucket
will then see a notification for each of these components being created and
deleted. If this is a concern for you, note that parallel composite uploads
can be disabled by setting "parallel_composite_upload_threshold = 0" in your
boto config file. Alternately, your subscriber code can filter out gsutil's
parallel composite uploads by ignoring any notification about objects whose
names contain (but do not start with) the following string:
"{composite_namespace}".
""".format(composite_namespace=copy_helper.PARALLEL_UPLOAD_TEMP_NAMESPACE)
NOTIFICATION_AUTHORIZATION_FAILED_MESSAGE = """
Watch bucket attempt failed:
{watch_error}
You attempted to watch a bucket with an application URL of:
{watch_url}
which is not authorized for your project. Please ensure that you are using
Service Account authentication and that the Service Account's project is
authorized for the application URL. Notification endpoint URLs must also be
whitelisted in your Cloud Console project. To do that, the domain must also be
verified using Google Webmaster Tools. For instructions, please see
`Notification Authorization
<https://cloud.google.com/storage/docs/object-change-notification#_Authorization>`_.
"""
_DETAILED_HELP_TEXT = CreateHelpText(_SYNOPSIS, _DESCRIPTION)
# yapf: disable
_create_help_text = (
CreateHelpText(_CREATE_SYNOPSIS, _CREATE_DESCRIPTION))
_list_help_text = (
CreateHelpText(_LIST_SYNOPSIS, _LIST_DESCRIPTION))
_delete_help_text = (
CreateHelpText(_DELETE_SYNOPSIS, _DELETE_DESCRIPTION))
_watchbucket_help_text = (
CreateHelpText(_WATCHBUCKET_SYNOPSIS, _WATCHBUCKET_DESCRIPTION))
_stopchannel_help_text = (
CreateHelpText(_STOPCHANNEL_SYNOPSIS, _STOPCHANNEL_DESCRIPTION))
# yapf: enable
PAYLOAD_FORMAT_MAP = {
'none': 'NONE',
'json': 'JSON_API_V1',
}
class NotificationCommand(Command):
"""Implementation of gsutil notification command."""
# Notification names might look like one of these:
# canonical form: projects/_/buckets/bucket/notificationConfigs/3
# JSON API form: b/bucket/notificationConfigs/5
# Either of the above might start with a / if a user is copying & pasting.
def _GetNotificationPathRegex(self):
if not NotificationCommand._notification_path_regex:
NotificationCommand._notification_path_regex = re.compile(
('/?(projects/[^/]+/)?b(uckets)?/(?P<bucket>[^/]+)/'
'notificationConfigs/(?P<notification>[0-9]+)'))
return NotificationCommand._notification_path_regex
_notification_path_regex = None
# Command specification. See base class for documentation.
command_spec = Command.CreateCommandSpec(
'notification',
command_name_aliases=[
'notify',
'notifyconfig',
'notifications',
'notif',
],
usage_synopsis=_SYNOPSIS,
min_args=2,
max_args=NO_MAX,
supported_sub_args='i:t:m:t:of:e:p:s',
file_url_ok=False,
provider_url_ok=False,
urls_start_arg=1,
gs_api_support=[ApiSelector.JSON],
gs_default_api=ApiSelector.JSON,
argparse_arguments={
'watchbucket': [
CommandArgument.MakeFreeTextArgument(),
CommandArgument.MakeZeroOrMoreCloudBucketURLsArgument(),
],
'stopchannel': [],
'list': [
CommandArgument.MakeZeroOrMoreCloudBucketURLsArgument(),
],
'delete': [
# Takes a list of one of the following:
# notification: projects/_/buckets/bla/notificationConfigs/5,
# bucket: gs://foobar
CommandArgument.MakeZeroOrMoreCloudURLsArgument(),
],
'create': [
CommandArgument.MakeFreeTextArgument(), # Cloud Pub/Sub topic
CommandArgument.MakeNCloudBucketURLsArgument(1),
]
},
)
# Help specification. See help_provider.py for documentation.
help_spec = Command.HelpSpec(
help_name='notification',
help_name_aliases=[
'watchbucket',
'stopchannel',
'notifyconfig',
],
help_type='command_help',
help_one_line_summary='Configure object change notification',
help_text=_DETAILED_HELP_TEXT,
subcommand_help_text={
'create': _create_help_text,
'list': _list_help_text,
'delete': _delete_help_text,
'watchbucket': _watchbucket_help_text,
'stopchannel': _stopchannel_help_text,
},
)
def _WatchBucket(self):
"""Creates a watch on a bucket given in self.args."""
self.CheckArguments()
identifier = None
client_token = None
if self.sub_opts:
for o, a in self.sub_opts:
if o == '-i':
identifier = a
if o == '-t':
client_token = a
identifier = identifier or str(uuid.uuid4())
watch_url = self.args[0]
bucket_arg = self.args[-1]
if not watch_url.lower().startswith('https://'):
raise CommandException('The application URL must be an https:// URL.')
bucket_url = StorageUrlFromString(bucket_arg)
if not (bucket_url.IsBucket() and bucket_url.scheme == 'gs'):
raise CommandException(
'The %s command can only be used with gs:// bucket URLs.' %
self.command_name)
if not bucket_url.IsBucket():
raise CommandException('URL must name a bucket for the %s command.' %
self.command_name)
self.logger.info('Watching bucket %s with application URL %s ...',
bucket_url, watch_url)
try:
channel = self.gsutil_api.WatchBucket(bucket_url.bucket_name,
watch_url,
identifier,
token=client_token,
provider=bucket_url.scheme)
except AccessDeniedException as e:
self.logger.warn(
NOTIFICATION_AUTHORIZATION_FAILED_MESSAGE.format(watch_error=str(e),
watch_url=watch_url))
raise
channel_id = channel.id
resource_id = channel.resourceId
client_token = channel.token
self.logger.info('Successfully created watch notification channel.')
self.logger.info('Watch channel identifier: %s', channel_id)
self.logger.info('Canonicalized resource identifier: %s', resource_id)
self.logger.info('Client state token: %s', client_token)
return 0
def _StopChannel(self):
channel_id = self.args[0]
resource_id = self.args[1]
self.logger.info('Removing channel %s with resource identifier %s ...',
channel_id, resource_id)
self.gsutil_api.StopChannel(channel_id, resource_id, provider='gs')
    self.logger.info('Successfully removed channel.')
return 0
def _ListChannels(self, bucket_arg):
"""Lists active channel watches on a bucket given in self.args."""
bucket_url = StorageUrlFromString(bucket_arg)
if not (bucket_url.IsBucket() and bucket_url.scheme == 'gs'):
raise CommandException(
'The %s command can only be used with gs:// bucket URLs.' %
self.command_name)
if not bucket_url.IsBucket():
raise CommandException('URL must name a bucket for the %s command.' %
self.command_name)
channels = self.gsutil_api.ListChannels(bucket_url.bucket_name,
provider='gs').items
self.logger.info(
'Bucket %s has the following active Object Change Notifications:',
bucket_url.bucket_name)
for idx, channel in enumerate(channels):
self.logger.info('\tNotification channel %d:', idx + 1)
self.logger.info('\t\tChannel identifier: %s', channel.channel_id)
self.logger.info('\t\tResource identifier: %s', channel.resource_id)
self.logger.info('\t\tApplication URL: %s', channel.push_url)
self.logger.info('\t\tCreated by: %s', channel.subscriber_email)
self.logger.info(
'\t\tCreation time: %s',
str(datetime.fromtimestamp(channel.creation_time_ms / 1000)))
return 0
def _Create(self):
self.CheckArguments()
# User-specified options
pubsub_topic = None
payload_format = None
custom_attributes = {}
event_types = []
object_name_prefix = None
should_setup_topic = True
if self.sub_opts:
for o, a in self.sub_opts:
if o == '-e':
event_types.append(a)
elif o == '-f':
payload_format = a
elif o == '-m':
if ':' not in a:
raise CommandException(
'Custom attributes specified with -m should be of the form '
'key:value')
key, value = a.split(':')
custom_attributes[key] = value
elif o == '-p':
object_name_prefix = a
elif o == '-s':
should_setup_topic = False
elif o == '-t':
pubsub_topic = a
if payload_format not in PAYLOAD_FORMAT_MAP:
raise CommandException(
"Must provide a payload format with -f of either 'json' or 'none'")
payload_format = PAYLOAD_FORMAT_MAP[payload_format]
bucket_arg = self.args[-1]
bucket_url = StorageUrlFromString(bucket_arg)
if not bucket_url.IsCloudUrl() or not bucket_url.IsBucket():
raise CommandException(
"%s %s requires a GCS bucket name, but got '%s'" %
(self.command_name, self.subcommand_name, bucket_arg))
if bucket_url.scheme != 'gs':
raise CommandException(
'The %s command can only be used with gs:// bucket URLs.' %
self.command_name)
bucket_name = bucket_url.bucket_name
self.logger.debug('Creating notification for bucket %s', bucket_url)
# Find the project this bucket belongs to
bucket_metadata = self.gsutil_api.GetBucket(bucket_name,
fields=['projectNumber'],
provider=bucket_url.scheme)
bucket_project_number = bucket_metadata.projectNumber
# If not specified, choose a sensible default for the Cloud Pub/Sub topic
# name.
if not pubsub_topic:
pubsub_topic = 'projects/%s/topics/%s' % (PopulateProjectId(None),
bucket_name)
if not pubsub_topic.startswith('projects/'):
# If a user picks a topic ID (mytopic) but doesn't pass the whole name (
# projects/my-project/topics/mytopic ), pick a default project.
pubsub_topic = 'projects/%s/topics/%s' % (PopulateProjectId(None),
pubsub_topic)
self.logger.debug('Using Cloud Pub/Sub topic %s', pubsub_topic)
just_modified_topic_permissions = False
if should_setup_topic:
# Ask GCS for the email address that represents GCS's permission to
# publish to a Cloud Pub/Sub topic from this project.
service_account = self.gsutil_api.GetProjectServiceAccount(
bucket_project_number, provider=bucket_url.scheme).email_address
self.logger.debug('Service account for project %d: %s',
bucket_project_number, service_account)
just_modified_topic_permissions = self._CreateTopic(
pubsub_topic, service_account)
for attempt_number in range(0, 2):
try:
create_response = self.gsutil_api.CreateNotificationConfig(
bucket_name,
pubsub_topic=pubsub_topic,
payload_format=payload_format,
custom_attributes=custom_attributes,
event_types=event_types if event_types else None,
object_name_prefix=object_name_prefix,
provider=bucket_url.scheme)
break
except PublishPermissionDeniedException:
if attempt_number == 0 and just_modified_topic_permissions:
# If we have just set the IAM policy, it may take up to 10 seconds to
# take effect.
self.logger.info(
'Retrying create notification in 10 seconds '
'(new permissions may take up to 10 seconds to take effect.)')
time.sleep(10)
else:
raise
notification_name = 'projects/_/buckets/%s/notificationConfigs/%s' % (
bucket_name, create_response.id)
self.logger.info('Created notification config %s', notification_name)
return 0
def _CreateTopic(self, pubsub_topic, service_account):
"""Assures that a topic exists, creating it if necessary.
Also adds GCS as a publisher on that bucket, if necessary.
Args:
pubsub_topic: name of the Cloud Pub/Sub topic to use/create.
service_account: the GCS service account that needs publish permission.
Returns:
true if we modified IAM permissions, otherwise false.
"""
pubsub_api = PubsubApi(logger=self.logger)
# Verify that the Pub/Sub topic exists. If it does not, create it.
try:
pubsub_api.GetTopic(topic_name=pubsub_topic)
self.logger.debug('Topic %s already exists', pubsub_topic)
except NotFoundException:
self.logger.debug('Creating topic %s', pubsub_topic)
pubsub_api.CreateTopic(topic_name=pubsub_topic)
self.logger.info('Created Cloud Pub/Sub topic %s', pubsub_topic)
# Verify that the service account is in the IAM policy.
policy = pubsub_api.GetTopicIamPolicy(topic_name=pubsub_topic)
binding = Binding(role='roles/pubsub.publisher',
members=['serviceAccount:%s' % service_account])
# This could be more extensive. We could, for instance, check for roles
# that are stronger that pubsub.publisher, like owner. We could also
# recurse up the hierarchy looking to see if there are project-level
# permissions. This can get very complex very quickly, as the caller
# may not necessarily have access to the project-level IAM policy.
# There's no danger in double-granting permission just to make sure it's
# there, though.
if binding not in policy.bindings:
policy.bindings.append(binding)
# transactional safety via etag field.
pubsub_api.SetTopicIamPolicy(topic_name=pubsub_topic, policy=policy)
return True
else:
self.logger.debug('GCS already has publish permission to topic %s.',
pubsub_topic)
return False
def _EnumerateNotificationsFromArgs(self, accept_notification_configs=True):
"""Yields bucket/notification tuples from command-line args.
Given a list of strings that are bucket names (gs://foo) or notification
config IDs, yield tuples of bucket names and their associated notifications.
Args:
accept_notification_configs: whether notification configs are valid args.
Yields:
Tuples of the form (bucket_name, Notification)
"""
path_regex = self._GetNotificationPathRegex()
for list_entry in self.args:
match = path_regex.match(list_entry)
if match:
if not accept_notification_configs:
raise CommandException(
'%s %s accepts only bucket names, but you provided %s' %
(self.command_name, self.subcommand_name, list_entry))
bucket_name = match.group('bucket')
notification_id = match.group('notification')
found = False
for notification in self.gsutil_api.ListNotificationConfigs(
bucket_name, provider='gs'):
if notification.id == notification_id:
yield (bucket_name, notification)
found = True
break
if not found:
raise NotFoundException('Could not find notification %s' % list_entry)
else:
storage_url = StorageUrlFromString(list_entry)
if not storage_url.IsCloudUrl():
raise CommandException(
'The %s command must be used on cloud buckets or notification '
'config names.' % self.command_name)
if storage_url.scheme != 'gs':
          raise CommandException('The %s command only works on gs:// buckets.' %
                                 self.command_name)
path = None
if storage_url.IsProvider():
path = 'gs://*'
elif storage_url.IsBucket():
path = list_entry
if not path:
raise CommandException(
'The %s command cannot be used on cloud objects, only buckets' %
self.command_name)
for blr in self.WildcardIterator(path).IterBuckets(
bucket_fields=['id']):
for notification in self.gsutil_api.ListNotificationConfigs(
blr.storage_url.bucket_name, provider='gs'):
yield (blr.storage_url.bucket_name, notification)
def _List(self):
self.CheckArguments()
if self.sub_opts:
if '-o' in dict(self.sub_opts):
for bucket_name in self.args:
self._ListChannels(bucket_name)
else:
for bucket_name, notification in self._EnumerateNotificationsFromArgs(
accept_notification_configs=False):
self._PrintNotificationDetails(bucket_name, notification)
return 0
def _PrintNotificationDetails(self, bucket, notification):
print('projects/_/buckets/{bucket}/notificationConfigs/{notification}\n'
'\tCloud Pub/Sub topic: {topic}'.format(
bucket=bucket,
notification=notification.id,
topic=notification.topic[len('//pubsub.googleapis.com/'):]))
if notification.custom_attributes:
print('\tCustom attributes:')
for attr in notification.custom_attributes.additionalProperties:
print('\t\t%s: %s' % (attr.key, attr.value))
filters = []
if notification.event_types:
filters.append('\t\tEvent Types: %s' %
', '.join(notification.event_types))
if notification.object_name_prefix:
filters.append("\t\tObject name prefix: '%s'" %
notification.object_name_prefix)
if filters:
print('\tFilters:')
for line in filters:
print(line)
self.logger.info('')
def _Delete(self):
for bucket_name, notification in self._EnumerateNotificationsFromArgs():
self._DeleteNotification(bucket_name, notification.id)
return 0
def _DeleteNotification(self, bucket_name, notification_id):
self.gsutil_api.DeleteNotificationConfig(bucket_name,
notification=notification_id,
provider='gs')
return 0
def _RunSubCommand(self, func):
try:
(self.sub_opts,
self.args) = getopt.getopt(self.args,
self.command_spec.supported_sub_args)
# Commands with both suboptions and subcommands need to reparse for
# suboptions, so we log again.
metrics.LogCommandParams(sub_opts=self.sub_opts)
return func(self)
except getopt.GetoptError:
self.RaiseInvalidArgumentException()
SUBCOMMANDS = {
'create': _Create,
'list': _List,
'delete': _Delete,
'watchbucket': _WatchBucket,
'stopchannel': _StopChannel
}
def RunCommand(self):
"""Command entry point for the notification command."""
self.subcommand_name = self.args.pop(0)
if self.subcommand_name in NotificationCommand.SUBCOMMANDS:
metrics.LogCommandParams(subcommands=[self.subcommand_name])
return self._RunSubCommand(
NotificationCommand.SUBCOMMANDS[self.subcommand_name])
else:
raise CommandException('Invalid subcommand "%s" for the %s command.' %
(self.subcommand_name, self.command_name))
| 40.081608 | 86 | 0.697329 | [
"Apache-2.0"
] | BobiGilburd/gsutil | gslib/commands/notification.py | 32,907 | Python |
from spyre import server
import pickle
class StockExample(server.App):
title = "Historical Stock Prices"
inputs = [{
"input_type": 'dropdown',
"label": 'Company',
"options": [
{"label": "Google", "value": "GOOG"},
{"label": "Yahoo", "value": "YHOO"},
{"label": "Apple", "value": "AAPL"}],
"variable_name": 'ticker',
"action_id": "plot"
}]
outputs = [
{"output_type": "plot", "output_id": "plot"},
{"output_type": "table", "output_id": "table"}
]
def getData(self, params):
dfs = pickle.load((open("stocks.pkl", "rb")))
ticker = params['ticker']
return dfs[ticker]
def getPlot(self, params):
df = self.getData(params)
plt_obj = df.set_index('Date').drop(['volume'], axis=1).plot()
plt_obj.set_ylabel("Price")
fig = plt_obj.get_figure()
return fig
if __name__ == '__main__':
app = StockExample()
app.launch(port=9093)
| 24.238095 | 70 | 0.535363 | [
"MIT"
] | adamhajari/spyre | tutorial/quickstart/stocks_example_no_internet_simple2.py | 1,018 | Python |
#!/bin/env python3
import os
import time
import socket
import pickle
from optparse import OptionParser
if __name__=='__main__':
parser = OptionParser(usage="%prog: [options]")
parser.add_option("", "--fname", dest="fname", type="string",
help="Send a file, can also put filename at end")
parser.add_option("", "--mcast", dest="ip", type="string", default="224.1.1.5",
help="IP Address to send to [default=%default]")
parser.add_option("", "--port", dest="port", type="int", default=10000,
help="IP Address to send to [default=%default]")
(options, args) = parser.parse_args()
fname = options.fname
    if fname is None:
if len(args) > 0:
fname = args[0]
else:
parser.print_help()
exit(1)
MCAST_GRP = options.ip
MCAST_PORT = options.port
MCAST_TTL = 2
print("Sending file", fname,"mcast group", MCAST_GRP, "port", MCAST_PORT)
send_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
send_sock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, MCAST_TTL)
segments = []
ff = open(fname, 'rb')
num = ff.read(1024)
pnum = 0
while len(num) > 0:
segments.append({"size":len(num), "num":pnum,"data":num,"fname":fname})
pnum += 1
num = ff.read(1024)
for packet in segments:
packet["total"] = len(segments)
while True:
for packet in segments:
send_sock.sendto(pickle.dumps(packet), (MCAST_GRP, MCAST_PORT))
time.sleep(0.0001)
print("Sent file")
time.sleep(0.100)
| 28.40678 | 84 | 0.592482 | [
"MIT"
] | Fullaxx/RapidBuilds | RapidBuild64/003-interp/RLBFILES/rootcopy/usr/bin/rl_mcastsend.py | 1,676 | Python |
from os import path
from setuptools import find_packages, setup
here = path.abspath(path.dirname(__file__))
with open(path.join(here, "README.md"), encoding="utf-8") as f:
long_description = f.read()
setup(
name="calver",
description="Setuptools extension for CalVer package versions",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/di/calver",
author="Dustin Ingram",
author_email="[email protected]",
classifiers=[
"Intended Audience :: Developers",
"Topic :: Software Development :: Build Tools",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python :: 3",
],
keywords="calver",
package_dir={"": "src"},
packages=find_packages(where="src"),
python_requires=">=3.5",
entry_points={
"distutils.setup_keywords": [
"use_calver = calver.integration:version",
],
},
use_calver=True,
)
| 28.628571 | 67 | 0.651697 | [
"Apache-2.0"
] | di/calver | setup.py | 1,002 | Python |
import tensorflow as tf
from google.protobuf import json_format, text_format
from tensorflow.contrib import graph_editor as ge
def save():
with tf.Graph().as_default() as g:
x = tf.placeholder(tf.float32, name="input")
a = tf.Variable(5.0)
res: tf.Tensor = tf.multiply(a, x, name="mul")
with tf.Session(graph=g) as sess:
sess.run(tf.global_variables_initializer())
g: tf.Graph = tf.get_default_graph()
gdef = g.as_graph_def()
gdef = tf.graph_util.convert_variables_to_constants(
sess,
gdef, ["mul"],
variable_names_whitelist=None,
variable_names_blacklist=None
)
tf.train.write_graph(gdef, logdir="/tmp/k", name="test")
def load():
with tf.Graph().as_default() as g:
with tf.Session(graph=g) as sess:
x = tf.placeholder(tf.float32)
xx = 2 * x + 7
with open("/tmp/k/test", 'rb') as f:
graph_def = tf.GraphDef()
text_format.Merge(f.read(), graph_def)
y = tf.import_graph_def(
graph_def,
input_map={
"input:0": xx,
},
return_elements=["mul:0"],
name=None,
op_dict=None,
producer_op_list=None
)
print(sess.run(y, feed_dict={
x: 15,
}))
def main():
# save()
load()
with tf.Graph().as_default():
rr = tf.constant(15.0)
with tf.Session() as sess:
meta_graph_def = tf.saved_model.loader.load(
sess, [tf.saved_model.tag_constants.SERVING], "/tmp/lll"
)
signature = meta_graph_def.signature_def
signature_key = tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
x_tensor_name = signature[signature_key].inputs["x"].name
y_tensor_name = signature[signature_key].outputs["y"].name
x = sess.graph.get_tensor_by_name(x_tensor_name)
y = sess.graph.get_tensor_by_name(y_tensor_name)
h = tf.get_session_handle(rr)
h = sess.run(h)
y_out = sess.run(y, {x: h})
# print(y_out)
# for op in tf.get_default_graph().get_operations():
# print(op.name)
if __name__ == "__main__":
main()
| 30.225 | 96 | 0.547974 | [
"MIT"
] | nmiculinic/minion-basecaller | mincall/_experiments/load_save_modify.py | 2,418 | Python |
from one import abc
print("Hello")
abc.abc() | 14.666667 | 19 | 0.727273 | [
"MIT"
] | Maurya232Abhishek/Python-repository-for-basics | Interchange.py | 44 | Python |
import base64
try:
import json
except ImportError:
try:
import simplejson as json
except ImportError:
import django.utils.simplejson as json
import boto.sqs.message
from django.contrib.contenttypes.models import ContentType
class ModelInstanceMessage(boto.sqs.message.RawMessage):
"""SQS Message class that returns
"""
def __init__(self, queue=None, instance=None):
boto.sqs.message.RawMessage.__init__(
self, queue=queue, body=instance)
def encode(self, value):
ct = ContentType.objects.get_for_model(value)
return base64.b64encode(
json.dumps(
(ct.app_label, ct.model, value.pk)))
def decode(self, value):
try:
app_label, model, pk = json.loads(base64.b64decode(value))
        except Exception as e:
self.__reason = "Error decoding payload: %s" % e
return None
try:
ct = ContentType.objects.get(app_label=app_label, model=model)
except ContentType.DoesNotExist:
self.__reason = "Invalid content type."
return None
cls = ct.model_class()
try:
return cls.objects.get(pk=pk)
except cls.DoesNotExist:
self.__reason = "%s.%s %r does not exist" % (
cls.__module__, cls.__name__, pk)
return None
def get_body(self):
rv = boto.sqs.message.RawMessage.get_body(self)
if rv is not None:
return rv
raise ValueError(self.__reason)
def get_instance(self):
return self.get_body()
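# A minimal usage sketch (illustration only; the region and queue name are
# assumptions, not part of this module): register the message class on a boto
# SQS queue, enqueue a model instance, and read instances back.
def _example_round_trip(instance, queue_name='model-instance-queue'):
    """Send a model instance through SQS and read it back (illustrative)."""
    import boto.sqs
    conn = boto.sqs.connect_to_region('us-east-1')
    queue = conn.create_queue(queue_name)
    queue.set_message_class(ModelInstanceMessage)
    queue.write(ModelInstanceMessage(queue=queue, instance=instance))
    return [m.get_instance() for m in queue.get_messages()]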
| 28.034483 | 74 | 0.603936 | [
"BSD-3-Clause"
] | mpasternacki/django-sqs | django_sqs/message.py | 1,626 | Python |
from __future__ import print_function
import sys
from operator import add
from pyspark import SparkContext
from pyspark import SparkConf,SparkContext
from pyspark.streaming import StreamingContext
import sys
import requests
from operator import add
from pyspark.sql.types import *
from pyspark.sql import functions as func
from pyspark.sql.functions import lit
from pyspark.sql.functions import udf
from pyspark.sql.functions import *
from pyspark.sql.functions import array
# if __name__ == "__main__":
# create spark configuration
# conf = SparkConf(appName="TPCH-Example")
# create spark context with the above configuration
# sc = SparkContext(conf=conf)
# lineitems = sqlContext.read.format('csv').options(header='true', inferSchema='true', sep ="|").load(sys.arg[1])
path="file:////home/kia/GIT/MET-CS777/data/tpch_tables_scale_0.1/"
# path is where you have the folder. It can be a distributed path like S3, gc or hdfs
customer = sqlContext.read.format('csv').options(header='true', inferSchema='true', sep ="|").load(path+"customer.tbl")
orders = sqlContext.read.format('csv').options(header='true', inferSchema='true', sep ="|").load(path+"orders.tbl")
lineitems = sqlContext.read.format('csv').options(header='true', inferSchema='true', sep ="|").load(path+"lineitem.tbl")
part = sqlContext.read.format('csv').options(header='true', inferSchema='true', sep ="|").load(path+"part.tbl")
supplier = sqlContext.read.format('csv').options(header='true', inferSchema='true', sep ="|").load(path+"supplier.tbl")
partsupp = sqlContext.read.format('csv').options(header='true', inferSchema='true', sep ="|").load(path+"partsupp.tbl")
region = sqlContext.read.format('csv').options(header='true', inferSchema='true', sep ="|").load(path+"region.tbl")
nation = sqlContext.read.format('csv').options(header='true', inferSchema='true', sep ="|").load(path+"nation.tbl")
# You can convert all to RDDs if you want.
# customerRDD=customer.rdd
# ordersRDD=orders.rdd
# lineitemsRDD=lineitems.rdd
# partRDD=part.rdd
# supplierRDD=supplier.rdd
# partsuppRDD=partsupp.rdd
# regionRDD=region.rdd
# nationRDD=nation.rdd
# Question 1
# Implement a pyspark code that can find out the top-10 sold products.
lines = lineitems.select("ORDERKEY", "PARTKEY")\
.withColumn("COUNT", lit(1))\
.groupBy("PARTKEY").agg(func.sum("COUNT"))
result_1 = lines.orderBy("sum(COUNT)", ascending=False).limit(10)
result_1.show()
result_1.rdd.saveAsTextFile(sys.argv[2])
# ---------------------------------------------------------------------------
# Question 2
# Find the top-10 customers based on the number of products ordered.
order_parts = lineitems.select("ORDERKEY", "PARTKEY")
customer_orders = orders.select("ORDERKEY", "CUSTKEY")
# Here we get the a table of all customers and their ordered parts.
customer_parts = customer_orders.join(order_parts, customer_orders.ORDERKEY == order_parts.ORDERKEY , 'full' ).drop('ORDERKEY')
# After we have a table of (CUSTKEY, ORDERKEY), we can just count up the number of times that we see the customer key in the table for each customer
# And this is a the number of times that the customer ordered parts.
customer_parts.withColumn("COUNT", lit(1)).groupBy("CUSTKEY").agg(func.sum("COUNT")).orderBy("sum(COUNT)", ascending=False).limit(10).show()
# +-------+----------+
# |CUSTKEY|sum(COUNT)|
# +-------+----------+
# | 8362| 155|
# | 346| 153|
# | 14707| 149|
# | 11998| 148|
# | 9454| 148|
# | 14398| 147|
# | 85| 142|
# | 10354| 142|
# | 3709| 141|
# | 547| 141|
# +-------+----------+
# ---------------------------------------------------------------------------
# Question 3
# Find the top-10 customers that have ordered products from the same supplier.
partsupp_keys=partsupp.select("PARTKEY", "SUPPKEY")
custpmer_supplier=customer_parts.join(partsupp_keys, customer_parts.PARTKEY == partsupp.PARTKEY , 'full' ).drop('PARTKEY')
custpmer_supplier.withColumn("COUNT", lit(1)).groupBy("CUSTKEY", "SUPPKEY").agg(func.sum("COUNT")).orderBy("sum(COUNT)", ascending=False).limit(10).show()
# +-------+-------+----------+
# |CUSTKEY|SUPPKEY|sum(COUNT)|
# +-------+-------+----------+
# | 4567| 844| 7|
# | 4792| 592| 6|
# | 11809| 17| 6|
# | 14767| 8| 6|
# | 2173| 572| 6|
# | 6139| 233| 6|
# | 874| 430| 6|
# | 154| 380| 5|
# | 6889| 729| 5|
# | 8794| 545| 5|
# +-------+-------+----------+
# ---------------------------------------------------------------------------
# Question 4 and 5
# Find the customers who have not ordered products from their own country and have ordered only foreign products.
# Solution:
# We get from custpmer_supplier CUSTKEY and SUPPKEY
# custpmer_supplier.show()
# +-------+-------+
# |CUSTKEY|SUPPKEY|
# +-------+-------+
# | 9733| 149|
# | 9733| 399|
# | 9733| 649|
# ...
# We need to just check if the customer has ordered something from his own country.
custpmer_supplier.show()
customer_nationKey = customer.select("CUSTKEY", "NATIONKEY")
supplier_nationKey = supplier.select("SUPPKEY", "NATIONKEY")
custpmer_supplier_custNation = custpmer_supplier.join(customer_nationKey, "CUSTKEY", 'full')
custpmer_supplier_supNation = custpmer_supplier.join(supplier_nationKey, "SUPPKEY", 'full')
from pyspark.sql import functions as F
from pyspark.sql.functions import udf
custpmer_supplier_custNation_agg = custpmer_supplier_custNation.groupBy("CUSTKEY").agg(F.collect_set("NATIONKEY").alias('agg_nation'))
cSN_agg_withSupp = custpmer_supplier_custNation.join(custpmer_supplier_custNation_agg, "CUSTKEY" , 'full')
# We need to cast the NationKey to IntegerType
cSN_agg_withSupp = cSN_agg_withSupp.withColumn("NATIONKEY", cSN_agg_withSupp["NATIONKEY"].cast(IntegerType()))
# Check Schema
cSN_agg_withSupp.printSchema()
# Define a UDF to check if the nation of the customer is in the list of his orders_products_nations
isIn_udf = udf(lambda element, mlist: True if element in mlist else False, BooleanType())
from_own = cSN_agg_withSupp.withColumn("from_own", isIn_udf(cSN_agg_withSupp.NATIONKEY, cSN_agg_withSupp.agg_nation))
# Should return none after filter because we have no customer that have not ordered products from his own country.
from_own.filter(from_own.from_own==False).show()
# from_own.show()
# +-------+-------+---------+----------+--------+
# |CUSTKEY|SUPPKEY|NATIONKEY|agg_nation|from_own|
# +-------+-------+---------+----------+--------+
# +-------+-------+---------+----------+--------+
# ---------------------------------------------------------------------------
# Question 6
# Find the top-10 similar customers based of their orders. (Jaccard Similarity)
# First of all we collect all of the products that each customer ordered.
customer_partset = customer_parts.groupBy("CUSTKEY").agg(F.collect_set("PARTKEY").alias('product_list'))
# Do the cross join and rename fields
customers_parts_combi = customer_partset.crossJoin(customer_partset).toDF("C1", "L1", "C2", "L2").filter("C1 not like C2")
# then we can drop duplicates which might take longer time.
customers_parts_combi = customers_parts_combi.dropDuplicates(['C1','C2'])
# Define a user defined function for calculation of Jaccard similarity distance
# Return type is Float and it should defined.
jaccard = udf(lambda a, b: float(float( len(set(a) & set(b))) / len( set(a) | set(b) )) , FloatType())
customer_jaccard = customers_parts_combi.withColumn("jaccard", jaccard(customers_parts_combi.L1, customers_parts_combi.L2))
# The following line will cause large number of computation tasks.
customer_jaccard.orderBy("jaccard", ascending=False).limit(10).show()
# On TPCH scale 0.1 you can get the following results
# +-----+--------------------+-----+--------------------+----------+
# | C1| L1| C2| L2| jaccard|
# +-----+--------------------+-----+--------------------+----------+
# |10376|[13032, 18343, 15...| 8456|[15747, 18343, 41...|0.09090909|
# | 8456|[15747, 18343, 41...|10376|[13032, 18343, 15...|0.09090909|
# | 4808|[17169, 19122, 33...|10901|[10142, 9529, 124...|0.06666667|
# |10901|[10142, 9529, 124...| 4808|[17169, 19122, 33...|0.06666667|
# | 7532|[15572, 2151, 174...| 5390|[5452, 16969, 755...|0.06451613|
# | 5390|[5452, 16969, 755...| 7532|[15572, 2151, 174...|0.06451613|
# | 2489|[6418, 7101, 7102...| 4283|[13060, 12044, 12...|0.06349207|
# | 4283|[13060, 12044, 12...| 2489|[6418, 7101, 7102...|0.06349207|
# | 7739|[9743, 16030, 489...| 5462|[6890, 7231, 1737...| 0.0625|
# | 4385|[1648, 7100, 1122...| 2768|[19866, 1648, 123...| 0.0625|
# +-----+--------------------+-----+--------------------+----------+
# The most similar customers are 10376 and 8456
# ---------------------------------------------------------------------------
# Question 7
# Find the top-10 product pairs that are ordered mostly together.
# RDD solution
# Easier to do it in RDD
lineitemsRDD = sqlContext.read.format('csv').options(header='true', inferSchema='false', sep ="|").load(path+"lineitem.tbl").rdd
orderPartsRDD = lineitemsRDD.map(lambda x: (x[0], x[1]))
order_PartList_RDD = orderPartsRDD.combineByKey(lambda x: [x], lambda u, v: u + [v], lambda u1,u2: u1+u2).map(lambda x:(x[0], list(x[1]))).filter(lambda x: len(x[1])>1)
from itertools import combinations
order_PartPermutList_RDD= order_PartList_RDD .flatMap(lambda x: combinations(x[1], 2) ).map(lambda x: ((x[0], x[1] ), 1))
order_PartPermutList_RDD.reduceByKey(lambda a, b:a+b).top(10, lambda x: x[1])
# Dataframe
from pyspark.sql import functions as F
order_parts = lineitems.select("ORDERKEY", "PARTKEY")
partLists= order_parts.groupBy("ORDERKEY").agg(F.collect_set("PARTKEY").alias('listParts'))
# Process only pairs of products - remove orders that include only one single product.
partLists= partLists.where(size(col("listParts")) > 1)
# Define a function to create all pair combinations.
# You can also use itertools
# import itertools
# from itertools import permutations
# I define here the following permutation function.
def permut(x):
a=list()
for i in range(len(x)):
for j in range(i, len(x)):
if(i != j):
a.append(str(x[i])+"-"+str(x[j]))
return a
# ...
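# A possible continuation (a sketch under assumptions, not the original
# author's code): wrap permut in a UDF, explode the generated pair strings and
# count them to obtain the top-10 pairs with the DataFrame API.
permut_udf = udf(permut, ArrayType(StringType()))
pair_counts = (partLists
               .withColumn("pairs", permut_udf(col("listParts")))
               .select(explode(col("pairs")).alias("pair"))
               .groupBy("pair")
               .count()
               .orderBy("count", ascending=False)
               .limit(10))
pair_counts.show()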
| 35.092409 | 168 | 0.622966 | [
"BSD-3-Clause"
] | AvantikaDG/MET-CS777 | Spark-Example-TPCH/TPCH-Example_Solution_Dataframe.py | 10,633 | Python |
__copyright__ = "Copyright 2016-2018, Netflix, Inc."
__license__ = "Apache, Version 2.0"
"""
Run embedded doctests
"""
import doctest
from vmaf.tools import misc
from vmaf.tools import stats
def test_doctest():
doctest.testmod(misc)
doctest.testmod(stats)
| 16.75 | 52 | 0.735075 | [
"Apache-2.0"
] | 2uropa/vmaf | python/test/doctest_test.py | 268 | Python |
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2019 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""This module contains an example of skill for an AEA."""
from aea.configurations.base import PublicId
PUBLIC_ID = PublicId.from_str("fetchai/gym:0.13.0")
| 36.730769 | 80 | 0.604188 | [
"Apache-2.0"
] | marcofavorito/agents-aea | packages/fetchai/skills/gym/__init__.py | 955 | Python |
dist = int(input())
clubs = int(input())
clublist = []
strokes = [0]*(dist+1)
for i in range(clubs):
clublist.append(int(input()))
for i in range(1, dist+1):
min_club = 1000000
for j in range(clubs):
if i - clublist[j] >= 0:
min_club = min(min_club, strokes[i-clublist[j]]+1)
strokes[i] = min_club
if strokes[dist] != 1000000:
print(f"Roberta wins in {strokes[dist]} strokes.")
else:
print("Roberta acknowledges defeat.")
| 20.521739 | 62 | 0.612288 | [
"MIT"
] | Togohogo1/Programming-Problems | CCC/CCC_00_S4_Golf.py | 472 | Python |
from vyxal.parse import parse
from vyxal.transpile import transpile
def test_if():
# TODO(user/cgccuser) try with more branches
vy = """[ 1 | 2 ]"""
py = transpile(vy)
expected = """condition = pop(stack, 1, ctx=ctx)
if boolify(condition, ctx):
stack.append(1)
else:
stack.append(2)
"""
assert py == expected
| 21.1875 | 52 | 0.640118 | [
"MIT"
] | a-stone-arachnid/Vyxal | tests/test_transpiler.py | 339 | Python |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from oslo_config import fixture as config_fixture
from keystone.common import provider_api
from keystone.credential.providers import fernet as credential_provider
from keystone.tests import unit
from keystone.tests.unit import default_fixtures
from keystone.tests.unit import ksfixtures
from keystone.tests.unit.ksfixtures import database
from keystone.credential.backends import sql as credential_sql
from keystone import exception
PROVIDERS = provider_api.ProviderAPIs
class SqlTests(unit.SQLDriverOverrides, unit.TestCase):
def setUp(self):
super(SqlTests, self).setUp()
self.useFixture(database.Database())
self.load_backends()
# populate the engine with tables & fixtures
self.load_fixtures(default_fixtures)
# defaulted by the data load
self.user_foo['enabled'] = True
def config_files(self):
config_files = super(SqlTests, self).config_files()
config_files.append(unit.dirs.tests_conf('backend_sql.conf'))
return config_files
class SqlCredential(SqlTests):
def _create_credential_with_user_id(self, user_id=None):
if not user_id:
user_id = uuid.uuid4().hex
credential = unit.new_credential_ref(user_id=user_id,
extra=uuid.uuid4().hex,
type=uuid.uuid4().hex)
PROVIDERS.credential_api.create_credential(
credential['id'], credential
)
return credential
def _validate_credential_list(self, retrieved_credentials,
expected_credentials):
self.assertEqual(len(expected_credentials), len(retrieved_credentials))
retrieved_ids = [c['id'] for c in retrieved_credentials]
for cred in expected_credentials:
self.assertIn(cred['id'], retrieved_ids)
def setUp(self):
super(SqlCredential, self).setUp()
self.useFixture(
ksfixtures.KeyRepository(
self.config_fixture,
'credential',
credential_provider.MAX_ACTIVE_KEYS
)
)
self.credentials = []
self.user_credentials = []
# setup 3 credentials with random user ids
for _ in range(3):
cred = self._create_credential_with_user_id()
self.user_credentials.append(cred)
self.credentials.append(cred)
# setup 3 credentials with specific user ids
for _ in range(3):
cred = self._create_credential_with_user_id(self.user_foo['id'])
self.user_credentials.append(cred)
self.credentials.append(cred)
def test_backend_credential_sql_hints_none(self):
credentials = PROVIDERS.credential_api.list_credentials(hints=None)
self._validate_credential_list(credentials, self.user_credentials)
def test_backend_credential_sql_no_hints(self):
credentials = PROVIDERS.credential_api.list_credentials()
self._validate_credential_list(credentials, self.user_credentials)
def test_backend_credential_sql_encrypted_string(self):
cred_dict = {
'id': uuid.uuid4().hex,
'type': uuid.uuid4().hex,
'hash': uuid.uuid4().hex,
'encrypted_blob': b'randomdata'
}
ref = credential_sql.CredentialModel.from_dict(cred_dict)
# Make sure CredentialModel is handing over a text string
# to the database. To avoid encoding issues
self.assertIsInstance(ref.encrypted_blob, str)
def test_credential_limits(self):
config_fixture_ = self.user = self.useFixture(config_fixture.Config())
config_fixture_.config(group='credential', user_limit=4)
self._create_credential_with_user_id(self.user_foo['id'])
self.assertRaises(exception.CredentialLimitExceeded,
self._create_credential_with_user_id,
self.user_foo['id'])
| 39.224138 | 79 | 0.675824 | [
"Apache-2.0"
] | 10088/keystone | keystone/tests/unit/credential/test_backend_sql.py | 4,550 | Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Twinboundary plot
This module provides various kinds of plots related to twin boundaries.
"""
import numpy as np
from copy import deepcopy
from twinpy.plot.base import line_chart
def plot_plane(ax,
distances:list,
z_coords:list,
label:str=None,
decorate:bool=True,
show_half:bool=False,
**kwargs):
"""
Plot plane.
Args:
ax: matplotlib ax.
distances (list): List of plane intervals.
z_coords (list): List of z coordinate of each plane.
label (str): Plot label.
decorate (bool): If True, ax is decorated.
show_half: If True, atom planes which are periodically equivalent are
                   not shown.
"""
if decorate:
xlabel = 'Distance'
        ylabel = 'Height'
else:
xlabel = ylabel = None
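    # Extend the interval and height lists with periodic copies so the plotted profile closes at both ends.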
_distances = deepcopy(distances)
_z_coords = deepcopy(z_coords)
_distances.insert(0, distances[-1])
_distances.append(distances[0])
_z_coords.insert(0, -distances[-1])
_z_coords.append(z_coords[-1]+distances[0])
c = np.sum(distances)
fixed_z_coords = _z_coords + distances[0] / 2 - c / 2
num = len(fixed_z_coords)
bulk_distance = _distances[int(num/4)]
if show_half:
n = int((num + 2) / 4)
_distances = _distances[n:3*n]
fixed_z_coords = fixed_z_coords[n:3*n]
line_chart(ax=ax,
xdata=_distances,
ydata=fixed_z_coords,
xlabel=xlabel,
ylabel=ylabel,
label=label,
sort_by='y',
**kwargs)
if decorate:
xmin = bulk_distance - 0.025
xmax = bulk_distance + 0.025
if show_half:
ax.hlines(0,
xmin=xmin-0.01,
xmax=xmax+0.01,
linestyle='--',
color='k',
linewidth=1.)
else:
tb_idx = [1, int(num/2), num-1]
for idx in tb_idx:
ax.hlines(fixed_z_coords[idx]-distances[0]/2,
xmin=xmin-0.01,
xmax=xmax+0.01,
linestyle='--',
color='k',
linewidth=1.)
def plot_angle(ax,
angles:list,
z_coords:list,
label:str=None,
decorate:bool=True):
"""
Plot angle.
Args:
ax: matplotlib ax.
        angles (list): List of angles.
        z_coords (list): List of z coordinate of each plane.
label (str): Plot label.
decorate (bool): If True, ax is decorated.
"""
if decorate:
xlabel = 'Angle'
        ylabel = 'Height'
else:
xlabel = ylabel = None
_angles = deepcopy(angles)
_z_coords = deepcopy(z_coords)
_angles.append(angles[0])
_z_coords.append(z_coords[-1]+z_coords[1])
line_chart(ax=ax,
xdata=_angles,
ydata=_z_coords,
xlabel=xlabel,
ylabel=ylabel,
label=label,
sort_by='y')
if decorate:
num = len(_z_coords)
tb_idx = [0, int(num/2), num-1]
bulk_angle = angles[int(num/4)]
for idx in tb_idx:
ax.hlines(_z_coords[idx],
xmin=-1,
xmax=bulk_angle+2,
linestyle='--',
linewidth=1.5)
def plot_pair_distance(ax,
pair_distances:list,
z_coords:list,
label:str=None,
decorate:bool=True):
"""
    Plot pair distance.
Args:
ax: matplotlib ax.
        pair_distances (list): List of A-B pair distances, which are originally
            the primitive pairs in the HCP structure.
z_coords (list): List of z coordinate of each plane.
label (str): Plot label.
decorate (bool): If True, ax is decorated.
"""
if decorate:
xlabel = 'Pair Distance'
        ylabel = 'Height'
else:
xlabel = ylabel = None
_pair_distances = deepcopy(pair_distances)
_z_coords = deepcopy(z_coords)
_pair_distances.append(pair_distances[0])
_z_coords.append(z_coords[-1]+z_coords[1])
line_chart(ax=ax,
xdata=_pair_distances,
ydata=_z_coords,
xlabel=xlabel,
ylabel=ylabel,
label=label,
sort_by='y')
if decorate:
num = len(_z_coords)
tb_idx = [0, int(num/2), num-1]
bulk_pair_distance = pair_distances[int(num/4)]
for idx in tb_idx:
ax.hlines(_z_coords[idx],
xmin=-1,
xmax=bulk_pair_distance+2,
linestyle='--',
linewidth=1.5)
| 28.034091 | 78 | 0.502635 | [
"MIT"
] | kei0822kei/twinpy | twinpy/plot/twinboundary.py | 4,934 | Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import tools
import db
import time
import re
import json
from plugins import base
# from plugins import lista
from plugins import listb
from plugins import dotpy
class Iptv (object):
def __init__ (self) :
self.T = tools.Tools()
self.DB = db.DataBase()
def run(self) :
Base = base.Source()
Base.getSource()
Dotpy = dotpy.Source()
Dotpy.getSource()
listB = listb.Source()
listB.getSource()
# # listA = lista.Source()
# # urlList = listA.getSource()
# # for item in urlList :
# # self.addData(item)
self.outPut()
self.outJson()
print("DONE!!")
def outPut (self) :
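        # Keep one online source per channel title (delay < 500) and write them to an m3u8 playlist grouped by category.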
sql = """SELECT * FROM
(SELECT * FROM %s WHERE online = 1 ORDER BY delay DESC) AS delay
GROUP BY LOWER(delay.title)
HAVING delay.title != '' and delay.title != 'CCTV-' AND delay.delay < 500
ORDER BY level ASC, length(title) ASC, title ASC
""" % (self.DB.table)
result = self.DB.query(sql)
with open('tv.m3u8', 'w') as f:
f.write("#EXTM3U\n")
for item in result :
className = '其他频道'
if item[4] == 1 :
className = '中央频道'
elif item[4] == 2 :
className = '地方频道'
elif item[4] == 3 :
className = '地方频道'
elif item[4] == 7 :
className = '广播频道'
else :
className = '其他频道'
f.write("#EXTINF:-1, group-title=\"%s\", %s\n" % (className, item[1]))
f.write("%s\n" % (item[3]))
def outJson (self) :
sql = """SELECT * FROM
(SELECT * FROM %s WHERE online = 1 ORDER BY delay DESC) AS delay
GROUP BY LOWER(delay.title)
HAVING delay.title != '' and delay.title != 'CCTV-' AND delay.delay < 500
ORDER BY level ASC, length(title) ASC, title ASC
""" % (self.DB.table)
result = self.DB.query(sql)
fmtList = {
'cctv': [],
'local': [],
'other': [],
'radio': []
}
for item in result :
tmp = {
'title': item[1],
'url': item[3]
}
if item[4] == 1 :
fmtList['cctv'].append(tmp)
elif item[4] == 2 :
fmtList['local'].append(tmp)
elif item[4] == 3 :
fmtList['local'].append(tmp)
elif item[4] == 7 :
fmtList['radio'].append(tmp)
else :
fmtList['other'].append(tmp)
jsonStr = json.dumps(fmtList)
with open('tv.json', 'w') as f:
f.write(jsonStr)
if __name__ == '__main__':
obj = Iptv()
obj.run()
| 26.276786 | 86 | 0.458716 | [
"MIT"
] | SchumyHao/iptv-m3u-maker | main.py | 2,991 | Python |
# isort:skip_file
from __future__ import absolute_import, division, print_function, unicode_literals
import unittest
import torch_glow
from torch_glow import InputMeta, CompilationOptions, GlowCompileSpec
import torch
class Model(torch.nn.Module):
def __init__(self):
super(Model, self).__init__()
self.linear = torch.nn.Linear(5, 10)
def forward(self, x):
return self.linear(x)
def run_model(m, input, randomize):
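    # Trace the model, describe its single input with InputMeta, then compile it with to_glow on the Interpreter backend and run it.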
torch_glow.disableFusionPass()
traced_m = torch.jit.trace(m, input)
input_meta = InputMeta()
input_meta.set_same_as(input)
inputs = [input_meta]
options = CompilationOptions()
options.backend = "Interpreter"
options.randomize_constants = randomize
spec = GlowCompileSpec()
spec.set(inputs, options)
glow_m = torch_glow.to_glow(traced_m, {"forward": spec})
return glow_m.forward(input)
class TestRandomizeWeights(unittest.TestCase):
def test_randomize_weights(self):
m = Model()
input = torch.randn(5)
normal1 = run_model(m, input, False)
normal2 = run_model(m, input, False)
rand = run_model(m, input, True)
assert torch.allclose(normal1, normal2)
assert not torch.allclose(normal1, rand)
| 26.723404 | 82 | 0.696656 | [
"Apache-2.0"
] | 842974287/glow | torch_glow/tests/functionality/randomize_constants_test.py | 1,256 | Python |
from .convert_index_to_indices import convert_index_to_indices as index_to_indices
from .convert_indices_to_index import convert_indices_to_index as indices_to_index
from .convert_indices_to_zip_type import convert_indices_to_zip_type as indices_to_zip_type
from .convert_zip_type_to_indices import convert_zip_type_to_indices as zip_type_to_indices
| 70 | 91 | 0.92 | [
"MIT"
] | cantbeblank96/kevin_toolbox | kevin/machine_learning/patch_for_numpy/axis_and_dim/convert/__init__.py | 350 | Python |
"""
##################################################################################################
# Copyright Info : Copyright (c) Davar Lab @ Hikvision Research Institute. All rights reserved.
# Filename : test.py
# Abstract : The common testing api for video text recognition, track, quality score
# Current Version: 1.0.0
# Date : 2021-06-02
##################################################################################################
"""
import numpy as np
import mmcv
import torch
def single_gpu_test(model,
data_loader):
""" Test model with single GPU, used for visualization.
Args:
model (nn.Module): Model to be tested.
data_loader (nn.Dataloader): Pytorch data loader.
Returns:
dict: test results
"""
model.eval()
results = dict()
results['texts'] = []
results['img_info'] = []
results['glimpses'] = []
results['scores'] = []
dataset = data_loader.dataset
prog_bar = mmcv.ProgressBar(len(dataset))
for i, data in enumerate(data_loader):
with torch.no_grad():
result = model(return_loss=False, rescale=True, **data)
texts = result['text']
glimpses = result['glimpses']
glimpses = glimpses.cpu().numpy()
img_infos = result['img_info']
scores = result['scores']
scores = scores.cpu().numpy()
scores = scores.reshape(-1)
batch_size = len(texts)
results['texts'].extend(texts)
results['img_info'].extend(img_infos)
results['glimpses'].extend(glimpses)
results['scores'].extend(scores)
for _ in range(batch_size):
prog_bar.update()
new_glimpse = np.stack(results['glimpses'])
results['glimpses'] = new_glimpse
return results
| 32.052632 | 98 | 0.539135 | [
"Apache-2.0"
] | hikopensource/DAVAR-Lab-OCR | davarocr/davarocr/davar_videotext/apis/test.py | 1,827 | Python |
# -*- coding: utf-8 -*-
"""
pyQode is a source code editor widget for PyQt5
pyQode is a **namespace package**.
"""
import pkg_resources
pkg_resources.declare_namespace(__name__)
| 19.888889 | 47 | 0.731844 | [
"MIT"
] | SunChuquin/pyqode.qt | pyqode/__init__.py | 179 | Python |
#!/usr/bin/env python3
# -*- coding: utf-8 -*- #
# This file is only used if you use `make publish` or
# explicitly specify it as your config file.
import os
import sys
sys.path.append(os.curdir)
from pelicanconf import *
# If your site is available via HTTPS, make sure SITEURL begins with https://
SITEURL = ''
RELATIVE_URLS = False
FEED_ALL_ATOM = 'atom.xml'
#CATEGORY_FEED_ATOM = 'feeds/{slug}.atom.xml'
DELETE_OUTPUT_DIRECTORY = True
# Following items are often useful when publishing
#DISQUS_SITENAME = ""
#GOOGLE_ANALYTICS = ""
 | 22.541667 | 77 | 0.733826 | [
"MIT"
] | jandolezal/planetavos | publishconf.py | 541 | Python |
import django.contrib.auth.models
import django.contrib.auth.validators
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
    dependencies =
 | 15.076923 | 40 | 0.77551 | [
"MIT"
] | mjhow4/attendance-app | Court-APP/users/__init__.py | 196 | Python |
from pyspedas import tnames
from pytplot import get_data, store_data, options
def mms_load_fpi_calc_pad(probe='1', level='sitl', datatype='', data_rate='', suffix='', autoscale=True):
"""
Calculates the omni-directional pitch angle distribution (summed and averaged)
from the individual tplot variables
Parameters:
probe: str
probe, valid values for MMS probes are ['1','2','3','4'].
level: str
indicates level of data processing. the default if no level is specified is 'sitl'
datatype: str
Valid datatypes for FPI are:
Quicklook: ['des', 'dis']
SITL: '' (none; loads both electron and ion data from single CDF)
L1b/L2: ['des-dist', 'dis-dist', 'dis-moms', 'des-moms']
data_rate: str
instrument data rates for FPI include 'brst' and 'fast'. The
default is 'fast'.
suffix: str
The tplot variable names will be given this suffix. By default,
no suffix is added.
autoscale: bool
If set, use the default zrange; otherwise, use the min and max of the data for the zrange
Returns:
List of tplot variables created.
"""
out_vars = []
if isinstance(datatype, str):
if datatype == '*' or datatype == '':
if level.lower() == 'ql':
datatype = ['des', 'dis']
else:
datatype = ['des-dist', 'dis-dist']
if isinstance(datatype, str):
datatype = [datatype]
for dtype in datatype:
species = dtype[1]
if level.lower() == 'sitl':
spec_str_format = 'PitchAngDist'
obs_str_format = '_fpi_' + species
else:
spec_str_format = 'pitchAngDist'
obs_str_format = '_d' + species + 's_'
obsstr = 'mms' + str(probe) + obs_str_format
if level.lower() == 'l2':
spec_str_format = 'pitchangdist'
pad_vars = [obsstr+spec_str_format+'_'+erange+'en_'+data_rate+suffix for erange in ['low', 'mid', 'high']]
else:
pad_vars = [obsstr+spec_str_format+'_'+erange+'En'+suffix for erange in ['low', 'mid', 'high']]
pad_avg_name = obsstr+'PitchAngDist_avg'+suffix
low_en = get_data(pad_vars[0])
mid_en = get_data(pad_vars[1])
high_en = get_data(pad_vars[2])
if low_en is None or mid_en is None or high_en is None:
v3_low_pad = tnames(pad_vars[0].lower()+'_'+data_rate)
v3_mid_pad = tnames(pad_vars[1].lower()+'_'+data_rate)
v3_high_pad = tnames(pad_vars[2].lower()+'_'+data_rate)
if v3_low_pad == [] or v3_mid_pad == [] or v3_high_pad == []:
continue
low_en = get_data(v3_low_pad[0])
mid_en = get_data(v3_mid_pad[0])
high_en = get_data(v3_high_pad[0])
pad_avg_name = pad_avg_name.lower()
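        # Omni-directional PAD: sum the low-, mid- and high-energy distributions, then average over the three energy ranges.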
e_pad_sum = low_en.y+mid_en.y+high_en.y
e_pad_avg = e_pad_sum/3.0
if level == 'l2':
pad_avg_name = pad_avg_name.lower()
if species == 'e':
species_str = 'electron'
elif species == 'i':
species_str = 'ion'
if level == 'ql':
store_data(obsstr+'PitchAngDist_sum'+suffix, data={'x': low_en.times, 'y': e_pad_sum, 'v': low_en.v})
options(obsstr+'PitchAngDist_sum'+suffix, 'ytitle', 'MMS'+str(probe)+' \\ '+species_str+' \\ PAD \\ SUM')
options(obsstr+'PitchAngDist_sum'+suffix, 'yrange', [0, 180])
options(obsstr+'PitchAngDist_sum'+suffix, 'zlog', True)
options(obsstr+'PitchAngDist_sum'+suffix, 'spec', True)
options(obsstr+'PitchAngDist_sum'+suffix, 'Colormap', 'jet')
out_vars.append(obsstr+'PitchAngDist_sum'+suffix)
store_data(pad_avg_name, data={'x': low_en.times, 'y': e_pad_avg, 'v': low_en.v})
options(pad_avg_name, 'ztitle', 'eV/(cm!U2!N s sr eV)')
options(pad_avg_name, 'ytitle', 'MMS'+str(probe)+' \\ '+species_str+' \\ PAD \\ AVG')
options(pad_avg_name, 'yrange', [0, 180])
options(pad_avg_name, 'zlog', True)
options(pad_avg_name, 'spec', True)
options(pad_avg_name, 'Colormap', 'jet')
out_vars.append(pad_avg_name)
    return out_vars
 | 38.122807 | 118 | 0.577773 | [
"MIT"
] | shihikoo/pyspedas | pyspedas/mms/fpi/mms_load_fpi_calc_pad.py | 4,346 | Python |
# -*- coding: utf-8 -*-
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# google-cloud-dataproc documentation build configuration file
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath(".."))
# For plugins that can not read conf.py.
# See also: https://github.com/docascode/sphinx-docfx-yaml/issues/85
sys.path.insert(0, os.path.abspath("."))
__version__ = ""
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = "1.5.5"
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.autosummary",
"sphinx.ext.intersphinx",
"sphinx.ext.coverage",
"sphinx.ext.doctest",
"sphinx.ext.napoleon",
"sphinx.ext.todo",
"sphinx.ext.viewcode",
"recommonmark",
]
# autodoc/autosummary flags
autoclass_content = "both"
autodoc_default_options = {"members": True}
autosummary_generate = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = [".rst", ".md"]
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The root toctree document.
root_doc = "index"
# General information about the project.
project = "google-cloud-dataproc"
copyright = "2019, Google"
author = "Google APIs"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The full version, including alpha/beta/rc tags.
release = __version__
# The short X.Y version.
version = ".".join(release.split(".")[0:2])
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = [
"_build",
"**/.nox/**/*",
"samples/AUTHORING_GUIDE.md",
"samples/CONTRIBUTING.md",
"samples/snippets/README.rst",
]
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "alabaster"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
"description": "Google Cloud Client Libraries for google-cloud-dataproc",
"github_user": "googleapis",
"github_repo": "python-dataproc",
"github_banner": True,
"font_family": "'Roboto', Georgia, sans",
"head_font_family": "'Roboto', Georgia, serif",
"code_font_family": "'Roboto Mono', 'Consolas', monospace",
}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = "google-cloud-dataproc-doc"
# -- Options for warnings ------------------------------------------------------
suppress_warnings = [
# Temporarily suppress this to avoid "more than one target found for
# cross-reference" warning, which are intractable for us to avoid while in
# a mono-repo.
# See https://github.com/sphinx-doc/sphinx/blob
# /2a65ffeef5c107c19084fabdd706cdff3f52d93c/sphinx/domains/python.py#L843
"ref.python"
]
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(
root_doc,
"google-cloud-dataproc.tex",
"google-cloud-dataproc Documentation",
author,
"manual",
)
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(
root_doc,
"google-cloud-dataproc",
"google-cloud-dataproc Documentation",
[author],
1,
)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
root_doc,
"google-cloud-dataproc",
"google-cloud-dataproc Documentation",
author,
"google-cloud-dataproc",
"google-cloud-dataproc Library",
"APIs",
)
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
"python": ("https://python.readthedocs.org/en/latest/", None),
"google-auth": ("https://googleapis.dev/python/google-auth/latest/", None),
"google.api_core": ("https://googleapis.dev/python/google-api-core/latest/", None,),
"grpc": ("https://grpc.github.io/grpc/python/", None),
"proto-plus": ("https://proto-plus-python.readthedocs.io/en/latest/", None),
"protobuf": ("https://googleapis.dev/python/protobuf/latest/", None),
}
# Napoleon settings
napoleon_google_docstring = True
napoleon_numpy_docstring = True
napoleon_include_private_with_doc = False
napoleon_include_special_with_doc = True
napoleon_use_admonition_for_examples = False
napoleon_use_admonition_for_notes = False
napoleon_use_admonition_for_references = False
napoleon_use_ivar = False
napoleon_use_param = True
napoleon_use_rtype = True
| 32.437173 | 88 | 0.704786 | [
"Apache-2.0"
] | LaudateCorpus1/python-dataproc | docs/conf.py | 12,391 | Python |
from collections import defaultdict
from advent_2021.helpers import get_input
def dfs(
caves: dict[str, list[str]],
current: str,
visited: set[str] | None = None,
) -> int:
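    # Depth-first count of all paths from current to "end"; lowercase (small) caves may be visited at most once.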
if current == "end":
return 1
nb_paths = 0
if visited is None:
visited = set()
for cave in caves[current]:
if cave in visited:
continue
nb_paths += dfs(
caves, cave, visited | {current} if current.islower() else visited
)
return nb_paths
if __name__ == "__main__":
caves: dict[str, list[str]] = defaultdict(list)
for line in get_input():
caves[line.split("-")[0]].append(line.split("-")[1])
caves[line.split("-")[1]].append(line.split("-")[0])
print(dfs(caves, "start"))
| 23.484848 | 78 | 0.580645 | [
"MIT"
] | mgesbert/advent | src/advent_2021/day_12/part_1.py | 775 | Python |
#!/usr/bin/env python
# Copyright (c) 2019 The Zcash developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or https://www.opensource.org/licenses/mit-license.php.
import sys; assert sys.version_info < (3,), ur"This script does not run under Python 3. Please use Python 2.7.x."
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal, assert_true,
get_coinbase_address,
start_nodes, stop_nodes,
initialize_chain_clean, connect_nodes_bi, wait_bitcoinds,
wait_and_assert_operationid_status
)
from decimal import Decimal
class WalletPersistenceTest (BitcoinTestFramework):
def setup_chain(self):
print("Initializing test directory " + self.options.tmpdir)
initialize_chain_clean(self.options.tmpdir, 3)
def setup_network(self, split=False):
self.nodes = start_nodes(3, self.options.tmpdir,
extra_args=[[
'-nuparams=5ba81b19:100', # Overwinter
'-nuparams=76b809bb:201', # Sapling
]] * 3)
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
self.is_network_split=False
self.sync_all()
def run_test(self):
# Sanity-check the test harness
self.nodes[0].generate(200)
assert_equal(self.nodes[0].getblockcount(), 200)
self.sync_all()
# Verify Sapling address is persisted in wallet (even when Sapling is not yet active)
sapling_addr = self.nodes[0].z_getnewaddress('sapling')
        # Make sure the node has the address
addresses = self.nodes[0].z_listaddresses()
assert_true(sapling_addr in addresses, "Should contain address before restart")
# Restart the nodes
stop_nodes(self.nodes)
wait_bitcoinds()
self.setup_network()
# Make sure we still have the address after restarting
addresses = self.nodes[0].z_listaddresses()
assert_true(sapling_addr in addresses, "Should contain address after restart")
# Activate Sapling
self.nodes[0].generate(1)
self.sync_all()
# Node 0 shields funds to Sapling address
taddr0 = get_coinbase_address(self.nodes[0])
recipients = []
recipients.append({"address": sapling_addr, "amount": Decimal('20')})
myopid = self.nodes[0].z_sendmany(taddr0, recipients, 1, 0)
wait_and_assert_operationid_status(self.nodes[0], myopid)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
# Verify shielded balance
assert_equal(self.nodes[0].z_getbalance(sapling_addr), Decimal('20'))
# Verify size of shielded pools
pools = self.nodes[0].getblockchaininfo()['valuePools']
assert_equal(pools[0]['chainValue'], Decimal('0')) # Sprout
assert_equal(pools[1]['chainValue'], Decimal('20')) # Sapling
# Restart the nodes
stop_nodes(self.nodes)
wait_bitcoinds()
self.setup_network()
# Verify size of shielded pools
pools = self.nodes[0].getblockchaininfo()['valuePools']
assert_equal(pools[0]['chainValue'], Decimal('0')) # Sprout
assert_equal(pools[1]['chainValue'], Decimal('20')) # Sapling
# Node 0 sends some shielded funds to Node 1
dest_addr = self.nodes[1].z_getnewaddress('sapling')
recipients = []
recipients.append({"address": dest_addr, "amount": Decimal('15')})
myopid = self.nodes[0].z_sendmany(sapling_addr, recipients, 1, 0)
wait_and_assert_operationid_status(self.nodes[0], myopid)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
# Verify balances
assert_equal(self.nodes[0].z_getbalance(sapling_addr), Decimal('5'))
assert_equal(self.nodes[1].z_getbalance(dest_addr), Decimal('15'))
# Restart the nodes
stop_nodes(self.nodes)
wait_bitcoinds()
self.setup_network()
# Verify balances
assert_equal(self.nodes[0].z_getbalance(sapling_addr), Decimal('5'))
assert_equal(self.nodes[1].z_getbalance(dest_addr), Decimal('15'))
# Verify importing a spending key will update and persist the nullifiers and witnesses correctly
sk0 = self.nodes[0].z_exportkey(sapling_addr)
self.nodes[2].z_importkey(sk0, "yes")
assert_equal(self.nodes[2].z_getbalance(sapling_addr), Decimal('5'))
# Restart the nodes
stop_nodes(self.nodes)
wait_bitcoinds()
self.setup_network()
# Verify nullifiers persisted correctly by checking balance
# Prior to PR #3590, there will be an error as spent notes are considered unspent:
# Assertion failed: expected: <25.00000000> but was: <5>
assert_equal(self.nodes[2].z_getbalance(sapling_addr), Decimal('5'))
        # Verify witnesses persisted correctly by sending shielded funds
recipients = []
recipients.append({"address": dest_addr, "amount": Decimal('1')})
myopid = self.nodes[2].z_sendmany(sapling_addr, recipients, 1, 0)
wait_and_assert_operationid_status(self.nodes[2], myopid)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
# Verify balances
assert_equal(self.nodes[2].z_getbalance(sapling_addr), Decimal('4'))
assert_equal(self.nodes[1].z_getbalance(dest_addr), Decimal('16'))
if __name__ == '__main__':
    WalletPersistenceTest().main()
 | 38.520833 | 113 | 0.660718 | [
"MIT"
] | Cabecinha84/fluxd | qa/rpc-tests/wallet_persistence.py | 5,547 | Python |
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as f
from torch.autograd import Variable
import os
import numpy as np
from tqdm import tqdm
def reparameterize(mu, logvar):
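    # Reparameterization trick: z = mu + sigma * eps with eps ~ N(0, I) and sigma = exp(logvar / 2).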
eps = Variable(torch.randn(mu.size(0), mu.size(1))).cuda()
z = mu + eps * torch.exp(logvar / 2)
return z
class VAE_MLP_CAT(nn.Module):
def __init__(self, latent_code_num, hidden):
super(VAE_MLP_CAT, self).__init__()
self.encoder = nn.Sequential(
# 1, 124, 32
nn.Conv2d(1, 32, kernel_size=4, stride=2, padding=1),
nn.BatchNorm2d(32),
nn.LeakyReLU(0.2, inplace=True),
# 32, 62, 16
nn.Conv2d(32, 64, kernel_size=4, stride=1, padding=1),
nn.BatchNorm2d(64),
nn.LeakyReLU(0.2, inplace=True),
# 64, 15, 15
nn.Conv2d(64, 128, kernel_size=5, stride=3, padding=1),
nn.BatchNorm2d(128),
nn.LeakyReLU(0.2, inplace=True),
# 128, 20, 5
)
self.fc11 = nn.Linear(128 * 10 * 5 * 2, latent_code_num)
self.fc12 = nn.Linear(128 * 10 * 5 * 2, latent_code_num)
self.mlp = nn.Sequential(
torch.nn.Linear(latent_code_num + 1, hidden),
torch.nn.Tanh(),
torch.nn.Linear(hidden, 1)
)
for p in self.mlp.parameters():
torch.nn.init.normal_(p, mean=0, std=0.1)
torch.nn.init.constant_(self.mlp[0].bias, val=0.)
torch.nn.init.constant_(self.mlp[2].bias, val=0.)
self.fc2 = nn.Linear(latent_code_num, 128 * 10 * 5 * 2)
self.decoder = nn.Sequential(
nn.ConvTranspose2d(128, 64, kernel_size=4, stride=2, padding=1),
nn.ReLU(inplace=True),
nn.ConvTranspose2d(64, 32, kernel_size=4, stride=1, padding=1),
nn.ReLU(inplace=True),
nn.ConvTranspose2d(32, 1, kernel_size=6, stride=3, padding=1),
nn.Sigmoid()
)
def get_reparameterized_code(self, x):
out1, out2 = self.encoder(x), self.encoder(x) # batch_s, 8, 7, 7
mu = self.fc11(out1.view(out1.size(0), -1)) # batch_s, latent
logvar = self.fc12(out2.view(out2.size(0), -1)) # batch_s, latent
        z = reparameterize(mu, logvar)  # batch_s, latent
return z
def forward(self, x, t):
out1, out2 = self.encoder(x), self.encoder(x) # batch_s, 8, 7, 7
mu = self.fc11(out1.view(out1.size(0), -1)) # batch_s, latent
logvar = self.fc12(out2.view(out2.size(0), -1)) # batch_s, latent
pre = self.mlp(torch.cat((t, mu), dim=1))
z = reparameterize(mu, logvar) # batch_s, latent
out3 = self.fc2(z).view(z.size(0), 128, 20, 5) # batch_s, 8, 7, 7
return self.decoder(out3), mu, logvar, pre
def predict(self, x, t):
out1, out2 = self.encoder(x), self.encoder(x) # batch_s, 8, 7, 7
mu = self.fc11(out1.view(out1.size(0), -1)) # batch_s, latent
pre = self.mlp(torch.cat((t, mu), dim=1))
return pre
def get_mid(self, x):
out1, out2 = self.encoder(x), self.encoder(x)
mu = self.fc11(out1.view(out1.size(0), -1))
return mu
def decode(self, z):
out3 = self.fc2(z).view(1, 128, 20, 5)
return self.decoder(out3)
def loss_func(recon_x, x, mu, logvar, pre_, label_):
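    # Joint objective: binary cross-entropy reconstruction + KL divergence from N(0, I) + MSE of the property prediction.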
mse = torch.nn.MSELoss()
binary_cross_entropy = f.binary_cross_entropy(recon_x, x, size_average=False)
k_l_divergence = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())
mse_loss = mse(pre_, label_)
return binary_cross_entropy + k_l_divergence + mse_loss
def train_vae_mlp(latent_code_num, hidden, params, device):
print('Totally ' + str(params['VAE_epoch_num']) + ' epochs to train')
os.environ['CUDA_VISIBLE_DEVICES'] = str(device)
thermal_conductivity_train_loader = torch.load('Data/thermal_conductivity_vae_mlp_train_loader.pkl')
heat_capacity_train_loader = torch.load('Data/heat_capacity_vae_mlp_train_loader.pkl')
heat_capacity_vae_mlp = VAE_MLP_CAT(latent_code_num, hidden).cuda()
thermal_conductivity_vae_mlp = VAE_MLP_CAT(latent_code_num, hidden).cuda()
thermal_conductivity_optimizer = optim.Adam(
thermal_conductivity_vae_mlp.parameters(), lr=0.001, betas=(0.9, 0.999), eps=1e-08, weight_decay=0)
heat_capacity_optimizer = optim.Adam(
heat_capacity_vae_mlp.parameters(), lr=0.001, betas=(0.9, 0.999), eps=1e-08, weight_decay=0)
thermal_conductivity_total_loss_list = np.ones(params['VAE_epoch_num'] + 10)
heat_capacity_total_loss_list = np.ones(params['VAE_epoch_num'] + 10)
thermal_conductivity_total_loss_list *= 5000000
heat_capacity_total_loss_list *= 5000000
thermal_conductivity_model_file_name = \
'Model_pkl/VAE_MLP_CAT_thermal_conductivity_latent_' + str(latent_code_num) + \
'_structure_' + str(hidden) + '.pkl'
heat_capacity_model_file_name = \
'Model_pkl/VAE_MLP_CAT_heat_capacity_latent_' + str(latent_code_num) + \
'_structure_' + str(hidden) + '.pkl'
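    # Stage 1: train the thermal-conductivity model, saving the checkpoint whenever the epoch loss is the best so far and stopping after 25 epochs without improvement.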
for epoch in range(params['VAE_epoch_num']):
total_loss = 0
thermal_conductivity_vae_mlp.train()
for i, data in enumerate(tqdm(thermal_conductivity_train_loader, 0)):
one_hot = torch.cat((data[0], data[1]), dim=1)
one_hot = one_hot.reshape(one_hot.shape[0], 1, one_hot.shape[1], one_hot.shape[2])
one_hot = Variable(one_hot).cuda().type(torch.cuda.FloatTensor)
thermal_conductivity_optimizer.zero_grad()
t = data[2].cuda().reshape(data[2].shape[0], 1).type(torch.cuda.FloatTensor)
label = data[3].cuda().reshape(data[3].shape[0], 1).type(torch.cuda.FloatTensor)
recon_x, mu, logvar, pre = thermal_conductivity_vae_mlp.forward(one_hot, t)
recon_x = recon_x[:, :, :one_hot.shape[2], :one_hot.shape[3]]
loss = loss_func(recon_x, one_hot, mu, logvar, pre, label)
loss.backward()
total_loss += loss.data.item() / 1000
thermal_conductivity_optimizer.step()
print('====> Epoch: {} Average loss: {:.4f}'.format(
epoch, total_loss / len(thermal_conductivity_train_loader.dataset)))
thermal_conductivity_total_loss_list[epoch] = total_loss / len(thermal_conductivity_train_loader.dataset)
if np.argmin(thermal_conductivity_total_loss_list) == epoch:
            torch.save(thermal_conductivity_vae_mlp, thermal_conductivity_model_file_name)
print('best result, saving the model to ' + thermal_conductivity_model_file_name)
elif np.argmin(thermal_conductivity_total_loss_list) == epoch - 25:
print('Finish: Training process over due to useless training')
break
for epoch in range(params['VAE_epoch_num']):
total_loss = 0
heat_capacity_vae_mlp.train()
for i, data in enumerate(tqdm(heat_capacity_train_loader, 0)):
one_hot = torch.cat((data[0], data[1]), dim=1)
one_hot = one_hot.reshape(one_hot.shape[0], 1, one_hot.shape[1], one_hot.shape[2])
one_hot = Variable(one_hot).cuda().type(torch.cuda.FloatTensor)
heat_capacity_optimizer.zero_grad()
t = data[2].cuda().reshape(data[2].shape[0], 1).type(torch.cuda.FloatTensor)
recon_x, mu, logvar, pre = heat_capacity_vae_mlp.forward(one_hot, t)
recon_x = recon_x[:, :, :one_hot.shape[2], :one_hot.shape[3]]
loss = loss_func(recon_x, one_hot, mu, logvar, pre, t)
loss.backward()
total_loss += loss.data.item() / 1000
heat_capacity_optimizer.step()
print('====> Epoch: {} Average loss: {:.4f}'.format(
epoch, total_loss / len(heat_capacity_train_loader.dataset)))
heat_capacity_total_loss_list[epoch] = total_loss / len(heat_capacity_train_loader.dataset)
if np.argmin(heat_capacity_total_loss_list) == epoch:
torch.save(heat_capacity_vae_mlp, heat_capacity_model_file_name)
print('best result, saving the model to ' + heat_capacity_model_file_name)
        elif np.argmin(heat_capacity_total_loss_list) == epoch - 25:
print('Finish: Training process over due to useless training')
break
| 47.161111 | 114 | 0.624102 | [
"MIT"
] | CHUJianchun/VAE_MLP_PSO | VAE_MLP/VAE_MLP_cat_model.py | 8,489 | Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Corrfunc is a set of high-performance routines for
computing clustering statistics on a distribution of
points.
"""
from __future__ import (division, print_function, absolute_import,
unicode_literals)
import os
__version__ = "0.0.5"
__author__ = "Kate Storey-Fisher <[email protected]>"
try:
__CORRFUNC_SETUP__
except NameError:
__CORRFUNC_SETUP__ = False
if not __CORRFUNC_SETUP__:
from . import io
from . import utils
from . import theory
from . import mocks
def read_text_file(filename, encoding="utf-8"):
"""
Reads a file under python3 with encoding (default UTF-8).
Also works under python2, without encoding.
Uses the EAFP (https://docs.python.org/2/glossary.html#term-eafp)
principle.
"""
try:
        with open(filename, 'r', encoding=encoding) as f:
r = f.read()
except TypeError:
with open(filename, 'r') as f:
r = f.read()
return r
def write_text_file(filename, contents, encoding="utf-8"):
"""
Writes a file under python3 with encoding (default UTF-8).
Also works under python2, without encoding.
Uses the EAFP (https://docs.python.org/2/glossary.html#term-eafp)
principle.
"""
try:
        with open(filename, 'w', encoding=encoding) as f:
f.write(contents)
except TypeError:
with open(filename, 'w') as f:
f.write(contents)
def which(program, mode=os.F_OK | os.X_OK, path=None):
"""
Mimics the Unix utility which.
For python3.3+, shutil.which provides all of the required functionality.
An implementation is provided in case shutil.which does
not exist.
:param program: (required) string
Name of program (can be fully-qualified path as well)
:param mode: (optional) integer flag bits
Permissions to check for in the executable
Default: os.F_OK (file exists) | os.X_OK (executable file)
:param path: (optional) string
A custom path list to check against. Implementation taken from
shutil.py.
Returns:
A fully qualified path to program as resolved by path or
user environment.
Returns None when program can not be resolved.
"""
try:
from shutil import which as shwhich
return shwhich(program, mode, path)
except ImportError:
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
fpath, _ = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
if path is None:
path = os.environ.get("PATH", os.defpath)
if not path:
return None
path = path.split(os.pathsep)
for pathdir in path:
pathdir = pathdir.strip('"')
exe_file = os.path.join(pathdir, program)
if is_exe(exe_file):
return exe_file
return None
| 28.728972 | 76 | 0.611256 | [
"MIT"
] | dfm/suave | Corrfunc/__init__.py | 3,074 | Python |
"""The token kinds currently recognized."""
from shivyc.tokens import TokenKind
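# Lists that collect every keyword and symbol token kind defined below.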
keyword_kinds = []
symbol_kinds = []
bool_kw = TokenKind("_Bool", keyword_kinds)
char_kw = TokenKind("char", keyword_kinds)
short_kw = TokenKind("short", keyword_kinds)
int_kw = TokenKind("int", keyword_kinds)
long_kw = TokenKind("long", keyword_kinds)
signed_kw = TokenKind("signed", keyword_kinds)
unsigned_kw = TokenKind("unsigned", keyword_kinds)
void_kw = TokenKind("void", keyword_kinds)
return_kw = TokenKind("return", keyword_kinds)
if_kw = TokenKind("if", keyword_kinds)
else_kw = TokenKind("else", keyword_kinds)
while_kw = TokenKind("while", keyword_kinds)
for_kw = TokenKind("for", keyword_kinds)
break_kw = TokenKind("break", keyword_kinds)
continue_kw = TokenKind("continue", keyword_kinds)
auto_kw = TokenKind("auto", keyword_kinds)
static_kw = TokenKind("static", keyword_kinds)
extern_kw = TokenKind("extern", keyword_kinds)
struct_kw = TokenKind("struct", keyword_kinds)
union_kw = TokenKind("union", keyword_kinds)
const_kw = TokenKind("const", keyword_kinds)
typedef_kw = TokenKind("typedef", keyword_kinds)
sizeof_kw = TokenKind("sizeof", keyword_kinds)
plus = TokenKind("+", symbol_kinds)
minus = TokenKind("-", symbol_kinds)
star = TokenKind("*", symbol_kinds)
slash = TokenKind("/", symbol_kinds)
mod = TokenKind("%", symbol_kinds)
incr = TokenKind("++", symbol_kinds)
decr = TokenKind("--", symbol_kinds)
equals = TokenKind("=", symbol_kinds)
plusequals = TokenKind("+=", symbol_kinds)
minusequals = TokenKind("-=", symbol_kinds)
starequals = TokenKind("*=", symbol_kinds)
divequals = TokenKind("/=", symbol_kinds)
modequals = TokenKind("%=", symbol_kinds)
twoequals = TokenKind("==", symbol_kinds)
notequal = TokenKind("!=", symbol_kinds)
bool_and = TokenKind("&&", symbol_kinds)
bool_or = TokenKind("||", symbol_kinds)
bool_not = TokenKind("!", symbol_kinds)
lt = TokenKind("<", symbol_kinds)
gt = TokenKind(">", symbol_kinds)
ltoe = TokenKind("<=", symbol_kinds)
gtoe = TokenKind(">=", symbol_kinds)
amp = TokenKind("&", symbol_kinds)
pound = TokenKind("#", symbol_kinds)
lbitshift = TokenKind("<<", symbol_kinds)
rbitshift = TokenKind(">>", symbol_kinds)
compl = TokenKind("~", symbol_kinds)
dquote = TokenKind('"', symbol_kinds)
squote = TokenKind("'", symbol_kinds)
open_paren = TokenKind("(", symbol_kinds)
close_paren = TokenKind(")", symbol_kinds)
open_brack = TokenKind("{", symbol_kinds)
close_brack = TokenKind("}", symbol_kinds)
open_sq_brack = TokenKind("[", symbol_kinds)
close_sq_brack = TokenKind("]", symbol_kinds)
comma = TokenKind(",", symbol_kinds)
semicolon = TokenKind(";", symbol_kinds)
dot = TokenKind(".", symbol_kinds)
arrow = TokenKind("->", symbol_kinds)
identifier = TokenKind()
number = TokenKind()
string = TokenKind()
char_string = TokenKind()
include_file = TokenKind()
| 34.378049 | 50 | 0.736077 | [
"MIT"
] | Arter3r/ShivyC | shivyc/token_kinds.py | 2,819 | Python |
# -*- coding:utf-8 -*-
from os import system
from re import search, findall
from time import sleep
from requests import Session, get, post
from PIL import Image
from cfscrape import get_cookie_string
# from traceback import format_exc
# Purpose: request pages from the various jav sites and from arzon
# Params: url, request header/cookies, proxy
# Returns: page html, request headers
#################################################### arzon ########################################################
# Get an arzon cookie and return it
def steal_arzon_cookies(proxy):
print('\n正在尝试通过“https://www.arzon.jp”的成人验证...')
for retry in range(10):
        try:  # Getting past the adult check took some effort: a C#-based jav scraper project requests the URL below, which redirects to the arzon home page, and the resulting cookie is valid
if proxy:
session = Session()
session.get('https://www.arzon.jp/index.php?action=adult_customer_agecheck&agecheck=1&redirect=https%3A%2F%2Fwww.arzon.jp%2F', proxies=proxy, timeout=(6, 7))
print('通过arzon的成人验证!\n')
return session.cookies.get_dict()
else:
session = Session()
session.get('https://www.arzon.jp/index.php?action=adult_customer_agecheck&agecheck=1&redirect=https%3A%2F%2Fwww.arzon.jp%2F', timeout=(6, 7))
print('通过arzon的成人验证!\n')
return session.cookies.get_dict()
except:
# print(format_exc())
print('通过失败,重新尝试...')
continue
print('>>请检查你的网络环境是否可以打开:https://www.arzon.jp/')
system('pause')
# Search arzon, or request the arzon page for a given jav; returns html
def get_arzon_html(url, cookies, proxy):
    # print('proxy:', proxy)
for retry in range(10):
try:
if proxy:
rqs = get(url, cookies=cookies, proxies=proxy, timeout=(6, 7))
else:
rqs = get(url, cookies=cookies, timeout=(6, 7))
except:
print(' >打开网页失败,重新尝试...')
continue
rqs.encoding = 'utf-8'
rqs_content = rqs.text
if search(r'arzon', rqs_content):
return rqs_content
else:
print(' >打开网页失败,空返回...重新尝试...')
continue
print('>>请检查你的网络环境是否可以打开:', url)
system('pause')
def find_plot_arzon(jav_num, acook, proxy_arzon):
for retry in range(2):
url_search_arzon = 'https://www.arzon.jp/itemlist.html?t=&m=all&s=&q=' + jav_num.replace('-', '')
print(' >查找简介:', url_search_arzon)
        # Get arzon's search results page
html_search_arzon = get_arzon_html(url_search_arzon, acook, proxy_arzon)
# <dt><a href="https://www.arzon.jp/item_1376110.html" title="限界集落 ~村民"><img src=
        list_search_results = findall(r'h2><a href="(/item.+?)" title=', html_search_arzon) # all search-result links
        # The search results page lists N films
        if list_search_results: # arzon returned results
for url_each_result in list_search_results:
                url_on_arzon = 'https://www.arzon.jp' + url_each_result # the (i+1)-th result link
print(' >获取简介:', url_on_arzon)
                # Open each search result's page on arzon
html_arzon = get_arzon_html(url_on_arzon, acook, proxy_arzon)
                # Look for the plot summary on this url_on_arzon page
plotg = search(r'h2>作品紹介</h2>([\s\S]*?)</div>', html_arzon)
                # Plot summary found
if str(plotg) != 'None':
plot_br = plotg.group(1)
plot = ''
for line in plot_br.split('<br />'):
line = line.strip()
plot += line
return plot, 0
            # All search results checked and no plot summary was found
return '【arzon有该影片,但找不到简介】', 1
        # No search results
else:
            # The page arzon returned is actually the 18+ age check
adultg = search(r'18歳未満', html_search_arzon)
if str(adultg) != 'None':
acook = steal_arzon_cookies(proxy_arzon)
continue
            # Not the adult check, and no plot summary either
else:
return '【影片下架,暂无简介】', 2
print('>>请检查你的网络环境是否可以通过成人验证:https://www.arzon.jp/')
system('pause')
return '', 3
#################################################### javlibrary ########################################################
# Get a javlibrary cookie and return the request header
def steal_library_header(url, proxy):
print('\n正在尝试通过', url, '的5秒检测...如果超过20秒卡住...重启程序...')
for retry in range(10):
try:
if proxy:
cookie_value, user_agent = get_cookie_string(url, proxies=proxy, timeout=15)
else:
cookie_value, user_agent = get_cookie_string(url, timeout=15)
print('通过5秒检测!\n')
return {'User-Agent': user_agent, 'Cookie': cookie_value}
except:
# print(format_exc())
print('通过失败,重新尝试...')
continue
print('>>通过javlibrary的5秒检测失败:', url)
system('pause')
# Search javlibrary, or request the javlibrary page for a given jav; returns html
def get_library_html(url, header, proxy):
for retry in range(10):
try:
if proxy:
rqs = get(url, headers=header, proxies=proxy, timeout=(6, 7), allow_redirects=False)
else:
rqs = get(url, headers=header, timeout=(6, 7), allow_redirects=False)
except:
print(' >打开网页失败,重新尝试...')
continue
rqs.encoding = 'utf-8'
rqs_content = rqs.text
# print(rqs_content)
        if search(r'JAVLibrary', rqs_content): # got the page we want, return it directly
return rqs_content, header
        elif search(r'javli', rqs_content): # page shown after searching a code, before javlibrary redirects
            url = url[:23] + search(r'(\?v=javli.+?)"', rqs_content).group(1) # rqs_content is a very short redirect page whose body holds the target jav's URL
            if len(url) > 70: # the redirect token is unusually long, so the cloudflare cookie has expired
                header = steal_library_header(url[:23], proxy) # refresh the header, then keep requesting
continue
print(' >获取信息:', url)
            continue # url updated, GET again
        elif search(r'Compatible', rqs_content): # cloudflare check
            header = steal_library_header(url[:23], proxy) # refresh the header, then keep requesting
continue
        else: # error message returned by the proxy tool
print(' >打开网页失败,空返回...重新尝试...')
continue
print('>>请检查你的网络环境是否可以打开:', url)
system('pause')
#################################################### javbus ########################################################
# Search javbus, or request the javbus page for a given jav; returns html
def get_bus_html(url, proxy):
for retry in range(10):
try:
            if proxy: # existmag=all is used to fetch all films, not just the default magnet-only listing
rqs = get(url, proxies=proxy, timeout=(6, 7), headers={'Cookie': 'existmag=all'})
else:
rqs = get(url, timeout=(6, 7), headers={'Cookie': 'existmag=all'})
except:
# print(format_exc())
print(' >打开网页失败,重新尝试...')
continue
rqs.encoding = 'utf-8'
rqs_content = rqs.text
if search(r'JavBus', rqs_content):
return rqs_content
else:
print(' >打开网页失败,空返回...重新尝试...')
continue
print('>>请检查你的网络环境是否可以打开:', url)
system('pause')
# Look up the series on javbus
def find_series_cover_bus(jav_num, url_bus, proxy_bus):
    # These two values are what we need
series = url_cover_bus = ''
status_series = 0
    # Look for the cover image url on javbus
url_on_bus = url_bus + jav_num
print(' >获取系列:', url_on_bus)
    # Get the film's page on javbus
html_bus = get_bus_html(url_on_bus, proxy_bus)
if not search(r'404 Page', html_bus):
        # DVD cover
coverg = search(r'bigImage" href="(.+?)">', html_bus)
if str(coverg) != 'None':
url_cover_bus = coverg.group(1)
# 系列:</span> <a href="https://www.cdnbus.work/series/kpl">悪質シロウトナンパ</a>
seriesg = search(r'系列:</span> <a href=".+?">(.+?)</a>', html_bus)
if str(seriesg) != 'None':
series = seriesg.group(1)
else:
        # Fall back to a real search
url_search_bus = url_bus + 'search/' + jav_num.replace('-', '') + '&type=1&parent=ce'
print(' >搜索javbus:', url_search_bus)
html_bus = get_bus_html(url_search_bus, proxy_bus)
        # Search results page: usually a single result, but there may be several
        # Try to find the movie-box entries
        list_search_results = findall(r'movie-box" href="(.+?)">', html_bus) # match the result "titles"
if list_search_results:
            jav_pref = jav_num.split('-')[0] # letter prefix of the code
            jav_suf = jav_num.split('-')[-1].lstrip('0') # numeric suffix of the current code, leading zeros stripped
            list_fit_results = [] # holds the results whose code matches
for i in list_search_results:
url_end = i.split('/')[-1].upper()
                url_suf = search(r'[-_](\d+)', url_end).group(1).lstrip('0') # numeric suffix of the code in this result's url, leading zeros stripped
                if jav_suf == url_suf: # numbers match
                    url_pref = search(r'([A-Z]+2?8?)', url_end).group(1).upper() # letter prefix of the code carried in the url
                    if jav_pref == url_pref: # letters match too, so the codes are probably the same
list_fit_results.append(i)
            # Got matching results
if list_fit_results:
                # Several results: set a status code to warn the user
if len(list_fit_results) > 1:
status_series = 1
                # Use the first search result by default
url_first_result = list_fit_results[0]
print(' >获取系列:', url_first_result)
html_bus = get_bus_html(url_first_result, proxy_bus)
                # DVD cover
coverg = search(r'bigImage" href="(.+?)">', html_bus)
if str(coverg) != 'None':
url_cover_bus = coverg.group(1)
# 系列:</span> <a href="https://www.cdnbus.work/series/kpl">悪質シロウトナンパ</a>
seriesg = search(r'系列:</span> <a href=".+?">(.+?)</a>', html_bus)
if str(seriesg) != 'None':
series = seriesg.group(1)
return url_cover_bus, series, status_series
#################################################### jav321 ########################################################
# Request the jav page on jav321 from a user-supplied url; returns html
def get_321_html(url, proxy):
for retry in range(10):
try:
if proxy:
rqs = get(url, proxies=proxy, timeout=(6, 7))
else:
rqs = get(url, timeout=(6, 7))
except:
print(' >打开网页失败,重新尝试...')
continue
rqs.encoding = 'utf-8'
rqs_content = rqs.text
if search(r'JAV321', rqs_content):
return rqs_content
else:
print(' >打开网页失败,空返回...重新尝试...')
continue
print('>>请检查你的网络环境是否可以打开:', url)
system('pause')
# POST the code to jav321 to get the jav page (possibly a no-result page); returns html
def post_321_html(url, data, proxy):
for retry in range(10):
try:
if proxy:
rqs = post(url, data=data, proxies=proxy, timeout=(6, 7))
else:
rqs = post(url, data=data, timeout=(6, 7))
except:
# print(format_exc())
print(' >打开网页失败,重新尝试...')
continue
rqs.encoding = 'utf-8'
rqs_content = rqs.text
if search(r'JAV321', rqs_content):
return rqs_content
else:
print(' >打开网页失败,空返回...重新尝试...')
continue
print('>>请检查你的网络环境是否可以打开:', url)
system('pause')
#################################################### javdb ########################################################
# Search javdb and get the search results page; returns html.
def get_search_db_html(url, proxy):
for retry in range(1, 11):
if retry % 4 == 0:
print(' >睡眠5分钟...')
sleep(300)
try:
if proxy:
rqs = get(url, proxies=proxy, timeout=(6, 7))
else:
rqs = get(url, timeout=(6, 7))
except:
# print(format_exc())
print(' >打开网页失败,重新尝试...')
continue
rqs.encoding = 'utf-8'
rqs_content = rqs.text
if search(r'JavDB', rqs_content):
if search(r'搜索結果', rqs_content):
return rqs_content
else:
print(' >睡眠5分钟...')
sleep(300)
continue
else:
print(' >打开网页失败,空返回...重新尝试...')
continue
print('>>请检查你的网络环境是否可以打开:', url)
system('pause')
# Request the jav page on javdb; returns html
def get_db_html(url, proxy):
for retry in range(1, 11):
if retry % 4 == 0:
print(' >睡眠5分钟...')
sleep(300)
try:
if proxy:
rqs = get(url, proxies=proxy, timeout=(6, 7))
else:
rqs = get(url, timeout=(6, 7))
except:
# print(format_exc())
print(' >打开网页失败,重新尝试...')
continue
rqs.encoding = 'utf-8'
rqs_content = rqs.text
if search(r'JavDB', rqs_content):
if search(r'content="JavDB', rqs_content):
return rqs_content
else:
print(' >睡眠5分钟...')
sleep(300)
continue
else:
print(' >打开网页失败,空返回...重新尝试...')
continue
print('>>请检查你的网络环境是否可以打开:', url)
system('pause')
#################################################### download images ########################################################
# Download an image; no return value
def download_pic(url, path, proxy):
for retry in range(5):
try:
if proxy:
r = get(url, proxies=proxy, stream=True, timeout=(6, 10))
with open(path, 'wb') as pic:
for chunk in r:
pic.write(chunk)
else:
r = get(url, stream=True, timeout=(6, 10))
with open(path, 'wb') as pic:
for chunk in r:
pic.write(chunk)
except:
# print(format_exc())
print(' >下载失败,重新下载...')
continue
        # If the downloaded image cannot be opened, download it again
try:
img = Image.open(path)
img.load()
return
except OSError:
print(' >下载失败,重新下载....')
continue
raise Exception(' >下载多次,仍然失败!')
| 38.307692 | 174 | 0.484974 | [
"MIT"
] | Aevlp/javsdt | functions_requests.py | 16,888 | Python |
#!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test multiple RPC users."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import str_to_b64str, assert_equal
import os
import http.client
import urllib.parse
class HTTPBasicsTest (BitcoinTestFramework):
def __init__(self):
super().__init__()
self.setup_clean_chain = False
self.num_nodes = 1
def setup_chain(self):
super().setup_chain()
#Append rpcauth to polis.conf before initialization
rpcauth = "rpcauth=rt:93648e835a54c573682c2eb19f882535$7681e9c5b74bdd85e78166031d2058e1069b3ed7ed967c93fc63abba06f31144"
rpcauth2 = "rpcauth=rt2:f8607b1a88861fac29dfccf9b52ff9f$ff36a0c23c8c62b4846112e50fa888416e94c17bfd4c42f88fd8f55ec6a3137e"
with open(os.path.join(self.options.tmpdir+"/node0", "polis.conf"), 'a', encoding='utf8') as f:
f.write(rpcauth+"\n")
f.write(rpcauth2+"\n")
def setup_network(self):
self.nodes = self.setup_nodes()
def run_test(self):
##################################################
# Check correctness of the rpcauth config option #
##################################################
url = urllib.parse.urlparse(self.nodes[0].url)
#Old authpair
authpair = url.username + ':' + url.password
#New authpair generated via share/rpcuser tool
rpcauth = "rpcauth=rt:93648e835a54c573682c2eb19f882535$7681e9c5b74bdd85e78166031d2058e1069b3ed7ed967c93fc63abba06f31144"
password = "cA773lm788buwYe4g4WT+05pKyNruVKjQ25x3n0DQcM="
#Second authpair with different username
rpcauth2 = "rpcauth=rt2:f8607b1a88861fac29dfccf9b52ff9f$ff36a0c23c8c62b4846112e50fa888416e94c17bfd4c42f88fd8f55ec6a3137e"
password2 = "8/F3uMDw4KSEbw96U3CA1C4X05dkHDN2BPFjTgZW4KI="
authpairnew = "rt:"+password
headers = {"Authorization": "Basic " + str_to_b64str(authpair)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status==401, False)
conn.close()
#Use new authpair to confirm both work
headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status==401, False)
conn.close()
#Wrong login name with rt's password
authpairnew = "rtwrong:"+password
headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status==401, True)
conn.close()
#Wrong password for rt
authpairnew = "rt:"+password+"wrong"
headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status==401, True)
conn.close()
#Correct for rt2
authpairnew = "rt2:"+password2
headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status==401, False)
conn.close()
#Wrong password for rt2
authpairnew = "rt2:"+password2+"wrong"
headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status==401, True)
conn.close()
if __name__ == '__main__':
HTTPBasicsTest ().main ()
| 38.559322 | 129 | 0.643956 | [
"MIT"
] | CryptoBackups/polis | qa/rpc-tests/multi_rpc.py | 4,550 | Python |
from __future__ import unicode_literals
import unittest
from django.conf.urls import url
from django.contrib.auth import authenticate, login
from django.contrib.auth.models import User
from django.shortcuts import redirect
from django.test import override_settings
from django.utils.decorators import method_decorator
from django.views.decorators.csrf import csrf_protect, ensure_csrf_cookie
from rest_framework.compat import is_authenticated, requests
from rest_framework.response import Response
from rest_framework.test import APITestCase, RequestsClient
from rest_framework.views import APIView
class Root(APIView):
def get(self, request):
return Response({
'method': request.method,
'query_params': request.query_params,
})
def post(self, request):
files = {
key: (value.name, value.read())
for key, value in request.FILES.items()
}
post = request.POST
json = None
if request.META.get('CONTENT_TYPE') == 'application/json':
json = request.data
return Response({
'method': request.method,
'query_params': request.query_params,
'POST': post,
'FILES': files,
'JSON': json
})
class HeadersView(APIView):
def get(self, request):
headers = {
key[5:].replace('_', '-'): value
for key, value in request.META.items()
if key.startswith('HTTP_')
}
return Response({
'method': request.method,
'headers': headers
})
class SessionView(APIView):
def get(self, request):
return Response({
key: value for key, value in request.session.items()
})
def post(self, request):
for key, value in request.data.items():
request.session[key] = value
return Response({
key: value for key, value in request.session.items()
})
class AuthView(APIView):
@method_decorator(ensure_csrf_cookie)
def get(self, request):
if is_authenticated(request.user):
username = request.user.username
else:
username = None
return Response({
'username': username
})
@method_decorator(csrf_protect)
def post(self, request):
username = request.data['username']
password = request.data['password']
user = authenticate(username=username, password=password)
if user is None:
return Response({'error': 'incorrect credentials'})
login(request, user)
return redirect('/auth/')
urlpatterns = [
url(r'^$', Root.as_view(), name='root'),
url(r'^headers/$', HeadersView.as_view(), name='headers'),
url(r'^session/$', SessionView.as_view(), name='session'),
url(r'^auth/$', AuthView.as_view(), name='auth'),
]
@unittest.skipUnless(requests, 'requests not installed')
@override_settings(ROOT_URLCONF='tests.test_requests_client')
class RequestsClientTests(APITestCase):
def test_get_request(self):
client = RequestsClient()
response = client.get('http://testserver/')
assert response.status_code == 200
assert response.headers['Content-Type'] == 'application/json'
expected = {
'method': 'GET',
'query_params': {}
}
assert response.json() == expected
def test_get_request_query_params_in_url(self):
client = RequestsClient()
response = client.get('http://testserver/?key=value')
assert response.status_code == 200
assert response.headers['Content-Type'] == 'application/json'
expected = {
'method': 'GET',
'query_params': {'key': 'value'}
}
assert response.json() == expected
def test_get_request_query_params_by_kwarg(self):
client = RequestsClient()
response = client.get('http://testserver/', params={'key': 'value'})
assert response.status_code == 200
assert response.headers['Content-Type'] == 'application/json'
expected = {
'method': 'GET',
'query_params': {'key': 'value'}
}
assert response.json() == expected
def test_get_with_headers(self):
client = RequestsClient()
response = client.get('http://testserver/headers/', headers={'User-Agent': 'example'})
assert response.status_code == 200
assert response.headers['Content-Type'] == 'application/json'
headers = response.json()['headers']
assert headers['USER-AGENT'] == 'example'
def test_get_with_session_headers(self):
client = RequestsClient()
client.headers.update({'User-Agent': 'example'})
response = client.get('http://testserver/headers/')
assert response.status_code == 200
assert response.headers['Content-Type'] == 'application/json'
headers = response.json()['headers']
assert headers['USER-AGENT'] == 'example'
def test_post_form_request(self):
client = RequestsClient()
response = client.post('http://testserver/', data={'key': 'value'})
assert response.status_code == 200
assert response.headers['Content-Type'] == 'application/json'
expected = {
'method': 'POST',
'query_params': {},
'POST': {'key': 'value'},
'FILES': {},
'JSON': None
}
assert response.json() == expected
def test_post_json_request(self):
client = RequestsClient()
response = client.post('http://testserver/', json={'key': 'value'})
assert response.status_code == 200
assert response.headers['Content-Type'] == 'application/json'
expected = {
'method': 'POST',
'query_params': {},
'POST': {},
'FILES': {},
'JSON': {'key': 'value'}
}
assert response.json() == expected
def test_post_multipart_request(self):
client = RequestsClient()
files = {
'file': ('report.csv', 'some,data,to,send\nanother,row,to,send\n')
}
response = client.post('http://testserver/', files=files)
assert response.status_code == 200
assert response.headers['Content-Type'] == 'application/json'
expected = {
'method': 'POST',
'query_params': {},
'FILES': {'file': ['report.csv', 'some,data,to,send\nanother,row,to,send\n']},
'POST': {},
'JSON': None
}
assert response.json() == expected
def test_session(self):
client = RequestsClient()
response = client.get('http://testserver/session/')
assert response.status_code == 200
assert response.headers['Content-Type'] == 'application/json'
expected = {}
assert response.json() == expected
response = client.post('http://testserver/session/', json={'example': 'abc'})
assert response.status_code == 200
assert response.headers['Content-Type'] == 'application/json'
expected = {'example': 'abc'}
assert response.json() == expected
response = client.get('http://testserver/session/')
assert response.status_code == 200
assert response.headers['Content-Type'] == 'application/json'
expected = {'example': 'abc'}
assert response.json() == expected
def test_auth(self):
# Confirm session is not authenticated
client = RequestsClient()
response = client.get('http://testserver/auth/')
assert response.status_code == 200
assert response.headers['Content-Type'] == 'application/json'
expected = {
'username': None
}
assert response.json() == expected
assert 'csrftoken' in response.cookies
csrftoken = response.cookies['csrftoken']
user = User.objects.create(username='tom')
user.set_password('password')
user.save()
# Perform a login
response = client.post('http://testserver/auth/', json={
'username': 'tom',
'password': 'password'
}, headers={'X-CSRFToken': csrftoken})
assert response.status_code == 200
assert response.headers['Content-Type'] == 'application/json'
expected = {
'username': 'tom'
}
assert response.json() == expected
# Confirm session is authenticated
response = client.get('http://testserver/auth/')
assert response.status_code == 200
assert response.headers['Content-Type'] == 'application/json'
expected = {
'username': 'tom'
}
assert response.json() == expected
| 34.338521 | 94 | 0.592521 | [
"BSD-2-Clause"
] | Asset-Map/django-rest-framework | tests/test_requests_client.py | 8,825 | Python |
from tkinter import *
from tkinter import messagebox
import tkinter.ttk as ttk
import datetime
def init(top, gui):
global w, top_level, root
w = gui
top_level = top
root = top
def start_gui():
global w, root
root = Tk()
root.iconbitmap("index.ico")
top = main_level(root)
init(root, top)
root.mainloop()
w = None
def new_top(root):
global w, rt
rt = root
w = Toplevel(root)
top = main_level(w)
init(w, top)
return w, top
def exit():
qExit=messagebox.askyesno(" Quit System ", " Do you really want to Exit ? ")
if qExit > 0:
root.destroy()
return
bus_stops = {'MG Road': ['Koramangala','HSR Layout','Sarjapur road'],
'Koramangala': ['MG Road','HSR Layout','Sarjapur road'],
'HSR Layout': ['MG Road','Koramangala','Sarjapur road'],
'Sarjapur road' : ['MG Road','Koramangala','HSR Layout']}
class main_level:
def bus_Stops(self, top=None):
self.des_Combo['values'] = bus_stops[self.boarding_Combo.get()]
def booked(self):
#self.select()
#pass
if self.boarding_Combo.get() == "Select Boarding":
messagebox.showerror("Unknown Boarding", " Please select your Boarding")
return
elif self.des_Combo.get() == "Select Destination":
messagebox.showerror("Unknown Destination", "Please Select your Destination")
return
elif self.no_of_Adult.get() == "Select Adult(s)":
messagebox.showerror("No Adult Selected", " Please select Adult(s)")
return
elif self.no_of_Child.get() == "Select Child(s)":
messagebox.showerror("No Child Selected", " Please select Child(s)")
return
elif self.boarding_Combo.get() == self.des_Combo.get():
messagebox.showerror("Error", "Boarding and Destination cannot be same!")
return
qtotal = messagebox.askyesno("Total Cost Check", " Do you want to check total cost")
if qtotal > 0:
messagebox.showinfo("Total Cost Check","Check total fare by clicking Total Button")
return
qPrint = messagebox.askyesno("Confirmation", "Ticket Booked!\n\n Print Ticket ?")
if qPrint > 0:
messagebox.showinfo("Booked","Ticket Booked\n\n"+"Printing Ticket\n\n " + self.boarding_Combo.get() + " To " + self.des_Combo.get() + "\n\n" + " For " + self.no_of_Adult.get() + " Adult(s)" + " and " + self.no_of_Child.get() + " Child(s)")
# for sql table
global na
na = self.no_of_Adult.get()
global nc
nc = self.no_of_Child.get()
global bb
bb = '\'{}\''.format(self.boarding_Combo.get())
global dc
dc = '\'{}\''.format(self.des_Combo.get())
self.from2.configure(text="")
self.to2.configure(text="")
self.no_of_Adults2.configure(text="")
self.no_of_Child2.configure(text="")
self.tCost2.configure(text="")
self.no_of_Adult.set("Select Adult(s)")
self.no_of_Child.set("Select Child(s)")
self.boarding_Combo.set("Select Boarding")
self.des_Combo.set("Select Destination")
Date1 = StringVar()
now = datetime.datetime.now()
Date1.set(now.strftime("%d-%m-%Y %I:%M:%S:%p"))
self.lbltiming2.configure(textvariable=Date1)
ticket_num = 1
ticket_nu = str(ticket_num)
f = open("Tickets.txt", "a")
f.write(ticket_nu)
f.close()
with open('Tickets.txt') as infile:
characters = 0
for lineno, line in enumerate(infile, 1):
wordslist = line.split()
characters += sum(len(word) for word in wordslist)
ticketno = StringVar()
ticket_number = characters
ticketno.set(ticket_number)
self.tno.configure(textvariable=ticketno)
def reset(self):
self.from2.configure(text="")
self.to2.configure(text="")
self.no_of_Adults2.configure(text="")
self.no_of_Child2.configure(text="")
self.tCost2.configure(text="")
self.no_of_Adult.set("Select Adult(s)")
self.no_of_Child.set("Select Child(s)")
self.boarding_Combo.set("Select Boarding")
self.des_Combo.set("Select Destination")
Date1 = StringVar()
now = datetime.datetime.now()
Date1.set(now.strftime("%d-%m-%Y %I:%M:%S:%p"))
self.lbltiming2.configure(textvariable=Date1)
def travel_Cost(self):
if self.boarding_Combo.get() == "Select Boarding":
messagebox.showerror("Unknown Boarding", " Please select your Boarding")
return
elif self.des_Combo.get() == "Select Destination":
messagebox.showerror("Unknown Destination", "Please Select your Destination")
return
elif self.no_of_Adult.get() == "Select Adult(s)":
messagebox.showerror("No Adult Selected", " Please select Adult(s)")
return
elif self.no_of_Child.get() == "Select Child(s)":
messagebox.showerror("No Child Selected", " Please select Child(s)")
return
elif self.boarding_Combo.get() == self.des_Combo.get():
messagebox.showerror("Error", "Boarding and Destination cannot be same!")
return
self.from2.configure(text="" + self.boarding_Combo.get())
self.to2.configure(text="" + self.des_Combo.get())
self.no_of_Adults2.configure(text="" + self.no_of_Adult.get())
self.no_of_Child2.configure(text="" + self.no_of_Child.get())
#-------------------------------------Total Ticket Cost----------------------------------------------------------
cost = 0
calculated_cost = float()
if (self.no_of_Adult.get() == "0"):
calculated_cost = cost + 0
elif (self.no_of_Adult.get() == "1"):
calculated_cost = cost + 1
elif (self.no_of_Adult.get() == "2"):
calculated_cost = cost + 2
elif (self.no_of_Adult.get() == "3"):
calculated_cost = cost + 3
aticket = calculated_cost
child_cost = 0
c_cost = float()
if self.no_of_Child.get() == "0":
c_cost = child_cost + 0
elif self.no_of_Child.get() == "1":
c_cost = child_cost + 1 / 2
elif self.no_of_Child.get() == "2":
c_cost = child_cost + 2 / 2
elif self.no_of_Child.get() == "3":
c_cost = child_cost + 3 / 2
cticket = c_cost
passenger = cticket + aticket
fare = 0
t_fare = float()
if (self.boarding_Combo.get() == "MG Road") and (self.des_Combo.get() == "Koramangala"):
t_fare = fare + 25
elif (self.boarding_Combo.get() == "HSR Layout") and (self.des_Combo.get() == "Koramangala"):
t_fare = fare + 22
elif (self.boarding_Combo.get() == "Sarjapur road") and (self.des_Combo.get() == "Koramangala"):
t_fare = fare + 20
elif (self.boarding_Combo.get() == "Koramangala") and (self.des_Combo.get() == "HSR Layout"):
t_fare = fare + 17
elif (self.boarding_Combo.get() == "Sarjapur road") and (self.des_Combo.get() == "HSR Layout"):
t_fare = fare + 15
elif (self.boarding_Combo.get() == "MG Road") and (self.des_Combo.get() == "HSR Layout"):
t_fare = fare + 11
elif (self.boarding_Combo.get() == "Koramangala") and (self.des_Combo.get() == "Sarjapur road"):
t_fare = fare + 9
elif (self.boarding_Combo.get() == "HSR Layout") and (self.des_Combo.get() == "Sarjapur road"):
t_fare = fare + 22
elif (self.boarding_Combo.get() == "MG Road") and (self.des_Combo.get() == "Sarjapur road"):
t_fare = fare + 20
elif (self.boarding_Combo.get() == "Sarjapur road") and (self.des_Combo.get() == "MG Road"):
t_fare = fare + 17
elif (self.boarding_Combo.get() == "HSR Layout") and (self.des_Combo.get() == "MG Road"):
t_fare = fare + 15
elif (self.boarding_Combo.get() == "Koramangala") and (self.des_Combo.get() == "MG Road"):
t_fare = fare + 11
total_fare = t_fare
global final_price
final_price = (total_fare * passenger)
self.tCost2.configure(text=final_price)
return
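    # Design note (sketch only, not wired into the GUI): the if/elif fare chain
    # above could be collapsed into a lookup table keyed by (boarding, destination),
    # e.g. FARES = {('MG Road', 'Koramangala'): 25, ('Koramangala', 'MG Road'): 11, ...}
    # and read as t_fare = fare + FARES[(self.boarding_Combo.get(), self.des_Combo.get())].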
#-------------------------------Defining all the specifications for the TKinter Frontend -----------------------------
def __init__(self, top=None):
top.geometry("732x443+327+147")
top.title("Bus Ticketing System")
self.style = ttk.Style()
font10 = "-family {Wide Latin} -size 10 -weight bold -slant " \
"roman -underline 0 -overstrike 0"
font15 = "-family {Snap ITC} -size 20 -weight bold -slant " \
"roman -underline 1 -overstrike 0"
font17 = "-family {Segoe UI} -size 10 -weight bold -slant " \
"roman -underline 0 -overstrike 0"
font18 = "-family {Segoe UI} -size 11 -weight bold -slant " \
"roman -underline 0 -overstrike 0"
Date1 = StringVar()
now = datetime.datetime.now()
Date1.set(now.strftime("%d-%m-%Y %I:%M:%S:%p"))
with open('Tickets.txt') as infile:
characters = 0
for lineno, line in enumerate(infile, 1):
wordslist = line.split()
characters += sum(len(word) for word in wordslist)
ticketno = StringVar()
ticket_number = characters
ticketno.set(ticket_number)
bg = PhotoImage(file = r"C:\bus4.png")
self.frame_Booking_Panel = Frame(top)
self.frame_Booking_Panel.place(relx=0.0, rely=0.0, relheight=1.0, relwidth=1.0)
self.frame_Booking_Panel.configure(relief=GROOVE)
self.frame_Booking_Panel.configure(borderwidth="5")
self.frame_Booking_Panel.configure(background ='#ADD8E6')
self.back_ground = Label(self.frame_Booking_Panel,image = bg)
self.back_ground.img = bg
self.back_ground.place(relx=0.14, rely=0.05, height=732, width=443)
self.back_ground.pack()
self.bus_Service = Label(self.frame_Booking_Panel)
self.bus_Service.place(relx=0.14, rely=0.05, height=21, width=544)
self.bus_Service.configure(background="#ADD8E6")
self.bus_Service.configure(font=font15)
self.bus_Service.configure(text="Bus Service")
self.ticket_No = Label(self.frame_Booking_Panel)
self.ticket_No.place(relx=0.04, rely=0.18, height=21, width=84)
self.ticket_No.configure(background="#E6E6FA")
self.ticket_No.configure(font=font18)
self.ticket_No.configure(text="Ticket No. : ")
self.tno = Label(self.frame_Booking_Panel)
self.tno.place(relx=0.15, rely=0.18, height=21, width=30)
self.tno.configure(background="#E6E6FA")
self.tno.configure(font=font18)
self.tno.configure(textvariable=ticketno)
self.bus_id = Label(self.frame_Booking_Panel)
self.bus_id.place(relx=0.75, rely=0.16, height=21, width=119)
self.bus_id.configure(background="#E6E6FA")
self.bus_id.configure(font=font18)
self.bus_id.configure(text="Bus : JFBS0001")
self.boarding_lbl = Label(self.frame_Booking_Panel)
self.boarding_lbl.place(relx=0.04, rely=0.32, height=23, width=84)
self.boarding_lbl.configure(background="#DCAE96")
self.boarding_lbl.configure(font=font17)
self.boarding_lbl.configure(text="Boarding :")
self.destination_lbl = Label(self.frame_Booking_Panel)
self.destination_lbl.place(relx=0.52, rely=0.32, height=21, width=134)
self.destination_lbl.configure(background="#DCAE96")
self.destination_lbl.configure(font=font18)
self.destination_lbl.configure(text="Destination :")
self.boarding_Combo = ttk.Combobox(self.frame_Booking_Panel, state='readonly', values=list(bus_stops.keys()))
self.boarding_Combo.place(relx=0.19, rely=0.32, relheight=0.05, relwidth=0.2)
self.boarding_Combo.set("Select Boarding")
self.boarding_Combo.bind('<<ComboboxSelected>>', self.bus_Stops)
self.des_Combo = ttk.Combobox(self.frame_Booking_Panel, state='readonly')
self.des_Combo.place(relx=0.74, rely=0.32, relheight=0.05, relwidth=0.2)
self.des_Combo.bind('<<ComboboxSelected>>')
self.des_Combo.set("Select Destination")
self.passengers_lbl = Label(self.frame_Booking_Panel)
self.passengers_lbl.place(relx=0.04, rely=0.47, height=21, width=114)
self.passengers_lbl.configure(background="#E6E6FA")
self.passengers_lbl.configure(font=font18)
self.passengers_lbl.configure(text="Passenger(s) :")
self.no_of_Adult = ttk.Combobox(self.frame_Booking_Panel, state='readonly')
self.no_of_Adult.place(relx=0.16, rely=0.56, relheight=0.05, relwidth=0.15)
self.no_of_Adult['values'] = (0, 1, 2, 3)
self.no_of_Adult.set("Select Adult(s)")
self.no_of_Adult.bind('<<ComboboxSelected>>')
self.no_of_Child = ttk.Combobox(self.frame_Booking_Panel, state='readonly')
self.no_of_Child.place(relx=0.16, rely=0.65, relheight=0.05, relwidth=0.15)
self.no_of_Child['values'] = (0, 1, 2, 3)
self.no_of_Child.set("Select Child(s)")
self.no_of_Child.bind('<<ComboboxSelected>>')
self.book_Button = Button(self.frame_Booking_Panel)
self.book_Button.place(relx=0.78, rely=0.62, height=24, width=107)
self.book_Button.configure(background="#008040")
self.book_Button.configure(font=font17)
self.book_Button.configure(text="Book")
self.book_Button.configure(command=self.booked)
self.exit_Button = Button(self.frame_Booking_Panel)
self.exit_Button.place(relx=0.78, rely=0.92, height=24, width=107)
self.exit_Button.configure(background="#008040")
self.exit_Button.configure(font=font17)
self.exit_Button.configure(text="Exit")
self.exit_Button.configure(command=exit)
self.reset_Button = Button(self.frame_Booking_Panel)
self.reset_Button.place(relx=0.78, rely=0.77, height=24, width=107)
self.reset_Button.configure(background="#008040")
self.reset_Button.configure(font=font17)
self.reset_Button.configure(text="Reset")
self.reset_Button.configure(command=self.reset)
self.total_Button = Button(self.frame_Booking_Panel)
self.total_Button.place(relx=0.78, rely=0.47, height=24, width=107)
self.total_Button.configure(background="#008040")
self.total_Button.configure(font=font17)
self.total_Button.configure(text="Total")
self.total_Button.configure(command=self.travel_Cost)
self.lblAdultno = Label(self.frame_Booking_Panel)
self.lblAdultno.place(relx=0.05, rely=0.56, height=21, width=64)
self.lblAdultno.configure(background="#ff8040")
self.lblAdultno.configure(text="Adult")
self.lblChildno = Label(self.frame_Booking_Panel)
self.lblChildno.place(relx=0.05, rely=0.65, height=21, width=64)
self.lblChildno.configure(background="#ff8040")
self.lblChildno.configure(text="Child")
self.total_Frame = Frame(self.frame_Booking_Panel)
self.total_Frame.place(relx=0.36, rely=0.47, relheight=0.44, relwidth=0.36)
self.total_Frame.configure(relief=GROOVE)
self.total_Frame.configure(borderwidth="1")
self.total_Frame.configure(background="#ADD8E6")
self.from1 = Label(self.total_Frame)
self.from1.place(relx=0.08, rely=0.05, height=21, width=54)
self.from1.configure(background="#0080ff")
self.from1.configure(text="From :")
self.from2 = Label(self.total_Frame)
self.from2.place(relx=0.40, rely=0.05, height=21, width=121)
self.from2.configure(background="#8080ff")
self.from2.configure(highlightcolor="black")
self.to1 = Label(self.total_Frame)
self.to1.place(relx=0.08, rely=0.21, height=21, width=49)
self.to1.configure(background="#0080ff")
self.to1.configure(text="To :")
self.to2 = Label(self.total_Frame)
self.to2.place(relx=0.40, rely=0.21, height=21, width=121)
self.to2.configure(background="#8080ff")
self.no_of_Adults1 = Label(self.total_Frame)
self.no_of_Adults1.place(relx=0.08, rely=0.36, height=21, width=55)
self.no_of_Adults1.configure(background="#0080ff")
self.no_of_Adults1.configure(text="Adult :")
self.no_of_Adults2 = Label(self.total_Frame)
self.no_of_Adults2.place(relx=0.40, rely=0.36, height=21, width=121)
self.no_of_Adults2.configure(background="#8080ff")
self.no_of_Child1 = Label(self.total_Frame)
self.no_of_Child1.place(relx=0.08, rely=0.51, height=21, width=46)
self.no_of_Child1.configure(background="#0080ff")
self.no_of_Child1.configure(text="Child :")
self.no_of_Child2 = Label(self.total_Frame)
self.no_of_Child2.place(relx=0.40, rely=0.51, height=21, width=121)
self.no_of_Child2.configure(background="#8080ff")
self.tCost1 = Label(self.total_Frame)
self.tCost1.place(relx=0.08, rely=0.67, height=21, width=74)
self.tCost1.configure(background="#0080ff")
self.tCost1.configure(text="Total (Rs.) :")
self.tCost2 = Label(self.total_Frame)
self.tCost2.place(relx=0.40, rely=0.67, height=21, width=121)
self.tCost2.configure(background="#8080ff")
self.lbltiming1 = Label(self.total_Frame)
self.lbltiming1.place(relx=0.08, rely=0.82, height=21, width=74)
self.lbltiming1.configure(background="#0080ff")
self.lbltiming1.configure(text="Booking at :")
self.lbltiming2 = Label(self.total_Frame)
self.lbltiming2.place(relx=0.40, rely=0.82, height=21, width=135)
self.lbltiming2.configure(background="#8080ff")
self.lbltiming2.configure(textvariable=Date1)
start_gui()
#----------------------------------------SQL PART --------------------------------------------------
import mysql.connector as ms
conn = ms.connect(host = "localhost", user = "root", passwd="", database ="project")
if conn.is_connected():
print("Connected Succefully")
else:
print("Try again....")
cursor = conn.cursor()
#date for sql table
now = datetime.datetime.now()
formatted_date = now.strftime('%Y-%m-%d')
f1 = '\'{}\''.format(formatted_date)
#checking whether all values are good
print(f1,na,nc,bb,dc,final_price)
str1 = "insert into bus_data values({},{},{},{},{},{});".format(f1,na,nc,bb,dc,final_price)
cursor.execute(str1)
conn.commit()
| 41.738758 | 259 | 0.598605 | [
"MIT"
] | SoldierSolo24/Bus-Ticketing-System- | Bus Ticketing system MAIN final.py | 19,492 | Python |
import jinja2
class SPMObject(object):
""" Abstract Base Class for all SPM objects.
Even though SPM objects are not Spire tasks (as some of them will modify
in-place their file_dep, which is not compatible with doit's task
semantics), they nonetheless include task-related properties: file_dep
and targets. Subclasses will have to override the _get_file_dep and
_get_targets functions to return the correct values.
"""
def __init__(self, name):
self.name = name
self.environment = jinja2.Environment()
self.environment.globals.update(id=__class__._get_id)
def get_script(self, index):
template = self.environment.from_string(self.template)
return template.render(index=index, **vars(self))
@property
def file_dep(self):
return self._get_file_dep()
@property
def targets(self):
return self._get_targets()
@staticmethod
def _get_id(index, name):
return "matlabbatch{"+str(index)+"}."+name
def _get_file_dep(self):
return []
def _get_targets(self):
return []
def __getstate__(self):
state = self.__dict__.copy()
del state["environment"]
return state
def __setstate__(self, state):
self.__dict__.update(state)
self.environment = jinja2.Environment()
self.environment.globals.update(id=__class__._get_id)
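# Minimal sketch of a concrete SPM object (hypothetical: the "Smooth" class, its
# template string and the in_file/out_file attributes are illustrations, not part
# of this module). It shows the two overrides the class docstring asks for.
class Smooth(SPMObject):
    template = "{{ id(index, 'spm.spatial.smooth.data') }} = {'{{ in_file }}'};"
    def __init__(self, in_file, out_file):
        super().__init__("smooth")
        self.in_file = in_file
        self.out_file = out_file
    def _get_file_dep(self):
        return [self.in_file]
    def _get_targets(self):
        return [self.out_file]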
| 30.204082 | 80 | 0.641216 | [
"MIT"
] | lamyj/spire | spire/spm/spm_object.py | 1,480 | Python |
{
"name": """POS: Refunds analysis""",
"summary": """Waiter specifies refund reason to avoid serving mistakes in future.""",
"category": "Point of Sale",
"images": ["images/pos_order_cancel_restaurant.png"],
"version": "13.0.1.5.0",
"application": False,
"author": "IT-Projects LLC, Dinar Gabbasov",
"support": "[email protected]",
"website": "https://apps.odoo.com/apps/modules/13.0/pos_order_cancel_restaurant/",
"license": "Other OSI approved licence", # MIT
"price": 25.00,
"currency": "EUR",
"depends": ["pos_order_cancel", "pos_restaurant_base"],
"external_dependencies": {"python": [], "bin": []},
"data": ["views/template.xml", "views/views.xml"],
"qweb": ["static/src/xml/cancel_order.xml"],
"demo": ["data/pos_cancelled_reason_demo.xml"],
"post_load": None,
"pre_init_hook": None,
"post_init_hook": None,
"auto_install": False,
"installable": False,
}
| 38.16 | 89 | 0.631027 | [
"MIT"
] | BrayhanJC/pos-addons | pos_order_cancel_restaurant/__manifest__.py | 954 | Python |
# Definition for singly-linked list.
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
class Solution:
def removeElements(self, head, val):
"""
:type head: ListNode
:type val: int
:rtype: ListNode
"""
while head:
if head.val == val:
head = head.next
else:
break
if not head:
return head
cur = head
pre = cur
while cur:
if cur.val != val:
pre = cur
cur = cur.next
else:
cur = cur.next
pre.next = cur
return head
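# Illustrative check (not part of the original solution): build 1->2->6->3->4->5->6,
# strip the 6s, and print what remains.
def _build_list(values):
    dummy = ListNode(0)
    tail = dummy
    for v in values:
        tail.next = ListNode(v)
        tail = tail.next
    return dummy.next
if __name__ == "__main__":
    head = _build_list([1, 2, 6, 3, 4, 5, 6])
    node = Solution().removeElements(head, 6)
    remaining = []
    while node:
        remaining.append(node.val)
        node = node.next
    print(remaining)  # [1, 2, 3, 4, 5]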
| 20.647059 | 40 | 0.428775 | [
"Apache-2.0"
] | sonymoon/algorithm | src/main/python/leetcode-python/easy/203.Remove Linked List Elements.py | 702 | Python |
# author : Sam Rapier
from deploy_django_to_azure.settings.base import *
DEBUG = False
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = 'https://exeblobstorage.blob.core.windows.net/static-files/'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'sql_server.pyodbc',
'NAME': 'DB-exeterOrientation',
'USER': 'user-admin',
'PASSWORD': 'v%mRn3os#9P2JnjnV*dJ',
'HOST': 'db-exeter-orientation.database.windows.net',
'PORT': '1433',
'OPTIONS': {
'driver': 'ODBC Driver 17 for SQL Server',
'MARS_Connection': 'True',
}
}
} | 26.571429 | 73 | 0.625 | [
"MIT"
] | Thoma1999/exe_orientation_Q | deploy_django_to_azure/settings/production.py | 744 | Python |
# MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class SwitchConfigLearnedInformation(Base):
"""NOT DEFINED
The SwitchConfigLearnedInformation class encapsulates a list of switchConfigLearnedInformation resources that are managed by the system.
A list of resources can be retrieved from the server using the SwitchConfigLearnedInformation.find() method.
"""
__slots__ = ()
_SDM_NAME = 'switchConfigLearnedInformation'
_SDM_ATT_MAP = {
'ConfigFlags': 'configFlags',
'DataPathId': 'dataPathId',
'DataPathIdAsHex': 'dataPathIdAsHex',
'ErrorCode': 'errorCode',
'ErrorType': 'errorType',
'Latency': 'latency',
'LocalIp': 'localIp',
'MissSendLength': 'missSendLength',
'NegotiatedVersion': 'negotiatedVersion',
'RemoteIp': 'remoteIp',
'ReplyState': 'replyState',
}
def __init__(self, parent):
super(SwitchConfigLearnedInformation, self).__init__(parent)
@property
def ConfigFlags(self):
"""
Returns
-------
- str: NOT DEFINED
"""
return self._get_attribute(self._SDM_ATT_MAP['ConfigFlags'])
@property
def DataPathId(self):
"""
Returns
-------
- str: NOT DEFINED
"""
return self._get_attribute(self._SDM_ATT_MAP['DataPathId'])
@property
def DataPathIdAsHex(self):
"""
Returns
-------
- str: NOT DEFINED
"""
return self._get_attribute(self._SDM_ATT_MAP['DataPathIdAsHex'])
@property
def ErrorCode(self):
"""
Returns
-------
- str: NOT DEFINED
"""
return self._get_attribute(self._SDM_ATT_MAP['ErrorCode'])
@property
def ErrorType(self):
"""
Returns
-------
- str: NOT DEFINED
"""
return self._get_attribute(self._SDM_ATT_MAP['ErrorType'])
@property
def Latency(self):
"""
Returns
-------
- number: NOT DEFINED
"""
return self._get_attribute(self._SDM_ATT_MAP['Latency'])
@property
def LocalIp(self):
"""
Returns
-------
- str: NOT DEFINED
"""
return self._get_attribute(self._SDM_ATT_MAP['LocalIp'])
@property
def MissSendLength(self):
"""
Returns
-------
- number: NOT DEFINED
"""
return self._get_attribute(self._SDM_ATT_MAP['MissSendLength'])
@property
def NegotiatedVersion(self):
"""
Returns
-------
- str: NOT DEFINED
"""
return self._get_attribute(self._SDM_ATT_MAP['NegotiatedVersion'])
@property
def RemoteIp(self):
"""
Returns
-------
- str: NOT DEFINED
"""
return self._get_attribute(self._SDM_ATT_MAP['RemoteIp'])
@property
def ReplyState(self):
"""
Returns
-------
- str: NOT DEFINED
"""
return self._get_attribute(self._SDM_ATT_MAP['ReplyState'])
def find(self, ConfigFlags=None, DataPathId=None, DataPathIdAsHex=None, ErrorCode=None, ErrorType=None, Latency=None, LocalIp=None, MissSendLength=None, NegotiatedVersion=None, RemoteIp=None, ReplyState=None):
"""Finds and retrieves switchConfigLearnedInformation resources from the server.
All named parameters are evaluated on the server using regex. The named parameters can be used to selectively retrieve switchConfigLearnedInformation resources from the server.
To retrieve an exact match ensure the parameter value starts with ^ and ends with $
By default the find method takes no parameters and will retrieve all switchConfigLearnedInformation resources from the server.
Args
----
- ConfigFlags (str): NOT DEFINED
- DataPathId (str): NOT DEFINED
- DataPathIdAsHex (str): NOT DEFINED
- ErrorCode (str): NOT DEFINED
- ErrorType (str): NOT DEFINED
- Latency (number): NOT DEFINED
- LocalIp (str): NOT DEFINED
- MissSendLength (number): NOT DEFINED
- NegotiatedVersion (str): NOT DEFINED
- RemoteIp (str): NOT DEFINED
- ReplyState (str): NOT DEFINED
Returns
-------
- self: This instance with matching switchConfigLearnedInformation resources retrieved from the server available through an iterator or index
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._select(self._map_locals(self._SDM_ATT_MAP, locals()))
def read(self, href):
"""Retrieves a single instance of switchConfigLearnedInformation data from the server.
Args
----
- href (str): An href to the instance to be retrieved
Returns
-------
- self: This instance with the switchConfigLearnedInformation resources from the server available through an iterator or index
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
return self._read(href)
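# Usage sketch for find()/read() (assumes `parent` is the live protocols node that
# owns these resources; obtaining it requires a connected IxNetwork session and is
# outside this module). Exact matches wrap the value in ^ and $; the ReplyState
# value below is illustrative:
#
#     learned = SwitchConfigLearnedInformation(parent).find(ReplyState='^Success$')
#     for info in learned:
#         print(info.DataPathIdAsHex, info.NegotiatedVersion, info.Latency)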
| 32.611111 | 213 | 0.636519 | [
"MIT"
] | Vibaswan/ixnetwork_restpy | ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/protocols/switchconfiglearnedinformation_e983ca5da0eadbba93d1ce1d2903a5b7.py | 6,457 | Python |
# -*- coding: utf-8 -*-
"""Cisco DNA Center Claim a Device to a Site data model.
Copyright (c) 2019-2021 Cisco Systems.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
import fastjsonschema
import json
from dnacentersdk.exceptions import MalformedRequest
from builtins import *
class JSONSchemaValidator5889Fb844939A13B(object):
"""Claim a Device to a Site request schema definition."""
def __init__(self):
super(JSONSchemaValidator5889Fb844939A13B, self).__init__()
self._validator = fastjsonschema.compile(json.loads(
'''{
"properties": {
"deviceId": {
"type": [
"string",
"null"
]
},
"siteId": {
"type": [
"string",
"null"
]
},
"type": {
"enum": [
"Default",
"AccessPoint",
"StackSwitch",
"Sensor",
"MobilityExpress",
null
],
"type": [
"string",
"null"
]
}
},
"type": "object"
}'''.replace("\n" + ' ' * 16, '')
))
def validate(self, request):
try:
self._validator(request)
except fastjsonschema.exceptions.JsonSchemaException as e:
raise MalformedRequest(
'{} is invalid. Reason: {}'.format(request, e.message)
)
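# Usage sketch (hypothetical payload values): the validator accepts a claim-request
# dict and raises MalformedRequest when a field violates the schema.
if __name__ == '__main__':
    validator = JSONSchemaValidator5889Fb844939A13B()
    validator.validate({'deviceId': 'abc123', 'siteId': 'site-1', 'type': 'Default'})
    try:
        validator.validate({'type': 'NotARealType'})
    except MalformedRequest as exc:
        print(exc)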
| 31.811765 | 78 | 0.58395 | [
"MIT"
] | cisco-en-programmability/dnacentersdk | dnacentersdk/models/validators/v1_2_10/jsd_5889fb844939a13b.py | 2,704 | Python |
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import compat_HTTPError
from ..utils import (
ExtractorError,
int_or_none,
urlencode_postdata,
)
class AtresPlayerIE(InfoExtractor):
_VALID_URL = r"https?://(?:www\.)?atresplayer\.com/[^/]+/[^/]+/[^/]+/[^/]+/(?P<display_id>.+?)_(?P<id>[0-9a-f]{24})"
_NETRC_MACHINE = "atresplayer"
_TESTS = [
{
"url": "https://www.atresplayer.com/antena3/series/pequenas-coincidencias/temporada-1/capitulo-7-asuntos-pendientes_5d4aa2c57ed1a88fc715a615/",
"info_dict": {
"id": "5d4aa2c57ed1a88fc715a615",
"ext": "mp4",
"title": "Capítulo 7: Asuntos pendientes",
"description": "md5:7634cdcb4d50d5381bedf93efb537fbc",
"duration": 3413,
},
"params": {
"format": "bestvideo",
},
"skip": "This video is only available for registered users",
},
{
"url": "https://www.atresplayer.com/lasexta/programas/el-club-de-la-comedia/temporada-4/capitulo-10-especial-solidario-nochebuena_5ad08edf986b2855ed47adc4/",
"only_matching": True,
},
{
"url": "https://www.atresplayer.com/antena3/series/el-secreto-de-puente-viejo/el-chico-de-los-tres-lunares/capitulo-977-29-12-14_5ad51046986b2886722ccdea/",
"only_matching": True,
},
]
_API_BASE = "https://api.atresplayer.com/"
def _real_initialize(self):
self._login()
def _handle_error(self, e, code):
if isinstance(e.cause, compat_HTTPError) and e.cause.code == code:
error = self._parse_json(e.cause.read(), None)
if error.get("error") == "required_registered":
self.raise_login_required()
raise ExtractorError(error["error_description"], expected=True)
raise
def _login(self):
username, password = self._get_login_info()
if username is None:
return
self._request_webpage(self._API_BASE + "login", None, "Downloading login page")
try:
target_url = self._download_json(
"https://account.atresmedia.com/api/login",
None,
"Logging in",
headers={"Content-Type": "application/x-www-form-urlencoded"},
data=urlencode_postdata(
{
"username": username,
"password": password,
}
),
)["targetUrl"]
except ExtractorError as e:
self._handle_error(e, 400)
self._request_webpage(target_url, None, "Following Target URL")
def _real_extract(self, url):
display_id, video_id = re.match(self._VALID_URL, url).groups()
try:
episode = self._download_json(
self._API_BASE + "client/v1/player/episode/" + video_id, video_id
)
except ExtractorError as e:
self._handle_error(e, 403)
title = episode["titulo"]
formats = []
for source in episode.get("sources", []):
src = source.get("src")
if not src:
continue
src_type = source.get("type")
if src_type == "application/vnd.apple.mpegurl":
formats.extend(
self._extract_m3u8_formats(
src, video_id, "mp4", "m3u8_native", m3u8_id="hls", fatal=False
)
)
elif src_type == "application/dash+xml":
formats.extend(
self._extract_mpd_formats(src, video_id, mpd_id="dash", fatal=False)
)
self._sort_formats(formats)
heartbeat = episode.get("heartbeat") or {}
omniture = episode.get("omniture") or {}
get_meta = lambda x: heartbeat.get(x) or omniture.get(x)
return {
"display_id": display_id,
"id": video_id,
"title": title,
"description": episode.get("descripcion"),
"thumbnail": episode.get("imgPoster"),
"duration": int_or_none(episode.get("duration")),
"formats": formats,
"channel": get_meta("channel"),
"season": get_meta("season"),
"episode_number": int_or_none(get_meta("episodeNumber")),
}
| 35.84127 | 169 | 0.550044 | [
"MIT"
] | Pagasis/YouTua | youtuatools/extractor/atresplayer.py | 4,517 | Python |
#!/usr/bin/python
import sys
import cgi
import cgitb
import sqlite3
reload(sys)
sys.setdefaultencoding('utf-8')
cgitb.enable()
# html
print("Content-type: text/html\n")
print('<meta charset="utf-8">')
print("<html><head>")
print('''<script type="text/javascript" src="https://ajax.googleapis.com/ajax/libs/jquery/3.3.1/jquery.min.js"></script>''')
print("<title>BRITE REU Candidates</title>")
print('''<link rel="stylesheet" href="https://bioed.bu.edu/students_21/group_proj/group_K/css/nav.css">
<link rel="stylesheet" href="https://bioed.bu.edu/students_21/group_proj/group_K/css/appadmin.css">
</head>''')
print("<body>")
print('''<div id="bg-image">''')
print('''<div id="topnav">
<a href="https://bioed.bu.edu/cgi-bin/students_21/group_proj/group_K/show_applicant_admin.py">Applicant List</a>
<a href="https://bioed.bu.edu/cgi-bin/students_21/group_proj/group_K/stats_admin.py">Applicant Statistics</a>
<a href="#assign users">Assign Users</a>
<a href="https://bioed.bu.edu/cgi-bin/students_21/group_proj/group_K/input_projects.py">Input Faculty Projects</a>
<a href="https://bioed.bu.edu/cgi-bin/students_21/group_proj/group_K/review_summary_admin.py">View All Past Reviews</a>
<a class="active" href="https://bioed.bu.edu/cgi-bin/students_21/group_proj/group_K/assign_candidate.py">Assign Candidates to Faculty</a>
<a href="https://bioed.bu.edu/cgi-bin/students_21/group_proj/group_K/can_pref.py">Candidate Preferences</a>
<a href="https://bioed.bu.edu/cgi-bin/students_21/group_proj/group_K/match.py">Match Candidates to Faculty</a>
<a href="https://bioed.bu.edu/cgi-bin/students_21/group_proj/group_K/finalmatch.py">Final Matches</a>
<a href="https://bioed.bu.edu/cgi-bin/students_21/group_proj/group_K/help_admin.py">Help</a>
<a href="https://bioed.bu.edu/cgi-bin/students_21/group_proj/group_K/about_admin.py">About/Contact</a>
</div>''')
print("<h3>Select Checkboxes to Assign Candidates to Faculty Members</h3>")
print("<h4>To Remove an Assignment, Uncheck the Checkbox</h4>")
#query to get candidate data for the rows
query1 = "SELECT cid, firstname, lastname FROM Applicant join Candidate on Applicant.aid=Candidate.cid;"
#query to get the faculty and project names for the table headers
query2 = 'SELECT pid, uid, fname || " " || lname || ":\n" || project_name FROM Project JOIN User using(uid) ORDER BY(lname);'
#query to get all current candidate-faculty pairs in the database
query3 = 'SELECT cid || "_" || pid, assigned_at FROM Assignment ORDER BY(cid);'
#start connection
connection = sqlite3.connect('db/BRITEREU.db')
c = connection.cursor()
try:
#execute query 1
c.execute(query1)
#get results to above standard query
results1 = c.fetchall()
except Exception:
print("<p><font color=red><b>Error Query 1</b></font></p>")
try:
#execute query 2
c.execute(query2)
#get results to above standard query
results2 = c.fetchall()
except Exception:
print("<p><font color=red><b>Error Query 2</b></font></p>")
try:
#execute query 3
c.execute(query3)
#get results to above standard query
results3 = c.fetchall()
except Exception:
print("<p><font color=red><b>Error Query 3</b></font></p>")
c.close()
connection.close()
#get all the candidate-faculty pair ids currently in the database which will be used in the section that checks and uses form data
cfids = [cf[0] for cf in results3]
#retrieve form data
form = cgi.FieldStorage()
#if form is empty, then it's possible that everything is to be deleted from the Assignment table
#if not form:
# if results3:
# truncateStatement = "DELETE FROM Assignment;"
# connection = sqlite3.connect('db/BRITEREU.db')
# c = connection.cursor()
# c.execute(truncateStatement)
# connection.commit()
#check what checkboxes are checked
#if checkbox was selected that was not previously selected - insert those pairs into the Assignment table
#if checkbox is no longer selected - delete those pairs from the Assignment table
if form:
res3 = [pair for pair in cfids]
pairlist = form.getlist("cf")
#find pairs that are in the selected list (pairlist) and not in the current database list (res3)
tobe_inserted = list(set(pairlist) - set(res3))
tobe_inserted = [tuple(i.split("_")) for i in tobe_inserted]
#find pairs that are not in the selected list(pairlist) and are in the current database list (res3)
tobe_removed = list(set(res3) - set(pairlist))
tobe_removed = [tuple(map(int, i.split("_"))) for i in tobe_removed]
if tobe_inserted or tobe_removed:
connection = sqlite3.connect('db/BRITEREU.db')
c = connection.cursor()
for pair in tobe_inserted:
insertStatement = "INSERT INTO Assignment(cid, pid) VALUES (%s, %s);" % pair
c.execute(insertStatement)
connection.commit()
for pair in tobe_removed:
deleteStatement = 'DELETE FROM Assignment WHERE cid ="%s" and pid ="%s";' % pair
c.execute(deleteStatement)
connection.commit()
c.close()
connection.close()
#query the database again to now get all updated pairs
query4 = 'SELECT cid || "_" || pid, assigned_at FROM Assignment ORDER BY(cid);'
connection = sqlite3.connect('db/BRITEREU.db')
c = connection.cursor()
try:
#execute query 1
c.execute(query4)
#get results to above standard query
results4 = c.fetchall()
except Exception:
print("<p><font color=red><b>Error Query 4</b></font></p>")
#form action for user to submit checkboxes selections
print('''<form name="form1" id="form1" action="https://bioed.bu.edu/cgi-bin/students_21/group_proj/group_K/assign_candidate.py" method="post" >''')
print('<table id=Candidate class="dataframe">')
print("<tr><th>Candidate ID</th><th>Candidate Name</th>")
#gets list of faculty
#adds all the faculty who are in the database as columns
for faculty in results2:
print("<th>%s</th>") % faculty[2]
print("</tr>")
#get the Project IDs for the projects so that you concatenate to the CID to formulate a value pair
pids = [faculty[0] for faculty in results2]
#added proper URL for reference to reviewer page
#print the candidate table with a checkbox for each faculty member
for row in results1:
print('''<tr><td><a href="https://bioed.bu.edu/cgi-bin/students_21/group_proj/group_K/reviewer.py?AID=%s">%s</a></td><td>%s %s</td>''') % (row[0], row[0], row[1], row[2])
for f in pids:
for cf_pair in results4:
if (str(row[0])+"_"+str(f)) in cf_pair:
print('<td><input title="%s GMT" type="checkbox" name="cf" value=%s checked="checked" />rank</td>') % (cf_pair[1], (str(row[0])+"_"+str(f)))
break
else:
print('<td><input type="checkbox" name="cf" value=%s /></td>') % (str(row[0])+"_"+str(f))
print("</tr>")
#close the candidate table before the submit button
print("</table>")
#add submit button for assigning faculty to candidates
print('<input type="submit" value="Assign Candidates" /><br /><br />')
#end form
print("</form>")
#filtering section for the table
print('''<script src="https://bioed.bu.edu/students_21/group_proj/group_K/tablefilter/tablefilter.js"></script>''')
print('''<script data-config="">
var filtersConfig = {
base_path: 'https://bioed.bu.edu/students_21/divyas3/tablefilter/',
auto_filter: {
delay: 110 //milliseconds
},
filters_row_index: 1,
state: true,
alternate_rows: true,
rows_counter: true,
btn_reset: true,
status_bar: true,
msg_filter: 'Filtering...'
};
var tf = new TableFilter(Candidate, filtersConfig);
tf.init();
</script>''')
print("</body> </html>")
| 39.887755 | 175 | 0.672806 | [
"MIT"
] | DivyaGSun/BRITE_REU_database | assign_candidate.py | 7,818 | Python |
"""Test the UniFi Protect switch platform."""
# pylint: disable=protected-access
from __future__ import annotations
from unittest.mock import AsyncMock, Mock
import pytest
from pyunifiprotect.data import (
Camera,
Light,
RecordingMode,
SmartDetectObjectType,
VideoMode,
)
from homeassistant.components.unifiprotect.const import DEFAULT_ATTRIBUTION
from homeassistant.components.unifiprotect.switch import (
CAMERA_SWITCHES,
LIGHT_SWITCHES,
ProtectSwitchEntityDescription,
)
from homeassistant.const import ATTR_ATTRIBUTION, ATTR_ENTITY_ID, STATE_OFF, Platform
from homeassistant.core import HomeAssistant
from homeassistant.helpers import entity_registry as er
from .conftest import (
MockEntityFixture,
assert_entity_counts,
enable_entity,
ids_from_device_description,
)
CAMERA_SWITCHES_BASIC = [
d
for d in CAMERA_SWITCHES
if d.name != "Detections: Face"
and d.name != "Detections: Package"
and d.name != "SSH Enabled"
]
CAMERA_SWITCHES_NO_EXTRA = [
d for d in CAMERA_SWITCHES_BASIC if d.name not in ("High FPS", "Privacy Mode")
]
@pytest.fixture(name="light")
async def light_fixture(
hass: HomeAssistant, mock_entry: MockEntityFixture, mock_light: Light
):
"""Fixture for a single light for testing the switch platform."""
# disable pydantic validation so mocking can happen
Light.__config__.validate_assignment = False
light_obj = mock_light.copy(deep=True)
light_obj._api = mock_entry.api
light_obj.name = "Test Light"
light_obj.is_ssh_enabled = False
light_obj.light_device_settings.is_indicator_enabled = False
mock_entry.api.bootstrap.reset_objects()
mock_entry.api.bootstrap.lights = {
light_obj.id: light_obj,
}
await hass.config_entries.async_setup(mock_entry.entry.entry_id)
await hass.async_block_till_done()
assert_entity_counts(hass, Platform.SWITCH, 2, 1)
yield light_obj
Light.__config__.validate_assignment = True
@pytest.fixture(name="camera")
async def camera_fixture(
hass: HomeAssistant, mock_entry: MockEntityFixture, mock_camera: Camera
):
"""Fixture for a single camera for testing the switch platform."""
# disable pydantic validation so mocking can happen
Camera.__config__.validate_assignment = False
camera_obj = mock_camera.copy(deep=True)
camera_obj._api = mock_entry.api
camera_obj.channels[0]._api = mock_entry.api
camera_obj.channels[1]._api = mock_entry.api
camera_obj.channels[2]._api = mock_entry.api
camera_obj.name = "Test Camera"
camera_obj.recording_settings.mode = RecordingMode.DETECTIONS
camera_obj.feature_flags.has_led_status = True
camera_obj.feature_flags.has_hdr = True
camera_obj.feature_flags.video_modes = [VideoMode.DEFAULT, VideoMode.HIGH_FPS]
camera_obj.feature_flags.has_privacy_mask = True
camera_obj.feature_flags.has_speaker = True
camera_obj.feature_flags.has_smart_detect = True
camera_obj.feature_flags.smart_detect_types = [
SmartDetectObjectType.PERSON,
SmartDetectObjectType.VEHICLE,
]
camera_obj.is_ssh_enabled = False
camera_obj.led_settings.is_enabled = False
camera_obj.hdr_mode = False
camera_obj.video_mode = VideoMode.DEFAULT
camera_obj.remove_privacy_zone()
camera_obj.speaker_settings.are_system_sounds_enabled = False
camera_obj.osd_settings.is_name_enabled = False
camera_obj.osd_settings.is_date_enabled = False
camera_obj.osd_settings.is_logo_enabled = False
camera_obj.osd_settings.is_debug_enabled = False
camera_obj.smart_detect_settings.object_types = []
mock_entry.api.bootstrap.reset_objects()
mock_entry.api.bootstrap.cameras = {
camera_obj.id: camera_obj,
}
await hass.config_entries.async_setup(mock_entry.entry.entry_id)
await hass.async_block_till_done()
assert_entity_counts(hass, Platform.SWITCH, 12, 11)
yield camera_obj
Camera.__config__.validate_assignment = True
@pytest.fixture(name="camera_none")
async def camera_none_fixture(
hass: HomeAssistant, mock_entry: MockEntityFixture, mock_camera: Camera
):
"""Fixture for a single camera for testing the switch platform."""
# disable pydantic validation so mocking can happen
Camera.__config__.validate_assignment = False
camera_obj = mock_camera.copy(deep=True)
camera_obj._api = mock_entry.api
camera_obj.channels[0]._api = mock_entry.api
camera_obj.channels[1]._api = mock_entry.api
camera_obj.channels[2]._api = mock_entry.api
camera_obj.name = "Test Camera"
camera_obj.recording_settings.mode = RecordingMode.DETECTIONS
camera_obj.feature_flags.has_led_status = False
camera_obj.feature_flags.has_hdr = False
camera_obj.feature_flags.video_modes = [VideoMode.DEFAULT]
camera_obj.feature_flags.has_privacy_mask = False
camera_obj.feature_flags.has_speaker = False
camera_obj.feature_flags.has_smart_detect = False
camera_obj.is_ssh_enabled = False
camera_obj.osd_settings.is_name_enabled = False
camera_obj.osd_settings.is_date_enabled = False
camera_obj.osd_settings.is_logo_enabled = False
camera_obj.osd_settings.is_debug_enabled = False
mock_entry.api.bootstrap.reset_objects()
mock_entry.api.bootstrap.cameras = {
camera_obj.id: camera_obj,
}
await hass.config_entries.async_setup(mock_entry.entry.entry_id)
await hass.async_block_till_done()
assert_entity_counts(hass, Platform.SWITCH, 5, 4)
yield camera_obj
Camera.__config__.validate_assignment = True
@pytest.fixture(name="camera_privacy")
async def camera_privacy_fixture(
hass: HomeAssistant, mock_entry: MockEntityFixture, mock_camera: Camera
):
"""Fixture for a single camera for testing the switch platform."""
# disable pydantic validation so mocking can happen
Camera.__config__.validate_assignment = False
camera_obj = mock_camera.copy(deep=True)
camera_obj._api = mock_entry.api
camera_obj.channels[0]._api = mock_entry.api
camera_obj.channels[1]._api = mock_entry.api
camera_obj.channels[2]._api = mock_entry.api
camera_obj.name = "Test Camera"
camera_obj.recording_settings.mode = RecordingMode.NEVER
camera_obj.feature_flags.has_led_status = False
camera_obj.feature_flags.has_hdr = False
camera_obj.feature_flags.video_modes = [VideoMode.DEFAULT]
camera_obj.feature_flags.has_privacy_mask = True
camera_obj.feature_flags.has_speaker = False
camera_obj.feature_flags.has_smart_detect = False
camera_obj.add_privacy_zone()
camera_obj.is_ssh_enabled = False
camera_obj.osd_settings.is_name_enabled = False
camera_obj.osd_settings.is_date_enabled = False
camera_obj.osd_settings.is_logo_enabled = False
camera_obj.osd_settings.is_debug_enabled = False
mock_entry.api.bootstrap.reset_objects()
mock_entry.api.bootstrap.cameras = {
camera_obj.id: camera_obj,
}
await hass.config_entries.async_setup(mock_entry.entry.entry_id)
await hass.async_block_till_done()
assert_entity_counts(hass, Platform.SWITCH, 6, 5)
yield camera_obj
Camera.__config__.validate_assignment = True
async def test_switch_setup_light(
hass: HomeAssistant,
mock_entry: MockEntityFixture,
light: Light,
):
"""Test switch entity setup for light devices."""
entity_registry = er.async_get(hass)
description = LIGHT_SWITCHES[1]
unique_id, entity_id = ids_from_device_description(
Platform.SWITCH, light, description
)
entity = entity_registry.async_get(entity_id)
assert entity
assert entity.unique_id == unique_id
state = hass.states.get(entity_id)
assert state
assert state.state == STATE_OFF
assert state.attributes[ATTR_ATTRIBUTION] == DEFAULT_ATTRIBUTION
description = LIGHT_SWITCHES[0]
unique_id = f"{light.id}_{description.key}"
entity_id = f"switch.test_light_{description.name.lower().replace(' ', '_')}"
entity = entity_registry.async_get(entity_id)
assert entity
assert entity.disabled is True
assert entity.unique_id == unique_id
await enable_entity(hass, mock_entry.entry.entry_id, entity_id)
state = hass.states.get(entity_id)
assert state
assert state.state == STATE_OFF
assert state.attributes[ATTR_ATTRIBUTION] == DEFAULT_ATTRIBUTION
async def test_switch_setup_camera_all(
hass: HomeAssistant,
mock_entry: MockEntityFixture,
camera: Camera,
):
"""Test switch entity setup for camera devices (all enabled feature flags)."""
entity_registry = er.async_get(hass)
for description in CAMERA_SWITCHES_BASIC:
unique_id, entity_id = ids_from_device_description(
Platform.SWITCH, camera, description
)
entity = entity_registry.async_get(entity_id)
assert entity
assert entity.unique_id == unique_id
state = hass.states.get(entity_id)
assert state
assert state.state == STATE_OFF
assert state.attributes[ATTR_ATTRIBUTION] == DEFAULT_ATTRIBUTION
description = CAMERA_SWITCHES[0]
description_entity_name = (
description.name.lower().replace(":", "").replace(" ", "_")
)
unique_id = f"{camera.id}_{description.key}"
entity_id = f"switch.test_camera_{description_entity_name}"
entity = entity_registry.async_get(entity_id)
assert entity
assert entity.disabled is True
assert entity.unique_id == unique_id
await enable_entity(hass, mock_entry.entry.entry_id, entity_id)
state = hass.states.get(entity_id)
assert state
assert state.state == STATE_OFF
assert state.attributes[ATTR_ATTRIBUTION] == DEFAULT_ATTRIBUTION
async def test_switch_setup_camera_none(
hass: HomeAssistant,
mock_entry: MockEntityFixture,
camera_none: Camera,
):
"""Test switch entity setup for camera devices (no enabled feature flags)."""
entity_registry = er.async_get(hass)
for description in CAMERA_SWITCHES_BASIC:
if description.ufp_required_field is not None:
continue
unique_id, entity_id = ids_from_device_description(
Platform.SWITCH, camera_none, description
)
entity = entity_registry.async_get(entity_id)
assert entity
assert entity.unique_id == unique_id
state = hass.states.get(entity_id)
assert state
assert state.state == STATE_OFF
assert state.attributes[ATTR_ATTRIBUTION] == DEFAULT_ATTRIBUTION
description = CAMERA_SWITCHES[0]
description_entity_name = (
description.name.lower().replace(":", "").replace(" ", "_")
)
unique_id = f"{camera_none.id}_{description.key}"
entity_id = f"switch.test_camera_{description_entity_name}"
entity = entity_registry.async_get(entity_id)
assert entity
assert entity.disabled is True
assert entity.unique_id == unique_id
await enable_entity(hass, mock_entry.entry.entry_id, entity_id)
state = hass.states.get(entity_id)
assert state
assert state.state == STATE_OFF
assert state.attributes[ATTR_ATTRIBUTION] == DEFAULT_ATTRIBUTION
async def test_switch_light_status(hass: HomeAssistant, light: Light):
"""Tests status light switch for lights."""
description = LIGHT_SWITCHES[1]
light.__fields__["set_status_light"] = Mock()
light.set_status_light = AsyncMock()
_, entity_id = ids_from_device_description(Platform.SWITCH, light, description)
await hass.services.async_call(
"switch", "turn_on", {ATTR_ENTITY_ID: entity_id}, blocking=True
)
light.set_status_light.assert_called_once_with(True)
await hass.services.async_call(
"switch", "turn_off", {ATTR_ENTITY_ID: entity_id}, blocking=True
)
light.set_status_light.assert_called_with(False)
async def test_switch_camera_ssh(
hass: HomeAssistant, camera: Camera, mock_entry: MockEntityFixture
):
"""Tests SSH switch for cameras."""
description = CAMERA_SWITCHES[0]
camera.__fields__["set_ssh"] = Mock()
camera.set_ssh = AsyncMock()
_, entity_id = ids_from_device_description(Platform.SWITCH, camera, description)
await enable_entity(hass, mock_entry.entry.entry_id, entity_id)
await hass.services.async_call(
"switch", "turn_on", {ATTR_ENTITY_ID: entity_id}, blocking=True
)
camera.set_ssh.assert_called_once_with(True)
await hass.services.async_call(
"switch", "turn_off", {ATTR_ENTITY_ID: entity_id}, blocking=True
)
camera.set_ssh.assert_called_with(False)
@pytest.mark.parametrize("description", CAMERA_SWITCHES_NO_EXTRA)
async def test_switch_camera_simple(
hass: HomeAssistant, camera: Camera, description: ProtectSwitchEntityDescription
):
"""Tests all simple switches for cameras."""
assert description.ufp_set_method is not None
camera.__fields__[description.ufp_set_method] = Mock()
setattr(camera, description.ufp_set_method, AsyncMock())
set_method = getattr(camera, description.ufp_set_method)
_, entity_id = ids_from_device_description(Platform.SWITCH, camera, description)
await hass.services.async_call(
"switch", "turn_on", {ATTR_ENTITY_ID: entity_id}, blocking=True
)
set_method.assert_called_once_with(True)
await hass.services.async_call(
"switch", "turn_off", {ATTR_ENTITY_ID: entity_id}, blocking=True
)
set_method.assert_called_with(False)
async def test_switch_camera_highfps(hass: HomeAssistant, camera: Camera):
"""Tests High FPS switch for cameras."""
description = CAMERA_SWITCHES[3]
camera.__fields__["set_video_mode"] = Mock()
camera.set_video_mode = AsyncMock()
_, entity_id = ids_from_device_description(Platform.SWITCH, camera, description)
await hass.services.async_call(
"switch", "turn_on", {ATTR_ENTITY_ID: entity_id}, blocking=True
)
camera.set_video_mode.assert_called_once_with(VideoMode.HIGH_FPS)
await hass.services.async_call(
"switch", "turn_off", {ATTR_ENTITY_ID: entity_id}, blocking=True
)
camera.set_video_mode.assert_called_with(VideoMode.DEFAULT)
async def test_switch_camera_privacy(hass: HomeAssistant, camera: Camera):
"""Tests Privacy Mode switch for cameras."""
description = CAMERA_SWITCHES[4]
camera.__fields__["set_privacy"] = Mock()
camera.set_privacy = AsyncMock()
_, entity_id = ids_from_device_description(Platform.SWITCH, camera, description)
await hass.services.async_call(
"switch", "turn_on", {ATTR_ENTITY_ID: entity_id}, blocking=True
)
camera.set_privacy.assert_called_once_with(True, 0, RecordingMode.NEVER)
await hass.services.async_call(
"switch", "turn_off", {ATTR_ENTITY_ID: entity_id}, blocking=True
)
camera.set_privacy.assert_called_with(
False, camera.mic_volume, camera.recording_settings.mode
)
async def test_switch_camera_privacy_already_on(
hass: HomeAssistant, camera_privacy: Camera
):
"""Tests Privacy Mode switch for cameras with privacy mode defaulted on."""
description = CAMERA_SWITCHES[4]
camera_privacy.__fields__["set_privacy"] = Mock()
camera_privacy.set_privacy = AsyncMock()
_, entity_id = ids_from_device_description(
Platform.SWITCH, camera_privacy, description
)
await hass.services.async_call(
"switch", "turn_off", {ATTR_ENTITY_ID: entity_id}, blocking=True
)
camera_privacy.set_privacy.assert_called_once_with(False, 100, RecordingMode.ALWAYS)
| 31.723014 | 88 | 0.738701 | [
"Apache-2.0"
] | LW-Ho/home-assistant | tests/components/unifiprotect/test_switch.py | 15,576 | Python |
#!/usr/bin/env python3
'''
Model for Riemannian feature calculation and classification for EEG data
'''
import numpy as np
from sklearn.svm import LinearSVC, SVC
from riemannian_multiscale import RiemannianMultiscale, QuantizedRiemannianMultiscale
from filters import load_filterbank
from utilities import quantize
__author__ = "Michael Hersche, Tino Rellstab and Tibor Schneider"
__email__ = "[email protected],[email protected]"
DATA_PATH = "dataset/"
# QUANTIZED = True
# ONLY_2HZ_BANDS = True
class RiemannianModel():
""" Riemannian Model """
def __init__(self, svm_kernel='linear', svm_c=0.1, fs=250, bands=None, time_windows=None,
riem_opt='Riemann', rho=0.1, filter_type='butter', filter_order=2,
random_state=None):
""" Constructor
        Parameters
        ----------
svm_kernel: str {'linear', 'sigmoid', 'rbf'}
kernel used for classifier
svm_c: float
regularization parameter for the classifier
fs: int
sampling rate of the data
bands: list of int
bandwidths used in filterbanks (default: [2, 4, 8, 16, 32])
time_windows: list of list of ints, shape = (N, 2)
            time windows used, in seconds (default: [[2.5, 6]])
riem_opt: str {"riemann", "Riemann_Euclid", "Whitened_Euclid", "No_Adaptation"}
type of riemannian used
rho: float
Normalization parameter for the covariance matrix of the riemannian
filter_type: str {"butter", "fir"}
Type of the filter
filter_order: int
Order of the filter
random_state: int or None
random seed used in the SVM
"""
# setup classifier
if svm_kernel == 'linear':
self.classifier = LinearSVC(C=svm_c, loss='hinge', random_state=random_state, tol=0.00001)
else:
self.classifier = SVC(C=svm_c, kernel=svm_kernel, degree=10, gamma='auto',
cache_size=10000, random_state=random_state)
# setup Filterbank
if bands is None:
bandwidths = np.array([2, 4, 8, 16, 32])
else:
bandwidths = np.array(bands)
filter_bank = load_filterbank(bandwidths, fs, order=filter_order, max_freq=40, ftype=filter_type)
# setup Time Windows
if time_windows is None:
time_windows = (np.array([[2.5, 6]]) * fs).astype(int)
# time_windows = (np.array([[2.0, 5.0]]) * fs).astype(int)
else:
time_windows = (np.array(time_windows) * fs).astype(int)
# setup riemannian
self.riemannian = RiemannianMultiscale(filter_bank, time_windows, riem_opt=riem_opt,
rho=rho, vectorized=True)
# store dimensionality
self.no_bands = filter_bank.shape[0]
self.no_time_windows = time_windows.shape[0]
self.no_riem = None
self.no_features = None
def fit(self, samples, labels):
""" Training
Parameters
----------
samples: np.array, size=(N, C, T)
training samples
labels: np.array, size=(N)
training labels
"""
        # extract the number of features
assert len(samples.shape) == 3
no_channels = samples.shape[1]
        self.no_riem = int(no_channels * (no_channels + 1) / 2)  # Total number of Riemannian features per band and time window
self.no_features = self.no_riem * self.no_bands * self.no_time_windows
# fit and extract training features from the riemannian
features = self.riemannian.fit(samples)
self.classifier.fit(features, labels)
def score(self, samples, labels):
""" Measure the performance, returns success rate
Parameters
----------
samples: np.array, size=(N, C, T)
training samples
labels: np.array, size=(N)
training labels
Returns
-------
float: score of the model
"""
features = self.riemannian.features(samples)
return self.classifier.score(features, labels)
def predict(self, samples):
""" Predict some data
Parameters
----------
samples: np.array, size=(N, C, T)
training samples
Returns
-------
np.array, size=[N]: prediction
"""
features = self.riemannian.features(samples)
return self.classifier.predict(features)
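# A minimal usage sketch for RiemannianModel, assuming `train_x`/`test_x` are EEG arrays of
# shape (N, C, T) sampled at 250 Hz and `train_y`/`test_y` are the matching label vectors:
#
#     model = RiemannianModel(svm_kernel='linear', svm_c=0.1, fs=250)
#     model.fit(train_x, train_y)
#     accuracy = model.score(test_x, test_y)
#     predictions = model.predict(test_x)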
class QuantizedRiemannianModel():
""" QuantizedRiemannian Model """
def __init__(self, svm_c=0.1, fs=250, bands=None, riem_opt='Riemann', rho=0.1, filter_order=2,
random_state=None, num_bits=8, bitshift_scale=True):
""" Constructor
Parameters
----------
svm_c: float
regularization parameter for the classifier
fs: int
sampling rate of the data
bands: list of int
bandwidths used in filterbanks (default: [2, 4, 8, 16, 32])
riem_opt: str {"riemann", "Riemann_Euclid", "Whitened_Euclid", "No_Adaptation"}
type of riemannian used
rho: float
Normalization parameter for the covariance matrix of the riemannian
filter_order: int
Order of the filter
random_state: int or None
random seed used in the SVM
num_bits: int
Number of bits used for quantization
bitshift_scale: bool
if True, make sure that all scale factors between one part and the next is a bitshift
"""
self.num_bits = num_bits
self.bitshift_scale = bitshift_scale
# setup classifier
self.classifier = LinearSVC(C=svm_c, loss='hinge', random_state=random_state, tol=0.00001)
# setup Filterbank
if bands is None:
bandwidths = np.array([2, 4, 8, 16, 32])
else:
bandwidths = np.array(bands)
filter_bank = load_filterbank(bandwidths, fs, order=filter_order, max_freq=40, ftype="butter")
# setup Time Windows
time_windows = (np.array([[2.5, 6]]) * fs).astype(int)
# time_windows = (np.array([[2.0, 5.0]]) * fs).astype(int) # !!!!!
# setup riemannian
self.riemannian = QuantizedRiemannianMultiscale(filter_bank, time_windows, riem_opt=riem_opt,
rho=rho, vectorized=True, num_bits=num_bits,
bitshift_scale=bitshift_scale)
# prepare quantized weights and biases
self.scale_weight = 0
self.scale_bias = 0
# store dimensionality
self.no_bands = filter_bank.shape[0]
self.no_time_windows = time_windows.shape[0]
self.no_riem = None
self.no_features = None
def fit(self, samples, labels):
""" Training
Parameters
----------
samples: np.array, size=(N, C, T)
training samples
labels: np.array, size=(N)
training labels
"""
        # extract the number of features
assert len(samples.shape) == 3
no_channels = samples.shape[1]
        self.no_riem = int(no_channels * (no_channels + 1) / 2)  # Total number of Riemannian features per band and time window
self.no_features = self.no_riem * self.no_bands * self.no_time_windows
# prepare scale factors
self.riemannian.prepare_quantization(samples)
# fit and extract training features from the riemannian
features = self.riemannian.fit(samples)
self.classifier.fit(features, labels)
# quantize the classifier
self.scale_weight = max(self.scale_weight, np.abs(self.classifier.coef_).max())
weights = quantize(self.classifier.coef_, self.scale_weight, self.num_bits, do_round=True)
self.classifier.coef_ = weights
# do not quantize the bias, this one will be added in 32 bit, and quantization does not
# matter here...
# self.scale_bias = max(self.scale_bias, np.abs(self.classifier.intercept_).max())
# bias = quantize(self.classifier.intercept_, self.scale_weight, self.num_bits,
# do_round=True)
# self.classifier.intercept_ = bias
def score(self, samples, labels):
""" Measure the performance, returns success rate
Parameters
----------
samples: np.array, size=(N, C, T)
training samples
labels: np.array, size=(N)
training labels
Returns
-------
float: score of the model
"""
features = self.riemannian.features(samples)
return self.classifier.score(features, labels)
def predict(self, samples):
""" Predict some data
Parameters
----------
samples: np.array, size=(N, C, T)
training samples
Returns
-------
np.array, size=[N]: prediction
"""
features = self.riemannian.features(samples)
return self.classifier.predict(features)
def predict_with_intermediate(self, sample, verbose=True):
""" Predict some data
Parameters
----------
samples: np.array, size=(C, T)
training sample
Returns
-------
ordered dictionary including every intermediate result and the output
"""
if verbose:
print("Predict sample with intermediate matrices")
assert len(sample.shape) == 2
result = self.riemannian.onetrial_feature_with_intermediate(sample)
features = next(reversed(result.values()))
features = features.reshape(1, -1)
result["svm_result"] = self.classifier.decision_function(features)
result["prediction"] = self.classifier.predict(features)
return result
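    # A usage sketch for the intermediate-result path, assuming `qmodel` is an already fitted
    # QuantizedRiemannianModel and `sample` is a single (C, T) EEG trial:
    #
    #     steps = qmodel.predict_with_intermediate(sample)
    #     print(steps["prediction"])    # final class label
    #     print(list(steps.keys()))     # every intermediate matrix plus "svm_result"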
def get_data_dict(self):
""" Returns a nested dictionary containing all necessary data """
return {"num_bits": self.num_bits,
"bitshift_scale": self.bitshift_scale,
"SVM": {"weights": self.classifier.coef_,
"weight_scale": self.scale_weight,
"bias": self.classifier.intercept_},
"riemannian": self.riemannian.get_data_dict()}
| 31.993958 | 118 | 0.580642 | [
"Apache-2.0"
] | pulp-platform/multispectral-riemannian | multiscale_bci_python/riemannian_model.py | 10,590 | Python |
import logging
import os
import random
import time
from functools import lru_cache
import cv2
import numpy as np
import imgreco.main
from Arknights.helper import logger
from addons.activity import ActivityAddOn, get_stage_map
from addons.base import BaseAddOn, pil2cv, crop_cv_by_rect, show_img
from addons.common_cache import load_game_data
from imgreco.ocr.cnocr import ocr_and_correct
icon1 = cv2.imread(os.path.join(os.path.realpath(os.path.dirname(__file__)), 'icon1.png'), cv2.IMREAD_GRAYSCALE)
icon2 = cv2.imread(os.path.join(os.path.realpath(os.path.dirname(__file__)), 'icon2.png'), cv2.IMREAD_GRAYSCALE)
@lru_cache(maxsize=1)
def get_activity_infos():
return load_game_data('activity_table')['basicInfo']
@lru_cache()
def get_available_activity(display_type=None):
activity_infos = get_activity_infos()
name_set = set()
for aid, info in activity_infos.items():
if info.get('displayType') in {'SIDESTORY', 'BRANCHLINE'}:
if info['displayType'] == 'BRANCHLINE' or info.get('isReplicate'):
raw_name = info['name'][:-3] if info.get('isReplicate') else info['name']
if display_type is None or display_type == info['displayType']:
name_set.add(raw_name)
return name_set
def get_activity_name(activity):
name = activity['name']
if activity['isReplicate']:
return name[:-3]
return name
def crop_image_only_outside(gray_img, raw_img, threshold=128, padding=3):
mask = gray_img > threshold
m, n = gray_img.shape
mask0, mask1 = mask.any(0), mask.any(1)
col_start, col_end = mask0.argmax(), n - mask0[::-1].argmax()
row_start, row_end = mask1.argmax(), m - mask1[::-1].argmax()
return raw_img[row_start - padding:row_end + padding, col_start - padding:col_end + padding]
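# Example of the cropping helper, assuming `raw` is an RGB screenshot and `gray` its grayscale
# copy: pixels brighter than the threshold define the bounding box, which is then grown by
# `padding` pixels on every side before slicing the raw image:
#
#     gray = cv2.cvtColor(raw, cv2.COLOR_RGB2GRAY)
#     cropped = crop_image_only_outside(gray, raw, threshold=160, padding=3)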
class StartSpStageAddon(BaseAddOn):
def __init__(self, helper=None):
super(StartSpStageAddon, self).__init__(helper)
self.scale = self.helper.viewport[1] / 720
if self.helper.viewport != (1280, 720):
logger.warning('It may produce some weird effects when the resolution is not 1280x720.')
def apply_scale(self, value):
if self.scale == 1:
return value
return int(value * self.scale)
def run(self, stage_code: str, repeat_times: int = 1000, try_current_activity=True):
stage_code = stage_code.upper()
if try_current_activity:
try:
return ActivityAddOn(self.helper).run(stage_code, repeat_times)
except:
pass
stage_code_map, zone_linear_map = get_stage_map()
if stage_code not in stage_code_map:
            raise RuntimeError(f'Invalid stage: {stage_code}')
stage = stage_code_map[stage_code]
activity_id = stage['zoneId'].split('_')[0]
activity_infos = get_activity_infos()
activity = activity_infos[activity_id]
logger.debug(f'stage: {stage}, activity: {activity}')
self.enter_activity(activity)
stage_linear = zone_linear_map[stage['zoneId']]
self.helper.find_and_tap_stage_by_ocr(None, stage_code, stage_linear)
return self.helper.module_battle_slim(None, repeat_times)
def enter_activity(self, activity):
vh = self.vh
act_name = get_activity_name(activity)
if act_name not in get_available_activity():
            raise RuntimeError(f'Invalid activity: {act_name}')
self.open_terminal()
if activity['displayType'] == 'BRANCHLINE':
self.tap_branch_line()
else:
self.tap_side_story()
crop_flag = activity['displayType'] == 'SIDESTORY'
act_pos_map = self.get_all_act_pos(crop_flag)
if act_name not in act_pos_map:
if activity['displayType'] == 'BRANCHLINE':
                raise RuntimeError(f'Could not find the activity: {act_name}')
last_acts = act_pos_map.keys()
while True:
origin_x = random.randint(int(5.833 * vh), int(24.861 * vh))
origin_y = random.randint(int(57.222 * vh), int(77.917 * vh))
move = -random.randint(int(vh // 5), int(vh // 4))
self.helper.adb.touch_swipe2((origin_x, origin_y),
(random.randint(-20, 20), move), random.randint(900, 1200))
act_pos_map = self.get_all_act_pos(crop_flag)
if act_name in act_pos_map:
break
if last_acts == act_pos_map.keys():
                    raise RuntimeError(f'Could not find the activity: {act_name}')
last_acts = act_pos_map.keys()
logger.info(f'switch to {act_name}')
self.click(act_pos_map[act_name], 1)
self.tap_enter_activity()
def tap_back(self):
vw, vh = self.vw, self.vh
self.helper.tap_rect((2.222 * vh, 1.944 * vh, 22.361 * vh, 8.333 * vh))
time.sleep(0.5)
def get_all_act_pos(self, crop=False):
act_map = {}
screen = self.screenshot()
cv_screen = pil2cv(screen)
for icon in [icon1, icon2]:
act_map.update(self.get_act_pos_by_icon(cv_screen, icon, crop))
logger.info(act_map)
return act_map
def get_act_pos_by_icon(self, cv_screen, icon, crop=False):
vh, vw = self.vh, self.vw
raw_screen = cv_screen.copy()
if self.scale != 1:
cv_screen = cv2.resize(cv_screen, (int(self.helper.viewport[0] / self.scale), 720))
roi = crop_cv_by_rect(cv_screen, (0, 0, 10.000 * vh, 100.000 * vh))
roi = cv2.cvtColor(roi, cv2.COLOR_RGB2GRAY)
result = cv2.matchTemplate(roi, icon, cv2.TM_CCOEFF_NORMED)
loc = np.where(result >= 0.8)
tag_set = set()
tag_set2 = set()
res = {}
dbg_screen = raw_screen.copy()
available_activity = get_available_activity()
for pt in zip(*loc[::-1]):
pos_key = (pt[0] // 100, pt[1] // 100)
pos_key2 = (int(pt[0] / 100 + 0.5), int(pt[1] / 100 + 0.5))
if pos_key in tag_set or pos_key2 in tag_set2:
continue
tag_set.add(pos_key)
tag_set2.add(pos_key2)
if icon1 is icon:
x, y = (int(pt[0]) + 35, int(pt[1]) - 6)
tw, th = map(self.apply_scale, (180, 40))
else:
x, y = (int(pt[0]) + 35, int(pt[1]) - 3)
tw, th = map(self.apply_scale, (150, 30))
l, t = map(self.apply_scale, (x, y))
tag_img = raw_screen[t:t + th, l:l + tw]
if crop:
gray_tag = cv2.cvtColor(tag_img, cv2.COLOR_RGB2GRAY)
tag_img = crop_image_only_outside(gray_tag, tag_img, 160)
factor = 2.5 - self.scale
if factor > 1:
# print(factor)
tag_img = cv2.resize(tag_img, (0, 0), fx=factor, fy=factor, interpolation=cv2.INTER_LINEAR)
# show_img(tag_img)
# conv-lite-fc has better accuracy, but it is slower than densenet-lite-fc.
name = ocr_and_correct(tag_img, available_activity, model_name='densenet-lite-fc', log_level=logging.INFO)
if name:
res[name] = (int(l + 85 * self.scale), int(t + 20 * self.scale))
cv2.rectangle(dbg_screen, (l, t), (l + tw, t + th), (255, 255, 0), 2)
# show_img(dbg_screen)
return res
def tap_side_story(self):
vh, vw = self.vh, self.vw
logger.info('open side story view')
self.helper.tap_rect((44.297 * vw, 88.611 * vh, 56.406 * vw, 98.750 * vh))
time.sleep(1)
def tap_branch_line(self):
logger.info('open branch line view')
vh, vw = self.vh, self.vw
self.helper.tap_rect((29.375 * vw, 88.611 * vh, 41.719 * vw, 98.750 * vh))
time.sleep(1)
def tap_enter_activity(self):
logger.info('enter activity')
vh, vw = self.vh, self.vw
self.helper.tap_rect((100 * vw - 24.583 * vh, 69.167 * vh, 100 * vw - 8.750 * vh, 75.556 * vh))
time.sleep(1)
def open_terminal(self):
self.helper.back_to_main()
logger.info('open terminal')
self.helper.tap_quadrilateral(imgreco.main.get_ballte_corners(self.screenshot()))
time.sleep(1)
if __name__ == '__main__':
StartSpStageAddon().run('CB-10', 0, False)
# StartSpStageAddon().get_all_act_pos()
| 40.737864 | 118 | 0.604743 | [
"MIT"
] | Konano/ArknightsAutoHelper | addons/start_sp_stage/__init__.py | 8,440 | Python |
import os
import sys
sys.path.append(os.path.dirname(__file__))
class AbstractSystemMeter:
"""Common system meter interface for all resource monitorings.
For each system resource to monitor, a wrapper class will be written as subclass of this one. This way we have
a common "interface" for all system resources to test.
    This approach is chosen since Python has no real interfaces like Java or C#.
"""
def __init__(self, resource_name):
self.resource_name = resource_name
def measure(self, func):
self._start()
func()
return self._stop()
def _start(self):
raise NotImplementedError("The method is not implemented yet.")
def _stop(self):
raise NotImplementedError("The method is not implemented yet.")
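# A minimal subclass sketch (a hypothetical CpuTimeMeter) showing how concrete resource
# monitors are expected to fill in the two hooks:
#
#     import time
#
#     class CpuTimeMeter(AbstractSystemMeter):
#         def __init__(self):
#             super().__init__("cpu_time")
#
#         def _start(self):
#             self._t0 = time.process_time()
#
#         def _stop(self):
#             return time.process_time() - self._t0
#
#     # CpuTimeMeter().measure(lambda: sum(range(10**6))) returns the CPU seconds spent.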
| 26.666667 | 114 | 0.6975 | [
"MIT"
] | surfmachine/language-detection | measure/system/AbstractSystemMeter.py | 800 | Python |
#!/usr/bin/python
from __future__ import division
import sys
import math
import cmath
import numpy as np
from numpy import genfromtxt
import csv
from decimal import Decimal
import os
import random
from lyrics import *
# BEATLES: Bundle of Essential and Assistive Tools Library for Electronic Structure
# A tribute to the Beatles
#
# Updated June 14, 2020 by Hassan Harb
#
# / | \
# / | \
# /O O | O O\
# //|\ /|\ /|\ /|\\
# /=/ \=/ \= / \=/ \=\
# / == == == == == \
# / == == == == == \
# (The original Beatles)
# (ASCII retrieved from https://www.asciiart.eu/music/musicians/beatles )
#
#########################################################################
#
# NBasGrab: reads in a name of .fchk file
# output: -Number of basis functions
# -Charge
# -Multiplicity
# -Number of Atoms
# -Cartesian Coordinates
# -Atomic Symbols
# -SCF Energy
# -Total Energy (needs to be added)
# Section 1: Reading from gaussian formatted checkpoint file
def NBasGrab(filename):
NBasis = 0
NElem = 0
SCFEnergy = 0.0
Charge = 0
Multiplicity = 0
NAtoms = 0
temp = 1
with open(filename, 'r') as origin:
for line in origin:
if "Number of basis functions" in line:
words = line.split()
for i in words:
for letter in i:
if(letter.isdigit()):
NBasis = NBasis*10 + int(letter)
if "Charge " in line:
words = line.split()
for i in words:
for letter in i:
if(letter=="-"):
temp = -1
if(letter.isdigit()):
Charge = Charge*10 + int(letter)
Charge = Charge*temp
if "Multiplicity" in line:
words = line.split()
for i in words:
for letter in i:
if(letter.isdigit()):
Multiplicity = Multiplicity*10 + int(letter)
if "Number of atoms" in line:
words = line.split()
for i in words:
for letter in i:
if(letter.isdigit()):
NAtoms = NAtoms*10 + int(letter)
if "SCF Energy" in line:
words = line.split()
# print "SCF Energy = ", words[3], " Hartree"
SCFEnergy = float(words[3])
# print "SCF Energy (float) = ", SCFEnergy
# if "Total Energy" in line:
# words = line.split()
# TotalEnergy = float(words[3])
# print "Total Energy = ", TotalEnergy, " Hartree"
NElem = NBasis*NBasis
# print "Number of Basis Functions (subroutine) = ", NBasis, "\n"
# print "Charge (subroutine) = ", Charge, "\n"
return NBasis, NElem, Charge, Multiplicity, NAtoms, SCFEnergy
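# Usage sketch, assuming "water.fchk" is a Gaussian formatted checkpoint file on disk:
#
#   NBasis, NElem, Charge, Multiplicity, NAtoms, SCFEnergy = NBasGrab("water.fchk")
#   print NBasis, Charge, Multiplicity, NAtoms, SCFEnergy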
# GeomGet: reads in the file name, number of atoms
# Output: -One dimensional vector (NAtoms * 3) that includes the cartesian coordinates of each atom
#
def GeomGet(filename,NAtoms):
p = 0
r = 0
n = 1
NElements = NAtoms * 3
RawCart = np.zeros(NElements)
if (NElements%5 == 0):
n = 0
RawCartLines = int(NElements/5) + n
# print "Raw Cart lines = ", RawCartLines
# print "Number of Atoms =", NAtoms
# print "Number of coordinates =", NElements
with open(filename,'r') as origin:
for i, line in enumerate(origin):
if "Current cartesian coordinates" in line:
i = i + 1
pointer = i
# print "Cartesian Coordinates starts at line :", pointer
endpointer = pointer + RawCartLines - 1
# print "Cartesian Coordinates ends at line :", endpointer
for m in range(0,endpointer - pointer +1):
nextline = origin.next()
nextline = nextline.split()
for p in range(p,len(nextline)):
RawCart[r] = nextline[p]
r = r + 1
p = 0
# print "Raw Cart (subroutine) = ", RawCart
RawCart = RawCart/1.88973
# print "Raw Cart (converted to Angstroms) = ", RawCart
return RawCart
# GetAtoms: Reads in file name, number of atoms
# output: -One dimensional vector (NAtoms) that contains the atomic numbers of the atoms
#
def GetAtoms(filename1,NAtoms):
p = 0
r = 0
n = 1
AtomicNum = np.zeros(NAtoms)
if (NAtoms%6 ==0):
n = 0
AtomLines = int(NAtoms/6) + n
with open(filename1,'r') as origin:
for i, line in enumerate(origin):
if "Atomic numbers" in line:
i = i + 1
pointer = i
endpointer = pointer + AtomLines -1
for m in range(0, endpointer - pointer + 1):
nextline = origin.next()
nextline = nextline.split()
for p in range(p,len(nextline)):
AtomicNum[r] = nextline[p]
r = r + 1
p = 0
return AtomicNum
# MatGrab: Reads in filename, NBasis, user-defined switch
# Output: -Alpha MO Coefficients (Done)
# -Beta MO Coefficients (Done)
# -Alpha Density Matrix (Done)
# -Beta Density Matrix (Done)
# -Alpha MO Energies (Done)
# -Beta MO Energies (Done)
#
# Switch: 1 = Alpha MO Coefficients
# -1 = Beta MO Coefficients
# 2 = Alpha and Beta Density Matrices
# 3 = Alpha MO Energies
# -3 = Beta MO Energies
#
def MatGrab(filename,NBasis,switch):
if (switch == 1):
filename1 = filename
MOElements = NBasis * NBasis
MOlines = int(MOElements/5) + 1
if (NBasis%5 == 0):
MOlines = MOlines - 1
p = 0
r = 0
AOE = 0
MOrawa = np.zeros(NBasis*NBasis)
with open(filename1,'r') as origin:
for i, line in enumerate(origin):
if "Alpha Orbital Energies" in line:
AOE = i
if "Alpha MO coefficients" in line:
i=i+1
AMO=i
# print "Alpha MO coefficients starts at line :", i
j=i+MOlines-1
# print "Alpha MO coefficients ends at line :", j
for m in range(0,j-i+1):
nextline = origin.next()
nextline = nextline.split()
for p in range(p,len(nextline)):
MOrawa[r] = nextline[p]
r = r+1
p = 0
# print "MO Raw = ", MOrawa
return MOrawa
if (switch == -1):
filename1 = filename
MOElements = NBasis * NBasis
MOlines = int(MOElements/5) + 1
if (NBasis%5 == 0):
MOlines = MOlines - 1
p = 0
r = 0
BOE = 0
BMO = 0
MOrawb = np.zeros(NBasis*NBasis)
with open(filename1,'r') as origin:
for i, line in enumerate(origin):
if "Beta Orbital Energies" in line:
BOE = i
if "Beta MO coefficients" in line:
i=i+1
BMO=i
j=i+MOlines-1
for m in range(0,j-i+1):
nextline = origin.next()
nextline = nextline.split()
for p in range(p,len(nextline)):
MOrawb[r] = nextline[p]
r = r+1
p = 0
# print "MO Raw = ", MOrawb
return MOrawb
if (switch == 2):
filename1 = filename
PElements = int(NBasis*(NBasis+1)/2)
Plines = int(PElements/5) + 1
TotalPraw = np.zeros(PElements)
SpinPraw = np.zeros(PElements)
with open(filename1,'r') as origin:
for i, line in enumerate(origin):
if "Total SCF Density" in line:
i=i+1
r = 0
p = 0
# print "Total SCF Density starts at line :", i
j=i+Plines-1
# print "Total SCF Density ends at line :", j
for m in range(0,j-i+1):
nextline = origin.next()
nextline = nextline.split()
for p in range(0,len(nextline)):
if (r != PElements):
TotalPraw[r] = nextline[p]
r = r+1
p = 0
# HH + : Bug ... :(
with open(filename1,'r') as origin:
for i, line in enumerate(origin):
if "Spin SCF Density" in line:
# print "Found Spin density!"
i=i+1
r = 0
p = 0
# print "Spin SCF Density starts at line: ", i
j=i+Plines-1
# print "Spin SCF Density ends at line: ", j
for m in range(0,j-i+1):
nextline = origin.next()
nextline = nextline.split()
for p in range(p,len(nextline)):
if (r != PElements):
SpinPraw[r] = nextline[p]
r = r+1
p = 0
# HH - : End of bug (hopefully!)
PalphaRaw = (np.add(TotalPraw,SpinPraw)) * 0.5
PbetaRaw = (np.subtract(TotalPraw,SpinPraw)) * 0.5
Palpha = symmetrize(PalphaRaw)
Pbeta = symmetrize(PbetaRaw)
return Palpha, Pbeta
if (switch == 3):
filename1 = filename
AlphaMO = np.zeros(NBasis)
AlphaMOlines = int(NBasis/5) + 1
if (NBasis % 5 == 0):
AlphaMOlines = AlphaMOlines - 1
with open(filename1,'r') as origin:
for i, line in enumerate(origin):
if "Alpha Orbital Energies" in line:
i = i + 1
r = 0
p = 0
# print "Alpha MO Energies starts at line: ", i
j = i + AlphaMOlines - 1
# print "Alpha MO Energies ends at line: ", j
for m in range(0,j-i+1):
nextline = origin.next()
nextline = nextline.split()
for p in range(p,len(nextline)):
AlphaMO[r] = nextline[p]
r = r + 1
p = 0
# print "Alpha MO energies = ", AlphaMO
return AlphaMO
if (switch == -3):
filename1 = filename
BetaMO = np.zeros(NBasis)
BetaMOlines = int(NBasis/5) + 1
if (NBasis % 5 == 0):
BetaMOlines = BetaMOlines - 1
with open(filename1,'r') as origin:
for i, line in enumerate(origin):
if "Beta Orbital Energies" in line:
i = i + 1
r = 0
p = 0
# print "Beta MO Energies starts at line: ", i
j = i + BetaMOlines - 1
# print "Beta MO Energies ends at line: ", j
for m in range(0,j-i+1):
nextline = origin.next()
nextline = nextline.split()
for p in range(p,len(nextline)):
BetaMO[r] = nextline[p]
r = r + 1
p = 0
# print "Beta MO energies = ", BetaMO
return BetaMO
# sci_notation: reads in a number
# output: prints the number in the desired scientific notation. note that this function has a different output than the one found in nio.py
#
def sci_notation(n):
a = '%.8f' % n
return '%.8f' % Decimal(n.real)
# fchk_notation: reads in a number
# output: prints the number in the desired notation for fchk files
#
def fchk_notation(n):
a = '%.8E' % n
return '%.8E' % Decimal(n.real)
# AtomicSymbol: Reads in atomic number of the element
# Output: -Atomic Symbol
#
def AtomicSymbol(AtomicNumber):
p = AtomicNumber - 1
  PTlist = ['H','He','Li','Be','B','C','N','O','F','Ne','Na','Mg','Al','Si','P','S','Cl','Ar','K','Ca','Sc','Ti','V','Cr','Mn','Fe','Co','Ni','Cu','Zn','Ga','Ge','As','Se','Br','Kr','Rb','Sr','Y','Zr','Nb','Mo','Tc','Ru','Rh','Pd','Ag','Cd','In','Sn','Sb','Te','I','Xe','Cs','Ba','La','Ce','Pr','Nd','Pm','Sm','Eu','Gd','Tb','Dy','Ho','Er','Tm','Yb','Lu','Hf','Ta','W','Re','Os','Ir','Pt','Au','Hg','Tl','Pb','Bi','Po','At','Rn','Fr','Ra','Ac','Th','Pa','U','Np','Pu','Am','Cm','Bk','Cf','Es','Fm','Md','No','Lr','Rf','Db','Sg','Bh','Hs','Mt','Ds','Rg','Cn','Uut','Fl','Uup','Lv','Uus','Uuo']
# print "There are currently ", len(PTlist), " atoms defined"
return PTlist[p]
# Symmetrize: Reads in a packed symmetric column matrix into NBasis x NBasis square matrix
# Output: -Matrix(NBasis,NBasis)
#
def symmetrize(a):
Nbas = int((np.sqrt(8*len(a)+1)-1)/2)
b = np.zeros((Nbas,Nbas))
n = 0
for i in range(0,Nbas):
for j in range(0,i+1):
b[i,j]=a[n]
b[j,i]=a[n]
n=n+1
return b
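# Worked example: the packed lower triangle [1,2,3,4,5,6], stored row by row, unpacks to the
# symmetric 3x3 matrix
#   [[1, 2, 4],
#    [2, 3, 5],
#    [4, 5, 6]]
# i.e. symmetrize(np.array([1,2,3,4,5,6])) rebuilds both halves from the 6 unique elements.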
# Column2Square: Reads in a packed column matrix, number of basis functions.
# Output: -Matrix(NBasis,NBasis)
def column2square(A,NBasis):
C = np.zeros((NBasis,NBasis))
t=0
for i in range(0,NBasis):
for j in range(0,NBasis):
C[j,i]=float(A[t])
t=t+1
return C
# GetOverlap: Reads in packed column matrix, number of basis functions.
# Output: -Overlap Matrix (NBasis,NBasis)
def GetOverlap(A,NBasis):
C = column2square(A,NBasis)
CInv = np.linalg.inv(C)
S = np.dot(np.transpose(CInv),CInv)
return S
# PrintSI: Reads in filename, user-defined switch
# Output: -SCF Energy, Charge, Multiplicity, Geometry
#
# Switch: 1 = print to new file (filename1-SI.txt)
# -1 = print to screen
#
def PrintSI(filename1,switch):
NBasis, NElementsGrab, Charge, Multiplicity, NAtoms, SCFEnergy = NBasGrab(filename1)
AtomicNum = GetAtoms(filename1,NAtoms)
RawCart = GeomGet(filename1,NAtoms)
Cart = np.resize(RawCart,(NAtoms,3))
filename2 = os.path.splitext(filename1)[0] + "-SI.txt"
filename1 = os.path.splitext(filename1)[0]
if (switch == 1):
with open(filename2,'w') as f2:
f2.write("SI info for ")
f2.write(filename1)
f2.write("\n\n")
f2.write("SCF Energy = ")
f2.write(str(SCFEnergy))
f2.write(" Hartree")
f2.write("\n\n")
f2.write(str(Charge))
f2.write(" ")
f2.write(str(Multiplicity))
f2.write("\n")
for i in range(0,NAtoms):
h = i + 1
z = AtomicNum[i]
Atom = AtomicSymbol(int(z))
f2.write(Atom)
f2.write(" ")
for j in range(0,3):
if (Cart[i,j] >= 0):
f2.write(" ")
f2.write(str(sci_notation(Cart[i,j])))
f2.write(" ")
f2.write("\n")
f2.write(" ")
f2.write("\n\n")
return filename2
if (switch == -1):
print "SCF Energy = ", SCFEnergy, " Hartree\n"
print "Charge = ", Charge, "\n"
print "Multiplicity = ", Multiplicity, "\n"
print "Cartesian Geometry:\n"
for i in range(0,NAtoms):
h = i + 1
z = AtomicNum[i]
Atom = AtomicSymbol(int(z))
print Atom, sci_notation(Cart[i,0]), sci_notation(Cart[i,1]), sci_notation(Cart[i,2])
print "\n"
# CalcNO: Reads in filename, NBasis
# Output: Natural Orbitals eigenvalues and eigenvectors (both alpha and beta)
#
def CalcNO(filename,NBasis):
Palpha, Pbeta = MatGrab(filename,NBasis,2)
C = MatGrab(filename,NBasis,1)
S = GetOverlap(C,NBasis)
Svals, Svecs = np.linalg.eig(S)
Sval_minhalf = (np.diag(Svals**(0.5)))
Shalf = np.dot(Svecs,np.dot(Sval_minhalf,np.transpose(Svecs)))
NOvalsA, NOvecsA = np.linalg.eig(np.dot(Shalf,np.dot(Shalf,Palpha)))
NOvalsB, NOvecsB = np.linalg.eig(np.dot(Shalf,np.dot(Shalf,Pbeta)))
NOvalsA = NOvalsA.real
NOvalsB = NOvalsB.real
NOvecsA = NOvecsA.real
NOvecsB = NOvecsB.real
NOvecsA = np.dot(np.linalg.inv(Shalf),NOvecsA)
NOvecsB = np.dot(np.linalg.inv(Shalf),NOvecsB)
return NOvecsA, NOvecsB, NOvalsA, NOvalsB
# NElec: Reads in filename
# Output: Total number of electrons, Alpha Electrons, Beta Electrons
#
def NElec(filename):
NElec = 0
NAlpha = 0
NBeta = 0
with open(filename, 'r') as origin:
for line in origin:
if "Number of electrons" in line:
words = line.split()
for i in words:
for letter in i:
if(letter.isdigit()):
NElec = NElec*10 + int(letter)
if "Number of alpha electrons" in line:
words = line.split()
for i in words:
for letter in i:
if(letter.isdigit()):
NAlpha = NAlpha*10 + int(letter)
if "Number of beta electrons" in line:
words = line.split()
for i in words:
for letter in i:
if(letter.isdigit()):
NBeta = NBeta*10 + int(letter)
return NElec, NAlpha, NBeta
# OrbTransform: Reads in Alpha Density Matrix, Beta Density Matrix, Overlap Matrix, n
# Output: New Density Matrices: P' = S**(1-n).P.S**(n)
#
def OrbTransform(Pa,Pb,S,n):
Svals, Svecs = np.linalg.eig(S)
Sval1 = np.diag(Svals**(n))
Sval2 = np.diag(Svals**(1-n))
Sdag1 = np.dot(Svecs,np.dot(Sval1,np.transpose(Svecs)))
Sdag2 = np.dot(Svecs,np.dot(Sval2,np.transpose(Svecs)))
PdagAlpha = np.dot(Sdag1,np.dot(Pa,Sdag2))
PdagBeta = np.dot(Sdag1,np.dot(Pb,Sdag2))
# print "OrbTransform Subroutine test:\n"
# print "PdagAlpha = ", PdagAlpha, "\n"
# print "PdagBeta = ", PdagBeta, "\n"
OvalsA, OvecsA = np.linalg.eig(PdagAlpha)
OvalsB, OvecsB = np.linalg.eig(PdagBeta)
# print "OVals A = ", OvalsA, "\n"
# print "OVecs A = ", OvecsA, "\n"
# print "OVals B = ", OvalsB, "\n"
# print "OVecs B = ", OvecsB, "\n"
return PdagAlpha, PdagBeta, OvecsA, OvecsB, OvalsA, OvalsB
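# With n = 0.5 the transformation reduces to the symmetric form P' = S**(0.5).P.S**(0.5),
# whose eigenvalues are the natural-orbital occupations, so OrbTransform(Pa, Pb, S, 0.5)
# offers one way to cross-check the occupations returned by CalcNO above.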
# CartoZmat: Transforms Cartesian coordinates to z-matrix form
# Input: NAtoms, RawCart, AtomicNum
# Output: z-matrix printed on the screen
#
# Note that there are three helper functions here, DistAB, AngleABC, and TorsionABCD.
# They are used to calculate the appropriate parameters for the z-matrix
# switch = 1 : print z-matrix to screen
# switch = -1 : print z-matrix to new textfile
def DistAB(e1,e2):
R = 0.0
for i in range(len(e1)):
R = R + (e1[i]-e2[i])**(2)
R = R**(0.5)
return R
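# Quick check: DistAB([0,0,0],[3,4,0]) returns 5.0, the Euclidean distance between the two points.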
def AngleABC(e1,e2,e3):
eab_x = (e2[0] - e1[0]) / DistAB(e1,e2)
eab_y = (e2[1] - e1[1]) / DistAB(e1,e2)
eab_z = (e2[2] - e1[2]) / DistAB(e1,e2)
ebc_x = - (e3[0] - e2[0]) / DistAB(e2,e3)
ebc_y = - (e3[1] - e2[1]) / DistAB(e2,e3)
ebc_z = - (e3[2] - e2[2]) / DistAB(e2,e3)
eab = [eab_x, eab_y, eab_z]
ebc = [ebc_x, ebc_y, ebc_z]
cos_angle = np.dot(eab,ebc)
angle = np.arccos(cos_angle) / 3.1415926535 * 180
return eab, ebc, angle
def TorsionABCD(e1,e2,e3,e4):
eab_x = (e2[0] - e1[0]) / DistAB(e1,e2)
eab_y = (e2[1] - e1[1]) / DistAB(e1,e2)
eab_z = (e2[2] - e1[2]) / DistAB(e1,e2)
ebc_x = (e3[0] - e2[0]) / DistAB(e2,e3)
ebc_y = (e3[1] - e2[1]) / DistAB(e2,e3)
ebc_z = (e3[2] - e2[2]) / DistAB(e2,e3)
ecd_x = (e4[0] - e3[0]) / DistAB(e3,e4)
ecd_y = (e4[1] - e3[1]) / DistAB(e3,e4)
ecd_z = (e4[2] - e3[2]) / DistAB(e3,e4)
eab = [eab_x, eab_y, eab_z]
ebc = [ebc_x, ebc_y, ebc_z]
ecd = [ecd_x, ecd_y, ecd_z]
n1 = np.cross(eab,ebc) / (np.linalg.norm(np.cross(eab,ebc)))
n2 = np.cross(ebc,ecd) / (np.linalg.norm(np.cross(ebc,ecd)))
u1 = n2
u3 = ebc/np.linalg.norm(ebc)
u2 = np.cross(u3,u1)
cos_angle = np.dot(n1,n2)
sin_angle = np.dot(n1,u2)
angle = -math.atan2(sin_angle,cos_angle) / 3.1415926535 * 180
return angle
def CartoZmat(RawCart,NAtoms,AtomicNum,filename2,switch):
if (switch == 1):
Cart = np.resize(RawCart,(NAtoms,3))
# print "Cartesian = ", Cart
# print "Atoms list = ", AtomicNum
for i in range(len(AtomicNum)):
Symbol = AtomicSymbol(int(AtomicNum[i]))
if (i > 2):
e4 = [Cart[i,0],Cart[i,1],Cart[i,2]]
e3 = [Cart[2,0],Cart[2,1],Cart[2,2]]
e2 = [Cart[1,0],Cart[1,1],Cart[1,2]]
e1 = [Cart[0,0],Cart[0,1],Cart[0,2]]
R = DistAB(e4,e1)
eab, ebc, A = AngleABC(e2,e1,e4)
D = TorsionABCD(e4,e1,e2,e3)
print Symbol, 1 , R , 2, A , 3, D
elif (i > 1):
e4 = [Cart[i,0],Cart[i,1],Cart[i,2]]
e2 = [Cart[1,0],Cart[1,1],Cart[1,2]]
e1 = [Cart[0,0],Cart[0,1],Cart[0,2]]
R = DistAB(e4,e1)
eab, ebc, A = AngleABC(e2,e1,e4)
print Symbol, 1 , R , 2, A
elif (i > 0):
e4 = [Cart[i,0],Cart[i,1],Cart[i,2]]
e1 = [Cart[0,0],Cart[0,1],Cart[0,2]]
R = DistAB(e4,e1)
print Symbol, 1, R
elif (i == 0):
print Symbol
elif (switch == -1):
Cart = np.resize(RawCart,(NAtoms,3))
#open new file
filename = os.path.splitext(filename2)[0] + "-zmat.txt"
with open(filename,'w') as f2:
NBasis, NElem, Charge, Multiplicity, NAtoms, SCFEnergy = NBasGrab(filename2)
f2.write("Z-Matrix file for ")
f2.write(filename2)
f2.write("\n\n")
f2.write(str(Charge))
f2.write(" ")
f2.write(str(Multiplicity))
f2.write("\n")
for i in range(len(AtomicNum)):
Symbol = AtomicSymbol(int(AtomicNum[i]))
if (i > 2):
e4 = [Cart[i,0],Cart[i,1],Cart[i,2]]
e3 = [Cart[2,0],Cart[2,1],Cart[2,2]]
e2 = [Cart[1,0],Cart[1,1],Cart[1,2]]
e1 = [Cart[0,0],Cart[0,1],Cart[0,2]]
R = DistAB(e4,e1)
eab, ebc, A = AngleABC(e2,e1,e4)
D = TorsionABCD(e4,e1,e2,e3)
f2.write(Symbol)
f2.write(" 1 ")
f2.write(str(R))
f2.write(" 2 ")
f2.write( str(A))
f2.write(" 3 ")
f2.write(str(D))
f2.write("\n")
elif (i > 1):
e4 = [Cart[i,0],Cart[i,1],Cart[i,2]]
e2 = [Cart[1,0],Cart[1,1],Cart[1,2]]
e1 = [Cart[0,0],Cart[0,1],Cart[0,2]]
R = DistAB(e4,e1)
eab, ebc, A = AngleABC(e2,e1,e4)
f2.write(str(Symbol))
f2.write(" 1 ")
f2.write (str(R))
f2.write(" 2 ")
f2.write(str(A))
f2.write("\n")
elif (i > 0):
e4 = [Cart[i,0],Cart[i,1],Cart[i,2]]
e1 = [Cart[0,0],Cart[0,1],Cart[0,2]]
R = DistAB(e4,e1)
f2.write(Symbol)
f2.write(" 1 ")
f2.write(str(R))
f2.write("\n")
elif (i == 0):
f2.write(Symbol)
f2.write("\n")
# print "test test"
# Section 2: Reading from gaussian matrix files
# MatGrab2: Reads in matrices from gaussian matrix file
#
# Switch: 1 : Alpha Core Hamiltonian
# -1 : Beta Core Hamiltonian
# 2 : Alpha Fock Matrix
# -2 : Beta Fock Matrix
# 3 : Dipole matrix elements (x,y,z) [IN PROGRESS]
def MatGrab2(filename,NBasis,switch):
print "Reading from Matrix file\n"
if (switch == 1):
print "Reading Alpha Core Hamiltonian Matrix:\n"
NElements = int(NBasis*(NBasis + 1)/2)
      print "Looking for ", NElements, " elements of the core hamiltonian\n"
CoreHRawa = np.zeros(NElements)
p = 0
n = 0
r = 0
with open(filename,'r') as origin:
for i, line in enumerate(origin):
if "CORE HAMILTONIAN ALPHA" in line :
while (p < (NElements)):
NLines = NBasis - 5*r
if (NLines < 0):
                     print "Done Reading Core Hamiltonian"
j = i+3
i = i + 4
end = j + NLines - 1
nextline = origin.next()
for m in range(i,i+NLines):
nextline = origin.next()
words = nextline.split()
for j in range(1,len(words)):
CoreHRawa[p] = float(words[j].replace('D','E'))
p = p + 1
r = r + 1
i = m - 2
return CoreHRawa
if (switch == -1):
print "Reading Beta Core Hamiltonian Matrix:\n"
NElements = int(NBasis*(NBasis + 1)/2)
      print "Looking for ", NElements, " elements of the core hamiltonian\n"
CoreHRawb = np.zeros(NElements)
p = 0
n = 0
r = 0
with open(filename,'r') as origin:
for i, line in enumerate(origin):
if "CORE HAMILTONIAN BETA" in line :
while (p < (NElements)):
NLines = NBasis - 5*r
if (NLines < 0):
                     print "Done Reading Core Hamiltonian"
j = i+3
i = i + 4
end = j + NLines - 1
nextline = origin.next()
for m in range(i,i+NLines):
nextline = origin.next()
words = nextline.split()
for j in range(1,len(words)):
CoreHRawb[p] = float(words[j].replace('D','E'))
p = p + 1
r = r + 1
i = m - 2
return CoreHRawb
if (switch == 2):
print "Reading Alpha Fock Matrix:\n"
NElements = int(NBasis*(NBasis + 1)/2)
print "Looking for ", NElements, " elements of the fock matrix\n"
FockRawA = np.zeros(NElements)
p = 0
n = 0
r = 0
with open(filename,'r') as origin:
for i, line in enumerate(origin):
if "ALPHA FOCK MATRIX" in line :
while (p < (NElements)):
NLines = NBasis - 5*r
if (NLines < 0):
print "Done Reading fock matrix"
j = i+3
i = i + 4
end = j + NLines - 1
nextline = origin.next()
for m in range(i,i+NLines):
nextline = origin.next()
words = nextline.split()
for j in range(1,len(words)):
FockRawA[p] = float(words[j].replace('D','E'))
p = p + 1
r = r + 1
i = m - 2
return FockRawA
if (switch == -2):
print "Reading Beta Fock Matrix:\n"
NElements = int(NBasis*(NBasis + 1)/2)
print "Looking for ", NElements, " elements of the fock matrix\n"
FockRawB = np.zeros(NElements)
p = 0
n = 0
r = 0
with open(filename,'r') as origin:
for i, line in enumerate(origin):
if "BETA FOCK MATRIX" in line :
while (p < (NElements)):
NLines = NBasis - 5*r
if (NLines < 0):
print "Done Reading fock matrix"
j = i+3
i = i + 4
end = j + NLines - 1
nextline = origin.next()
for m in range(i,i+NLines):
nextline = origin.next()
words = nextline.split()
for j in range(1,len(words)):
FockRawB[p] = float(words[j].replace('D','E'))
p = p + 1
r = r + 1
i = m - 2
return FockRawB
if (switch == 3):
# print "Reading Dipole integrals, matrix x\n"
NElements = int(NBasis*(NBasis +1)/2)
# print "Looking for ", NElements, " elements of the Dipole integrals matrix x\n"
DipX_Raw = np.zeros(NElements)
p = 0
n = 0
r = 0
with open(filename,'r') as origin:
for i, line in enumerate(origin):
if " DIPOLE INTEGRALS, matrix 1" in line:
while (p < NElements):
NLines = NBasis - 5*r
if (NLines < 0):
# print "Done reading Dipole X matrix\n"
j = i+3
i = i + 4
end = j + NLines -1
nextline = origin.next()
words = nextline.split()
for m in range(i,i+NLines):
nextline = origin.next()
words = nextline.split()
for j in range(1,len(words)):
DipX_Raw[p] = float(words[j].replace('D','E'))
p = p + 1
r = r + 1
i = m - 2
# print "Dip X raw = ", DipX_Raw
# print "Reading Dipole integrals, matrix y\n"
NElements = int(NBasis*(NBasis +1)/2)
print "Looking for ", NElements, " elements of the Dipole integrals matrix y\n"
DipY_Raw = np.zeros(NElements)
p = 0
n = 0
r = 0
with open(filename,'r') as origin:
for i, line in enumerate(origin):
if " DIPOLE INTEGRALS, matrix 2" in line:
while (p < NElements):
NLines = NBasis - 5*r
if (NLines < 0):
# print "Done reading Dipole Y matrix\n"
j = i+3
i = i + 4
end = j + NLines -1
nextline = origin.next()
words = nextline.split()
for m in range(i,i+NLines):
nextline = origin.next()
words = nextline.split()
for j in range(1,len(words)):
DipY_Raw[p] = float(words[j].replace('D','E'))
p = p + 1
r = r + 1
i = m - 2
# print "Dip Y raw = ", DipY_Raw
# print "Looking for ", NElements, " elements of the Dipole integrals matrix z\n"
DipZ_Raw = np.zeros(NElements)
p = 0
n = 0
r = 0
with open(filename,'r') as origin:
for i, line in enumerate(origin):
if " DIPOLE INTEGRALS, matrix 3" in line:
while (p < NElements):
NLines = NBasis - 5*r
if (NLines < 0):
print "Done reading Dipole Z matrix\n"
j = i+3
i = i + 4
end = j + NLines -1
nextline = origin.next()
words = nextline.split()
for m in range(i,i+NLines):
nextline = origin.next()
words = nextline.split()
for j in range(1,len(words)):
DipZ_Raw[p] = float(words[j].replace('D','E'))
p = p + 1
r = r + 1
i = m - 2
# print "Dip Z raw = ", DipZ_Raw
return symmetrizeMat(DipX_Raw), symmetrizeMat(DipY_Raw), symmetrizeMat(DipZ_Raw)
# SymmetrizeMat: Reads in packed matrix (recovered from Matrix file) and prints out NBasis x NBasis matrix
# Input: Packed lower triangular A
# Output: N x N Matrix
def symmetrizeMat(a):
NBasis = int((np.sqrt(8*len(a)+1)-1)/2)
NewMat = np.zeros((NBasis,NBasis))
NElements = len(a)
t = 0
l = 0
start = 0
loop = NBasis
nBlock = int(NBasis/5)
nRem = NBasis%5
# print "nBlock = ", nBlock
# print "nRem = ", nRem
i = start
j = start
if (nBlock == 0):
nBlock =1
while (l < nBlock):
# print "retrieving block ", l
for i in range (start,loop):
for j in range(start,start+5):
if (j<=i):
# print "i,j = ",i,j
NewMat[i,j] = a[t]
NewMat[j,i] = a[t]
# print "A[t]= ", a[t]
t = t + 1
start = start + 5
l = l + 1
# print "t = ", t
# print "values of i and j after nBlock loop is over: ", i, j
j = j + 1
start = j
# print "NBasis - nRem = ", NBasis -nRem
i = NBasis - nRem
while (i < NBasis):
j = start
while (j <= i):
# print "i,j = ",i,j
NewMat[i,j] = a[t]
NewMat[j,i] = a[t]
# print "A[t]= ", a[t]
t = t + 1
j = j + 1
i = i + 1
# print "final value of t = ", t
return NewMat
# ERIRead: reads in regular 2e integrals from formatted matrix file
# Note that to get these integrals, use SCF=Conventional and int=NoRaff (saves integrals to disk and prints out regular 2e integrals)
# Input: matrix filename
# Output: 2D Matrix, two columns: Column 1 = compound index, Column 2 = integral value
#
# Two small functions are defined here: swap(a,b) and Fourindex(a,b,c,d)
def swap(a,b):
return b,a
def Fourindex(a,b,c,d):
a = int(a)
b = int(b)
c = int(c)
d = int(d)
if (a < b):
a, b = swap(a,b)
if (c < d):
c, d = swap(c,d)
e = int(a*(a+1)/2 + b)
f = int(c*(c+1)/2 + d)
if (e<f):
e,f = swap(e,f)
g = e*(e +1)/2 + f
return int(g)
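# Worked example of the compound index: Fourindex(2,3,1,1) and Fourindex(3,2,1,1) both return 38,
# because each bra/ket pair is reordered before the triangular packing; this is exactly the
# permutational symmetry used when the (mu nu | lambda sigma) integrals are stored in the 1D array.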
def ERIRead(filename,NBasis):
NElements = 0
p = 0
print "Reading ERIs from Gaussian Matrix File"
print "Subroutine can only read regular 2e integrals (NO RAFINETTI)"
with open(filename,'r') as origin:
for i, line in enumerate(origin):
if "Label REGULAR 2E INTEGRALS" in line:
print "Found 2e integrals!"
words = line.split()
print "Total number of elements = ", words[9]
NElements = int(words[9])
print "NElements = ", NElements
eri_raw = np.zeros((NElements,5))
while (p < NElements):
nextline = origin.next()
words = nextline.split()
eri_raw[p,0] = words[1]
eri_raw[p,1] = words[3]
eri_raw[p,2] = words[5]
eri_raw[p,3] = words[7]
eri_raw[p,4] = float(words[9].replace('D','E'))
# print "(",int(eri_raw[p,0]),int(eri_raw[p,1]),"|",int(eri_raw[p,2]),int(eri_raw[p,3]),") = ", eri_raw[p,4]
p = p + 1
# print "ERI RAW = ", eri_raw
NTotal = Fourindex(NBasis,NBasis,NBasis,NBasis) + 1
eri_array = np.zeros(NTotal)
eri_compact = np.zeros((NElements,2))
print "Total length of sparse 1D vector =", NTotal
print "Now forming compound indices"
for i in range(0,NElements):
eri_compact[i,0] = Fourindex(eri_raw[i,0], eri_raw[i,1], eri_raw[i,2], eri_raw[i,3])
eri_compact[i,1] = eri_raw[i,4]
eri_array[int(eri_compact[i,0])] = eri_compact[i,1]
# print "mu nu lambda sigma = ", int(eri_compact[i,0]), ", int = ", eri_compact[i,1], "One D array Value =", eri_array[eri_compact[i,0]]
return eri_array
# OVParse breaks down the MO coefficient matrix (NBasis x NBasis) into an occupied (NBasis x NOcc) and a virtual (NBasis x (Nbasis-NOcc)) matrices
# Input: A: MO Coefficient (NBasis x NBasis)
# NBasis
# NOcc = number of electrons
#
# Output: A_Occ: rectangular NBasis x NOcc matrix: Columns of occupied MOs
# A_Virt: rectangular NBasis x (NBasis - NOcc) matrix: Columns of virtual MOs
## Note TO SELF: Needs to be tested more, was only tested on H2 and V jobs.
def OVParse(A,NBasis,NOcc):
A_Occ = np.zeros((NBasis,NOcc))
A_Virt = np.zeros((NBasis,NBasis-NOcc))
for i in range(0,NOcc):
A_Occ[:,i] = A[:,i]
for j in range(0,NBasis-NOcc):
A_Virt[:,j] = A[:,j+NOcc]
return A_Occ, A_Virt
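# Example shape bookkeeping: with NBasis = 4 and NOcc = 1, OVParse returns A_Occ of shape (4, 1)
# holding the occupied MO column and A_Virt of shape (4, 3) holding the virtual columns;
# OVMerge(A_Occ, A_Virt, 1, 4) further below rebuilds the original (4, 4) coefficient matrix.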
# Biorthog: Calculates the overlap between two sets of MO Coefficients, prints out the final value of the overlap
# Input: A, B: MO Coefficients, can either be full or parsed (using OVParse subroutine)
# S: AO overlap matrix
#
# Output: the final value of the overlap
#
# Option: switch: 1 : print all relevant matrices
# -1 : Dont print any matrices
#
def Biorthog(A,B,S,switch): # eqn numbers based on personal notes
D = np.dot(np.transpose(B),np.dot(S,A)) # eq. 1
u, d, v = np.linalg.svd(D,full_matrices=True) # eq. 2
DtD = np.dot(np.transpose(D),D)
l, V = np.linalg.eig(DtD)
U = np.dot(D,V)
if (switch==1):
print "D = ", D
print "DtD = ", DtD
print "lambdas = ", l
print "Eig Vecs of DtD = ", V
print "Determinants = ", np.linalg.det(u), np.linalg.det(v)
print "u = ", u
print "v = ", v
overlap = np.linalg.det(u)*np.prod(d)*np.linalg.det(v)
return d, u, v, D
# PickColumn: Subroutine that selects a specific column from a two dimensional matrix (NBasis,NBasis), outputs an array (NBasis,1)
# Input: A: Two dimensional matrix
# NBasis: Number of basis functions for A
# i: the position of the column to be selected
#
# Output: One dimensional array (NBasis,1) that is the i-th column of matrix A
#
def PickColumn(A,NBasis,i):
A_Column = np.zeros((NBasis,1))
for j in range(0,NBasis):
A_Column[j,0] = A[j,i]
return A_Column
# WriteMOs: Subroutine that replaces the MO coefficients and orbital energies in a fchk file
# Input: Input filename, output filename, Orbital coefficient alpha, orbital coefficient beta, Orbital energies alpha, orbital energies beta, number of basis functions
#
# Output: None. New file will be generated (filename3) that has the new Orbital coefficients and energies
#
def WriteMOs(filename1,filename3,V1,V2,e1,e2,NBasis):
MOlines = int(len(V1)/5) + 1
p = 0
r = 0
AOE = 0
with open(filename1,'r') as origin:
for i, line in enumerate(origin):
if "Alpha Orbital Energies" in line:
AOE = i
if "Alpha MO coefficients" in line:
i=i+1
AMO=i
j=i+MOlines-1
for m in range(0,j-i+1):
nextline = origin.next()
nextline = nextline.split()
for p in range(p,len(nextline)):
r = r+1
p = 0
if "Beta Orbital Energies" in line:
BOE = i
if "Beta MO coefficients" in line:
r = 0
i=i+1
BMO = i
j=i+MOlines-1
for m in range(0,j-i+1):
nextline = origin.next()
nextline = nextline.split()
for p in range(p,len(nextline)):
r = r+1
p = 0
pointer=0
counter=1
with open(filename1,'r') as origin:
data = origin.readlines()
if "Alpha Orbital Energies" in line:
AOE = i
BOE = AOE + int(NBasis/5) + 1
with open(filename3,'w') as f2:
print "Writing results to new output file: ", filename3, " ... "
while (pointer < AOE+1):
f2.write(data[pointer])
pointer = pointer+1
for j in range(0,NBasis):
f2.write(" ")
if (e1[j] >= 0):
f2.write(" ")
f2.write(str(fchk_notation(e1[j].real)))
if (counter%5 == 0):
f2.write("\n")
counter=0
counter=counter+1
counter =1
BOE = AOE + (int(NBasis/5)+2)
if (NBasis%5 != 0):
f2.write("\n")
if (NBasis%5 == 0):
BOE = BOE - 1
f2.write(data[BOE])
for j in range(0,NBasis):
f2.write(" ")
if (e2[j] >= 0):
f2.write(" ")
f2.write(str(fchk_notation(e2[j].real)))
if (counter%5 ==0):
f2.write("\n")
counter=0
counter = counter+1
counter =1
AMO = BOE + (int(NBasis/5)+2)
if (NBasis%5 != 0):
f2.write("\n")
if (NBasis%5 == 0):
AMO = AMO - 1
f2.write(data[AMO])
for i in range(0,NBasis):
for j in range(0,NBasis):
f2.write(" ")
if (V1[j,i] >= 0):
f2.write(" ")
f2.write(str(fchk_notation(V1[j,i].real)))
if (counter%5 ==0):
f2.write("\n")
counter=0
counter = counter + 1
counter = 1
BMO = AMO + (int(NBasis*NBasis/5))+2
if (NBasis%5 != 0):
f2.write("\n")
if (NBasis%5 == 0):
BMO = BMO - 1
f2.write(data[BMO])
for i in range(0,NBasis):
for j in range(0,NBasis):
f2.write(" ")
if (V2[j,i] >= 0):
f2.write(" ")
f2.write(str(fchk_notation(V2[j,i].real)))
if (counter%5 ==0):
f2.write("\n")
counter=0
counter = counter + 1
counter = 1
if (NBasis%5 != 0):
f2.write("\n")
pointer = BMO + (int(NBasis*NBasis/5))+2
while (pointer < len(data)):
f2.write(data[pointer])
pointer = pointer+1
print "Done."
# OVMerge: Does the opposite of OVParse, merges back the Occ and Virt components of the MO Coefficient matrix
# Input : A (Occ Matrix), B(Vir Matrix), Number of occupied orbitals, NBasis
#
# Output : V = Full MO Coefficient Matrix
#
# (this subroutine has the exact opposite functionality of OVParse)
#
def OVMerge(A,B,NOcc,NBasis):
V = np.zeros((NBasis,NBasis))
for i in range(0,NOcc):
V[:,i] = A[:,i]
for j in range(NOcc,NBasis):
V[:,j] = B[:,j-NOcc]
return V
# DistanceMatrix: Calculates distances between all atoms in a molecule
# Input : fchk file name
#
# Output : Returns Distance Matrix and Atomic Symbol array.
#
# Unfinished part: generate and return a distance matrix (NAtoms x NAtoms)
#
def DistanceMatrix(filename):
NBasis, NElem, Charge, Multiplicity, NAtoms, SCFEnergy = NBasGrab(filename)
Atomic_Numbers = GetAtoms(filename,NAtoms)
Atomic_Symbol = [""]*NAtoms
for i in range(0,NAtoms):
Atomic_Symbol[i] = AtomicSymbol(int(Atomic_Numbers[i]))
RawCart = GeomGet(filename,NAtoms)
Cart = np.resize(RawCart,(NAtoms,3))
Distance_Matrix = np.zeros((NAtoms,NAtoms))
for i in range(0,NAtoms):
for j in range(i+1,NAtoms):
e2 = [Cart[j,0],Cart[j,1],Cart[j,2]]
e1 = [Cart[i,0],Cart[i,1],Cart[i,2]]
Distance_Matrix[i,j] = np.around(DistAB(e1,e2),decimals=2)
Distance_Matrix[j,i] = np.around(DistAB(e1,e2),decimals=2)
return Distance_Matrix, Atomic_Symbol
# PrintLyrics: A Function made just for fun, prints out a random quote from the Beatles songs
# Input: None, but reads in the lyrics.py library file (partially complete)
#
# Output: None, prints lyrics.
#
def PrintLyrics():
n = random.randint(1,32)
LyricsLibrary(n)
# GetAtomicWeights: Grabs the "real atomic weights" from the fchk file
# Input: filename, Number of Atoms
#
# Output: One dimensional array, AtomicWeight, of dimensions NAtoms.
#
def GetAtomicWeights(filename1,NAtoms):
p = 0
r = 0
n = 1
AtomicWeight = np.zeros(NAtoms)
if (NAtoms%5 ==0):
n = 0
AtomLines = int(NAtoms/5) + n
with open(filename1,'r') as origin:
for i, line in enumerate(origin):
if "Real atomic weights" in line:
i = i + 1
pointer = i
endpointer = pointer + AtomLines -1
for m in range(0, endpointer - pointer + 1):
nextline = origin.next()
nextline = nextline.split()
for p in range(p,len(nextline)):
AtomicWeight[r] = nextline[p]
r = r + 1
p = 0
AtomicWeight = np.around(AtomicWeight,decimals=3)
return AtomicWeight
# WriteMOsQChem: Subroutine that replaces the MO coefficients and orbital energies in a fchk file (QChem Version)
# Input: Input filename, output filename, Orbital coefficient alpha, orbital coefficient beta, Orbital energies alpha, orbital energies beta, number of basis functions
#
# Output: None. New file will be generated (filename3) that has the new Orbital coefficients and energies
#
def WriteMOsQChem(filename1,filename3,V1,V2,e1,e2,NBasis):
MOlines = int(len(V1)/5) + 1
p = 0
r = 0
with open(filename1,'r') as origin:
for i, line in enumerate(origin):
if "Alpha Orbital Energies" in line:
AOE = i+1
AOE_header = line
if "Alpha MO coefficients" in line:
AMO = i+1
AMO_header = line
if "Beta Orbital Energies" in line:
BOE = i+1
BOE_header = line
if "Beta MO coefficients" in line:
BMO = i+1
BMO_header = line
pointer=0
counter=1
Start_point = min(AMO,BMO,AOE,BOE)
with open(filename1,'r') as origin:
data = origin.readlines()
with open(filename3,'w') as f2:
print "Writing results to new output file: ", filename3, " ... "
while (pointer < Start_point-1):
f2.write(data[pointer])
pointer = pointer+1
print "pointer at line = ", pointer
f2.write(AOE_header)
for j in range(0,NBasis):
f2.write(" ")
if (e1[j] >= 0):
f2.write(" ")
f2.write(str(fchk_notation(e1[j].real)))
if (counter%5 == 0):
f2.write("\n")
counter=0
counter=counter+1
counter =1
BOE = AOE + (int(NBasis/5)+2)
if (NBasis%5 != 0):
f2.write("\n")
if (NBasis%5 == 0):
BOE = BOE - 1
f2.write(BOE_header)
# f2.write("Beta Orbital Energies\n")
for j in range(0,NBasis):
f2.write(" ")
if (e2[j] >= 0):
f2.write(" ")
f2.write(str(fchk_notation(e2[j].real)))
if (counter%5 ==0):
f2.write("\n")
counter=0
counter = counter+1
counter =1
AMO = BOE + (int(NBasis/5)+2)
if (NBasis%5 != 0):
f2.write("\n")
if (NBasis%5 == 0):
AMO = AMO - 1
# f2.write("Alpha MO coefficients\n")
f2.write(AMO_header)
for i in range(0,NBasis):
for j in range(0,NBasis):
f2.write(" ")
if (V1[j,i] >= 0):
f2.write(" ")
f2.write(str(fchk_notation(V1[j,i].real)))
if (counter%5 ==0):
f2.write("\n")
counter=0
counter = counter + 1
counter = 1
BMO = AMO + (int(NBasis*NBasis/5))+2
if (NBasis%5 != 0):
f2.write("\n")
if (NBasis%5 == 0):
BMO = BMO - 1
# f2.write("Beta MO Coefficients\n")
f2.write(BMO_header)
# f2.write(data[BMO])
for i in range(0,NBasis):
for j in range(0,NBasis):
f2.write(" ")
if (V2[j,i] >= 0):
f2.write(" ")
f2.write(str(fchk_notation(V2[j,i].real)))
if (counter%5 ==0):
f2.write("\n")
counter=0
counter = counter + 1
counter = 1
if (NBasis%5 != 0):
f2.write("\n")
pointer = BMO + (int(NBasis*NBasis/5))+2
# while (pointer < len(data)):
# f2.write(data[pointer])
# pointer = pointer+1
print "Done."
# ContractMat: Subroutine that reads in two square matrices (NBasis x NBasis) and returns their contraction (scalar)
# Input: Matrices A and B (dimensions: NBasis x NBasis), NBasis
#
# Output: scalar m = Sum_(mu,nu) A_(mu,nu) * B_(mu,nu)
#
def ContractMat(A,B,NBasis):
value = 0.0
for i in range(0,NBasis):
for j in range(0,NBasis):
value = value + A[i,j]*B[i,j]
return value
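# Editorial note (not in the original code): the double loop above is the Frobenius
# inner product, so for numpy arrays it is equivalent to np.sum(A*B).
# Hypothetical 2x2 example:
def ExampleContractMat():
    A = np.array([[1.0, 2.0], [3.0, 4.0]])
    B = np.array([[5.0, 6.0], [7.0, 8.0]])
    # Both expressions give 1*5 + 2*6 + 3*7 + 4*8 = 70.0
    return ContractMat(A, B, 2), np.sum(A * B)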
# Work in progress: Basis set reader:
def ReadBasisSet(filename):
NBasis, NElem, Charge, Multiplicity, NAtoms, SCFEnergy = NBasGrab(filename)
print "Number of Basis functions =", NBasis
print "Number of atoms =", NAtoms
Atomic_Numbers = GetAtoms(filename,NAtoms)
print "Atomic Numbers =", Atomic_Numbers
Atomic_Symbol = [""]*NAtoms
for i in range(0,NAtoms):
Atomic_Symbol[i] = AtomicSymbol(int(Atomic_Numbers[i]))
print "Atomic Symbols =", Atomic_Symbol
| 34.443277 | 594 | 0.499665 | [
"MIT"
] | HassanHarb92/BEATLES | BEATLES.py | 49,185 | Python |
"""Utility functions related to file operations."""
import copy
import logging
import os
import subprocess
import sys
from argparse import Namespace
from collections import OrderedDict
from contextlib import contextmanager
from pathlib import Path
from tempfile import NamedTemporaryFile
from typing import TYPE_CHECKING, Any, Dict, Iterator, List, Optional, Set, Union
# import wcmatch
import wcmatch.pathlib
from wcmatch.wcmatch import RECURSIVE, WcMatch
from ansiblelint.config import BASE_KINDS, options
from ansiblelint.constants import FileType
if TYPE_CHECKING:
# https://github.com/PyCQA/pylint/issues/3979
BasePathLike = os.PathLike[Any] # pylint: disable=unsubscriptable-object
else:
BasePathLike = os.PathLike
_logger = logging.getLogger(__package__)
def normpath(path: Union[str, BasePathLike]) -> str:
"""
Normalize a path in order to provide a more consistent output.
Currently it generates a relative path but in the future we may want to
make this user configurable.
"""
# conversion to string in order to allow receiving non string objects
relpath = os.path.relpath(str(path))
abspath = os.path.abspath(str(path))
    # we avoid returning relative paths that end up at root level
if abspath in relpath:
return abspath
return relpath
@contextmanager
def cwd(path: Union[str, BasePathLike]) -> Iterator[None]:
"""Context manager for temporary changing current working directory."""
old_pwd = os.getcwd()
os.chdir(path)
try:
yield
finally:
os.chdir(old_pwd)
def expand_path_vars(path: str) -> str:
"""Expand the environment or ~ variables in a path string."""
# It may be possible for function to be called with a Path object
path = str(path).strip()
path = os.path.expanduser(path)
path = os.path.expandvars(path)
return path
def expand_paths_vars(paths: List[str]) -> List[str]:
"""Expand the environment or ~ variables in a list."""
paths = [expand_path_vars(p) for p in paths]
return paths
def kind_from_path(path: Path, base: bool = False) -> FileType:
"""Determine the file kind based on its name.
When called with base=True, it will return the base file type instead
of the explicit one. That is expected to return 'yaml' for any yaml files.
"""
# pathlib.Path.match patterns are very limited, they do not support *a*.yml
# glob.glob supports **/foo.yml but not multiple extensions
pathex = wcmatch.pathlib.PurePath(path.absolute().resolve())
kinds = options.kinds if not base else BASE_KINDS
for entry in kinds:
for k, v in entry.items():
if pathex.globmatch(
v,
flags=(
wcmatch.pathlib.GLOBSTAR
| wcmatch.pathlib.BRACE
| wcmatch.pathlib.DOTGLOB
),
):
return str(k) # type: ignore
if base:
# Unknown base file type is default
return ""
if path.is_dir():
return "role"
if str(path) == '/dev/stdin':
return "playbook"
    # Unknown file types report an empty string (evaluated as False)
return ""
class Lintable:
"""Defines a file/folder that can be linted.
    Providing file content when creating the object allows creation of in-memory
instances that do not need files to be present on disk.
"""
def __init__(
self,
name: Union[str, Path],
content: Optional[str] = None,
kind: Optional[FileType] = None,
):
"""Create a Lintable instance."""
# Filename is effective file on disk, for stdin is a namedtempfile
self.filename: str = str(name)
self.dir: str = ""
self.kind: Optional[FileType] = None
if isinstance(name, str):
self.name = normpath(name)
self.path = Path(self.name)
else:
self.name = str(name)
self.path = name
self._content = content
# if the lintable is part of a role, we save role folder name
self.role = ""
parts = self.path.parent.parts
if 'roles' in parts:
role = self.path
while role.parent.name != "roles" and role.name:
role = role.parent
            if role.exists():
self.role = role.name
if str(self.path) in ['/dev/stdin', '-']:
# pylint: disable=consider-using-with
self.file = NamedTemporaryFile(mode="w+", suffix="playbook.yml")
self.filename = self.file.name
self._content = sys.stdin.read()
self.file.write(self._content)
self.file.flush()
self.path = Path(self.file.name)
self.name = 'stdin'
self.kind = 'playbook'
self.dir = '/'
else:
self.kind = kind or kind_from_path(self.path)
# We store absolute directory in dir
if not self.dir:
if self.kind == "role":
self.dir = str(self.path.resolve())
else:
self.dir = str(self.path.parent.resolve())
# determine base file kind (yaml, xml, ini, ...)
self.base_kind = kind_from_path(self.path, base=True)
def __getitem__(self, key: Any) -> Any:
"""Provide compatibility subscriptable support."""
if key == 'path':
return str(self.path)
if key == 'type':
return str(self.kind)
raise NotImplementedError()
def get(self, key: Any, default: Any = None) -> Any:
"""Provide compatibility subscriptable support."""
try:
return self.__getitem__(key)
except NotImplementedError:
return default
@property
def content(self) -> str:
"""Retried file content, from internal cache or disk."""
if self._content is None:
with open(self.path, mode='r', encoding='utf-8') as f:
self._content = f.read()
return self._content
def __hash__(self) -> int:
"""Return a hash value of the lintables."""
return hash((self.name, self.kind))
def __eq__(self, other: object) -> bool:
"""Identify whether the other object represents the same rule match."""
if isinstance(other, Lintable):
return bool(self.name == other.name and self.kind == other.kind)
return False
def __repr__(self) -> str:
"""Return user friendly representation of a lintable."""
return f"{self.name} ({self.kind})"
def discover_lintables(options: Namespace) -> Dict[str, Any]:
"""Find all files that we know how to lint."""
# git is preferred as it also considers .gitignore
git_command = ['git', 'ls-files', '-z']
out = None
try:
out = subprocess.check_output(
git_command, stderr=subprocess.STDOUT, universal_newlines=True
).split("\x00")[:-1]
_logger.info("Discovered files to lint using: %s", ' '.join(git_command))
except subprocess.CalledProcessError as exc:
if not (exc.returncode == 128 and 'fatal: not a git repository' in exc.output):
_logger.warning(
"Failed to discover lintable files using git: %s",
exc.output.rstrip('\n'),
)
except FileNotFoundError as exc:
if options.verbosity:
_logger.warning("Failed to locate command: %s", exc)
if out is None:
exclude_pattern = "|".join(options.exclude_paths)
_logger.info("Looking up for files, excluding %s ...", exclude_pattern)
out = WcMatch('.', exclude_pattern=exclude_pattern, flags=RECURSIVE).match()
return OrderedDict.fromkeys(sorted(out))
def guess_project_dir() -> str:
"""Return detected project dir or user home directory."""
try:
result = subprocess.run(
["git", "rev-parse", "--show-toplevel"],
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
universal_newlines=True,
check=False,
)
except FileNotFoundError:
# if git is absent we use home directory
return str(Path.home())
if result.returncode != 0:
return str(Path.home())
return result.stdout.splitlines()[0]
def expand_dirs_in_lintables(lintables: Set[Lintable]) -> None:
"""Return all recognized lintables within given directory."""
should_expand = False
for item in lintables:
if item.path.is_dir():
should_expand = True
break
if should_expand:
# this relies on git and we do not want to call unless needed
all_files = discover_lintables(options)
for item in copy.copy(lintables):
if item.path.is_dir():
for filename in all_files:
if filename.startswith(str(item.path)):
lintables.add(Lintable(filename))
| 32.849817 | 87 | 0.614184 | [
"MIT"
] | ssato/ansible-lint | src/ansiblelint/file_utils.py | 8,968 | Python |
# -*- coding: utf-8 -*-
import gzip
import bz2
import numpy as np
def advanced_open(filepath, *args, **kwargs):
""" Open function interface for files with different extensions.
Parameters
----------
filepath: str
File path with extension.
    args: list
        Positional (non-keyword) arguments forwarded to the underlying open call.
    kwargs: dict
        Keyword arguments forwarded to the underlying open call.
    Returns
    -------
    file object
        An open text-mode file handle; gzip.open or bz2.open is used when the
        extension indicates compression.
    """
open_fn = open
if filepath.endswith('.gz'):
open_fn = gzip.open
elif filepath.endswith('.bz2'):
open_fn = bz2.open
return open_fn(filepath, mode="rt", *args, **kwargs)
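# Editorial usage sketch (hypothetical file names): the same call transparently
# handles plain, gzip-compressed and bz2-compressed text files.
def _example_advanced_open():
    with advanced_open("train.txt.gz") as f:   # dispatches to gzip.open, text mode
        header = f.readline()
    return header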
def load_kg_file(filepath, separator="\t", as_stream=False):
""" Import knowledge graph from file
Parameters
----------
filepath: str
File path
separator: str
File column separator
Returns
-------
    numpy.ndarray
The knowledge graph triplets obtained from the files with size [?, 3]
"""
kg_triples = []
with advanced_open(filepath) as file_content:
for line in file_content:
kg_triples.append(line.strip().split(separator))
return np.array(kg_triples)
def load_kg_file_as_stream(filepath, separator="\t"):
""" Import knowledge graph from file as a stream
Parameters
----------
filepath: str
File path
separator: str
File column separator
Returns
-------
generator
The knowledge graph triplets obtained from the files with size [?, 3]
"""
with advanced_open(filepath) as file_content:
for line in file_content:
yield line.strip().split(separator) | 21.702703 | 77 | 0.608966 | [
"MIT"
] | hpi-sam/GNN-Effectants | benchmarking/libkge/libkge/io/base.py | 1,606 | Python |
import _plotly_utils.basevalidators
class TickformatstopdefaultsValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(
self,
plotly_name="tickformatstopdefaults",
parent_name="histogram2d.colorbar",
**kwargs,
):
super(TickformatstopdefaultsValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Tickformatstop"),
data_docs=kwargs.pop(
"data_docs",
"""
""",
),
**kwargs,
)
| 28.136364 | 86 | 0.602585 | [
"MIT"
] | labaran1/plotly.py | packages/python/plotly/plotly/validators/histogram2d/colorbar/_tickformatstopdefaults.py | 619 | Python |
from flask import render_template
from app import app
@app.errorhandler(404)
def page_not_found(e):
return render_template('http404.html'), 404
@app.errorhandler(403)
def forbidden(e):
return render_template('http403.html'), 403
@app.errorhandler(500)
def internal_server_error(e):
return render_template('http500.html'), 500
| 19 | 47 | 0.75731 | [
"MIT"
] | c1c1/network-config-generator | app/views/errorhandler_views.py | 342 | Python |
import os
import requests
import datetime
from django.http import HttpResponseRedirect, HttpResponse, JsonResponse, Http404
from django.template import loader
from django.contrib.auth import login
from django.conf import settings
from django.http import Http404
from django.utils import timezone
from requests import status_codes
from rest_framework.authtoken.models import Token
from rest_framework.decorators import api_view
from rest_framework.response import Response
from rest_framework.permissions import AllowAny, IsAuthenticated, IsAuthenticatedOrReadOnly
from rest_framework import viewsets, status
from channels.layers import get_channel_layer
from asgiref.sync import async_to_sync
from constance import config
import constance.settings
from tau.twitch.models import TwitchAPIScope, TwitchEventSubSubscription
from tau.users.models import User
from .forms import ChannelNameForm, FirstRunForm
from .utils import cleanup_remote_webhooks, cleanup_webhooks, log_request, check_access_token_expired, refresh_access_token, teardown_all_acct_webhooks, teardown_webhooks
from tau.twitch.models import TwitchHelixEndpoint
@api_view(['POST'])
def irc_message_view(request):
channel_layer = get_channel_layer()
async_to_sync(channel_layer.group_send)('twitchchat', {
'type': 'twitchchat.event',
'data': request.data
})
return Response({}, status=status.HTTP_201_CREATED)
@api_view(['GET', 'POST', 'PUT', 'PATCH', 'DELETE'])
def helix_view(request, helix_path=None):
if check_access_token_expired():
refresh_access_token()
try:
endpoint_instance = TwitchHelixEndpoint.objects.get(
endpoint=helix_path,
method=request.method
)
if endpoint_instance.token_type == 'OA':
token = config.TWITCH_ACCESS_TOKEN
else:
token = config.TWITCH_APP_ACCESS_TOKEN
except TwitchHelixEndpoint.DoesNotExist:
token = config.TWITCH_ACCESS_TOKEN
body = request.data
client_id = os.environ.get('TWITCH_APP_ID', None)
headers = {
'Authorization': 'Bearer {}'.format(token),
'Client-Id': client_id
}
url = f'https://api.twitch.tv/helix/' \
f'{helix_path}'
uri = request.build_absolute_uri()
url_params = ''
if uri.count('?') > 0:
url_params = uri.split('?', 1)[1]
if url_params != '':
url += f'?{url_params}'
if request.method == 'GET':
data = requests.get(
url,
headers=headers
)
elif request.method == 'POST':
data = requests.post(
url,
data=body,
headers=headers
)
elif request.method == 'PUT':
data = requests.put(
url,
data=body,
headers=headers
)
print(data)
elif request.method == 'PATCH':
data = requests.patch(
url,
data=body,
headers=headers
)
elif request.method == 'DELETE':
data = requests.delete(
url,
headers=headers
)
try:
if(settings.DEBUG_TWITCH_CALLS):
log_request(data)
stream_data = data.json()
except ValueError:
stream_data = None
return Response(stream_data, status=data.status_code)
def home_view(request):
user_count = User.objects.all().exclude(username='worker_process').count()
if user_count == 0:
return HttpResponseRedirect('/first-run/')
# elif not request.user.is_authenticated:
# return HttpResponseRedirect('/accounts/login/')
elif config.CHANNEL == '':
return HttpResponseRedirect('/set-channel/')
elif config.SCOPE_UPDATED_NEEDED:
return HttpResponseRedirect('/refresh-token-scope/')
else:
# # template = loader.get_template('home.html')
# template = loader.get_template('dashboard/index.html')
# return HttpResponse(template.render({'config': config}, request))
return HttpResponseRedirect('/dashboard')
def first_run_view(request):
user_count = User.objects.all().exclude(username='worker_process').count()
if user_count > 0: # If users already exist, it is not first run
return HttpResponseRedirect('/') # reject creating a new super-user
if request.method == 'POST':
form = FirstRunForm(request.POST)
if form.is_valid():
user = User.objects.create_user(
form.cleaned_data['username'],
password=form.cleaned_data['password1']
)
user.is_superuser=True
user.is_staff=True
user.save()
login(request, user)
return HttpResponseRedirect('/')
else:
template = loader.get_template('registration/first-run.html')
return HttpResponse(template.render({}, request))
else:
template = loader.get_template('registration/first-run.html')
return HttpResponse(template.render({}, request))
def get_channel_name_view(request):
if request.method == 'POST':
port = os.environ.get('PORT', 8000)
form = ChannelNameForm(request.POST)
if form.is_valid():
# Process the data
config.CHANNEL = form.cleaned_data['channel_name']
scope=' '.join(settings.TOKEN_SCOPES)
client_id = os.environ.get('TWITCH_APP_ID', None)
url = f'https://id.twitch.tv/oauth2/authorize?' \
f'client_id={client_id}&' \
f'redirect_uri={settings.BASE_URL}/twitch-callback/&' \
f'response_type=code&' \
f'scope={scope}&' \
f'force_verify=true'
return HttpResponseRedirect(url)
else:
# Show some error page
pass
else:
template = loader.get_template('registration/twitch-channel-setup.html')
return HttpResponse(template.render({}, request))
def refresh_token_scope(request):
client_id = os.environ.get('TWITCH_APP_ID', None)
helix_scopes = list(
TwitchAPIScope.objects.filter(
required=True
).values_list('scope', flat=True)
)
eventsub_scopes = list(
TwitchEventSubSubscription.objects.filter(
active=True
).values_list('scope_required', flat=True)
)
scopes = list(set(settings.TOKEN_SCOPES + eventsub_scopes + helix_scopes))
scopes = list(filter(lambda x: (x is not None), scopes))
scope=' '.join(scopes)
url = f'https://id.twitch.tv/oauth2/authorize?' \
f'client_id={client_id}&' \
f'redirect_uri={settings.BASE_URL}/twitch-callback/&' \
f'response_type=code&' \
f'scope={scope}&' \
f'force_verify=true'
return HttpResponseRedirect(url)
@api_view()
def get_tau_token(request):
if not request.user.is_authenticated:
return JsonResponse({'error': 'You must be logged into access this endpoint.'})
else:
token = Token.objects.get(user=request.user)
return JsonResponse({'token': token.key})
@api_view(['GET'])
def get_public_url(request):
if not request.user.is_authenticated:
return JsonResponse({'error': 'You must be logged into access this endpoint.'})
else:
public_url = config.PUBLIC_URL
return JsonResponse({'public_url': public_url})
@api_view(['POST'])
def refresh_tau_token(request):
if not request.user.is_authenticated:
return JsonResponse({'error': 'You must be logged into access this endpoint.'})
else:
token = Token.objects.get(user=request.user)
token.delete()
token = Token.objects.create(user=request.user)
return JsonResponse({'token': token.key})
@api_view(['POST'])
def reset_webhooks(request):
if not request.user.is_authenticated:
return JsonResponse({'error': 'You must be logged into access this endpoint.'})
data = request.data
if data['type'] == 'all':
teardown_all_acct_webhooks()
elif data['type'] == 'remote':
token = Token.objects.get(user=request.user)
cleanup_remote_webhooks()
elif data['type'] == 'broken':
token = Token.objects.get(user=request.user)
cleanup_webhooks()
else:
return JsonResponse({'webhooks_reset': False, 'error': 'Proper type not found.'})
config.FORCE_WEBHOOK_REFRESH = True
return JsonResponse({'webhooks_reset': True})
def process_twitch_callback_view(request):
port = os.environ.get('PORT', 8000)
params = request.GET
auth_code = params['code']
client_id = os.environ.get('TWITCH_APP_ID', None)
client_secret = os.environ.get('TWITCH_CLIENT_SECRET', None)
auth_r = requests.post('https://id.twitch.tv/oauth2/token', data = {
'client_id': client_id,
'client_secret': client_secret,
'code': auth_code,
'grant_type': 'authorization_code',
'redirect_uri': f'{settings.BASE_URL}/twitch-callback/'
})
response_data = auth_r.json()
if(settings.DEBUG_TWITCH_CALLS):
log_request(auth_r)
config.TWITCH_ACCESS_TOKEN = response_data['access_token']
config.TWITCH_REFRESH_TOKEN = response_data['refresh_token']
expiration = timezone.now() + datetime.timedelta(seconds=response_data['expires_in'])
config.TWITCH_ACCESS_TOKEN_EXPIRATION = expiration
scope=' '.join(settings.TOKEN_SCOPES)
app_auth_r = requests.post('https://id.twitch.tv/oauth2/token', data = {
'client_id': client_id,
'client_secret': client_secret,
'grant_type': 'client_credentials',
'scope': scope
})
if(settings.DEBUG_TWITCH_CALLS):
log_request(app_auth_r)
app_auth_data = app_auth_r.json()
config.TWITCH_APP_ACCESS_TOKEN = app_auth_data['access_token']
config.SCOPE_UPDATED_NEEDED = False
config.SCOPES_REFRESHED = True
headers = {
'Authorization': 'Bearer {}'.format(config.TWITCH_ACCESS_TOKEN),
'Client-Id': client_id
}
user_r = requests.get('https://api.twitch.tv/helix/users', headers=headers)
if(settings.DEBUG_TWITCH_CALLS):
log_request(user_r)
user_data = user_r.json()
channel_id = user_data['data'][0]['id']
config.CHANNEL_ID = channel_id
return HttpResponseRedirect('/')
class HeartbeatViewSet(viewsets.ViewSet):
permission_classes = (IsAuthenticatedOrReadOnly, )
def list(self, request, *args, **kwargs):
response = {'message': 'pong'}
return Response(response)
class TAUSettingsViewSet(viewsets.ViewSet):
permission_classes = (IsAuthenticated, )
valid_keys = ['USE_IRC']
def list(self, request, *args, **kwargs):
response = {key.lower(): getattr(config, key) for key in self.valid_keys}
return Response(response)
def retrieve(self, request, pk=None):
if pk.upper() in self.valid_keys:
return Response({pk: getattr(config, pk.upper())})
else:
raise Http404
def update(self, request, pk=None):
if pk.upper() in self.valid_keys:
data = request.data
setattr(config, pk.upper(), data['value'])
return Response({pk: data['value']})
else:
raise Http404
class ServiceStatusViewSet(viewsets.ViewSet):
permission_classes = (IsAuthenticated, )
def update(self, request, pk=None):
if pk.startswith('STATUS_') and hasattr(config, pk):
data = request.data
new_status = data['status']
setattr(config, pk, new_status)
return Response({
pk: new_status
})
elif pk == 'SET_ALL':
status_keys = filter(
lambda x: x.startswith('STATUS_'),
constance.settings.CONFIG.keys()
)
data = request.data
new_status = data['status']
for key in status_keys:
setattr(config, key, new_status)
return Response({
'reset': 'complete'
})
else:
raise Http404("Config does not exist")
| 35.331395 | 171 | 0.63946 | [
"MIT"
] | FiniteSingularity/tau-railway | tau/core/views.py | 12,154 | Python |
def find_words(string, word_set):
if string == "" or not word_set:
return None
if string in word_set: # O(1)
return [string]
#"bedbathbeyondunk"
#{'bed', 'bath', 'bedbath', 'and', 'beyond'}
tmp = "" # bedbathbeyondunk
out = [] # []
retro = False # True
i = 0
while i < len(string): # i = 15
if not retro:
tmp += string[i]
if tmp in word_set:
out.append(tmp)
tmp = ""
if i == len(string)-1 and tmp != "":
if not out:
return None
tmp = out.pop() + tmp
retro = True
i -= 1
i += 1
return out
assert find_words(
"bedbathandbeyond",
set(['bed', 'bath', 'bedbath', 'and', 'beyond'])
) == ['bed', 'bath', 'and', 'beyond']
assert find_words(
"thequickbrownfox",
set(['quick', 'brown', 'the', 'fox'])
) == ['the', 'quick', 'brown', 'fox']
assert find_words(
"thequickbrownfoxa",
set(['quick', 'brown', 'the', 'fox'])
) == None
| 22.630435 | 52 | 0.483189 | [
"MIT"
] | xXHachimanXx/Daily-Coding | reconstruct-words.py | 1,041 | Python |
from . import test_s3
| 11 | 21 | 0.772727 | [
"MIT"
] | GlodoUK/misc-addons | ir_attachment_s3/tests/__init__.py | 22 | Python |
import json
import yaml
from pathlib import Path
from brownie import *
from substrateinterface import Keypair
from hashlib import blake2b
import base58
def get_derivative_account(root_account, index):
seed_bytes = b'modlpy/utilisuba'
root_account_bytes = bytes.fromhex(Keypair(root_account).public_key[2:])
index_bytes = int(index).to_bytes(2, 'little')
entropy = blake2b(seed_bytes + root_account_bytes + index_bytes, digest_size=32).digest()
input_bytes = bytes([42]) + entropy
checksum = blake2b(b'SS58PRE' + input_bytes).digest()
return base58.b58encode(input_bytes + checksum[:2]).decode()
class Contracts:
user = None
proxy_admin = None
lido = None
vksm = None
oracle_master = None
wstksm = None
auth_manager = None
controller = None
ledgers = None
validators = None
def __init__(self, _user, _proxy_admin, _lido, _vksm, _oracle_master, _wstksm, _auth_manager, _controller, _ledgers, _validators):
self.user = _user
self.proxy_admin = _proxy_admin
self.lido = _lido
self.vksm = _vksm
self.oracle_master = _oracle_master
self.wstksm = _wstksm
self.auth_manager = _auth_manager
self.controller = _controller
self.ledgers = _ledgers
self.validators = _validators
NETWORK="kusama"
def load_deployments(network):
path = './deployments/' + network + '.json'
if Path(path).is_file():
with open(path) as file:
return json.load(file)
else:
return {}
def load_deployment_config(network):
with open('./deployment-config.yml') as file:
return yaml.safe_load(file)['networks'][network]
CONFIG = load_deployment_config(NETWORK)
DEPLOYMENTS = load_deployments(NETWORK)
def gen_ledger_account(index):
sovereign = CONFIG['sovereign_account']
root_index = CONFIG['root_derivative_index']
controller = get_derivative_account(sovereign, root_index)
return get_derivative_account(controller, index)
#contracts = run('./scripts/prepare_env.py') from brownie console --network=moonbase
def main():
user = accounts.load(CONFIG['deployer'])
proxy_admin = ProxyAdminMock.at(DEPLOYMENTS['ProxyAdmin'])
lido = Lido.at(DEPLOYMENTS['Lido'])
vksm = vKSM_mock.at(CONFIG['precompiles']['vksm'])
oracle_master = OracleMaster.at(DEPLOYMENTS['OracleMaster'])
wstksm = WstKSM.at(DEPLOYMENTS['WstKSM'])
auth_manager = AuthManager.at(DEPLOYMENTS['AuthManager'])
controller = Controller.at(DEPLOYMENTS['Controller'])
ledgers = [ Ledger.at(addr) for addr in lido.getLedgerAddresses() ]
# current validators in moonbase
validator_1 = Keypair("5CX2ov8tmW6nZwy6Eouzc7VxFHcAyZioNm5QjEUYc7zjbS66").public_key
validator_2 = Keypair("5FRiNmoi9HFGFrY3K9xsSCeewRtA2pcXTZVZrwLacPCfvHum").public_key
validator_3 = Keypair("5EcdgHV81hu6YpPucSMrWbdQRBUr18XypiiGsgQ7HREYdrWG").public_key
validator_4 = Keypair("5FCEmzonc34D2SXXv2CMsDoFWCVivH2a2Mwe32t9BT1TcpAD").public_key
validator_5 = Keypair("5Ehgvgk1LERD5aTEWw6HLdKZurBqcRYbHXvrAtTgYPhUpr1R").public_key
validators = [validator_1, validator_2, validator_3, validator_4, validator_5]
# 5CxXVE7pHqzR4kzfz6nop529odm8eVemFFtStruyNQvdTopo
# 5GxgDNMhbvMhuJzXC2voX5nKUyNaNQFCZxgnoa18eGiBBZwt
# 5Cqb9WXVQQF73a1dcJEBFS2bWrukaC6dmzjeWZeJHj3NMwvB
return Contracts(user, proxy_admin, lido, vksm, oracle_master, wstksm, auth_manager, controller, ledgers, validators)
| 33.825243 | 134 | 0.735075 | [
"MIT"
] | mixbytes/lido-dot-ksm | scripts/prepare_env.py | 3,484 | Python |
import datetime
from dateutil.parser import parse
from decimal import Decimal
import re
import importlib
from django.core.exceptions import ObjectDoesNotExist, MultipleObjectsReturned
from django.utils import datetime_safe
from tastypie.bundle import Bundle
from tastypie.exceptions import ApiFieldError, NotFound
from tastypie.utils import dict_strip_unicode_keys, make_aware
class NOT_PROVIDED:
def __str__(self):
return 'No default provided.'
DATE_REGEX = re.compile('^(?P<year>\d{4})-(?P<month>\d{2})-(?P<day>\d{2}).*?$')
DATETIME_REGEX = re.compile('^(?P<year>\d{4})-(?P<month>\d{2})-(?P<day>\d{2})(T|\s+)(?P<hour>\d{2}):(?P<minute>\d{2}):(?P<second>\d{2}).*?$')
# All the ApiField variants.
class ApiField(object):
"""The base implementation of a field used by the resources."""
dehydrated_type = 'string'
help_text = ''
def __init__(self, attribute=None, default=NOT_PROVIDED, null=False, blank=False, readonly=False, unique=False, help_text=None):
"""
Sets up the field. This is generally called when the containing
``Resource`` is initialized.
Optionally accepts an ``attribute``, which should be a string of
either an instance attribute or callable off the object during the
``dehydrate`` or push data onto an object during the ``hydrate``.
Defaults to ``None``, meaning data will be manually accessed.
Optionally accepts a ``default``, which provides default data when the
object being ``dehydrated``/``hydrated`` has no data on the field.
Defaults to ``NOT_PROVIDED``.
Optionally accepts a ``null``, which indicated whether or not a
``None`` is allowable data on the field. Defaults to ``False``.
Optionally accepts a ``blank``, which indicated whether or not
data may be omitted on the field. Defaults to ``False``.
Optionally accepts a ``readonly``, which indicates whether the field
is used during the ``hydrate`` or not. Defaults to ``False``.
Optionally accepts a ``unique``, which indicates if the field is a
unique identifier for the object.
Optionally accepts ``help_text``, which lets you provide a
human-readable description of the field exposed at the schema level.
Defaults to the per-Field definition.
"""
# Track what the index thinks this field is called.
self.instance_name = None
self._resource = None
self.attribute = attribute
self._default = default
self.null = null
self.blank = blank
self.readonly = readonly
self.value = None
self.unique = unique
if help_text:
self.help_text = help_text
def contribute_to_class(self, cls, name):
# Do the least we can here so that we don't hate ourselves in the
# morning.
self.instance_name = name
self._resource = cls
def has_default(self):
"""Returns a boolean of whether this field has a default value."""
return self._default is not NOT_PROVIDED
@property
def default(self):
"""Returns the default value for the field."""
if callable(self._default):
return self._default()
return self._default
def dehydrate(self, bundle):
"""
Takes data from the provided object and prepares it for the
resource.
"""
if self.attribute is not None:
# Check for `__` in the field for looking through the relation.
attrs = self.attribute.split('__')
current_object = bundle.obj
for attr in attrs:
previous_object = current_object
current_object = getattr(current_object, attr, None)
if current_object is None:
if self.has_default():
current_object = self._default
# Fall out of the loop, given any further attempts at
# accesses will fail miserably.
break
elif self.null:
current_object = None
# Fall out of the loop, given any further attempts at
# accesses will fail miserably.
break
else:
raise ApiFieldError("The object '%r' has an empty attribute '%s' and doesn't allow a default or null value." % (previous_object, attr))
if callable(current_object):
current_object = current_object()
return self.convert(current_object)
if self.has_default():
return self.convert(self.default)
else:
return None
def convert(self, value):
"""
Handles conversion between the data found and the type of the field.
Extending classes should override this method and provide correct
data coercion.
"""
return value
def hydrate(self, bundle):
"""
Takes data stored in the bundle for the field and returns it. Used for
        taking simple data and building an instance object.
"""
if self.readonly:
return None
if not bundle.data.has_key(self.instance_name):
is_related = getattr(self, 'is_related', False)
is_m2m = getattr(self, 'is_m2m', False)
if is_related and not is_m2m:
# We've got an FK (or alike field) & a possible parent object.
# Check for it.
if bundle.related_obj and bundle.related_name in (self.attribute, self.instance_name):
return bundle.related_obj
# Functor for safely checking if bundle.obj has a non-None property
def has_non_null_attr(obj, name):
try:
return getattr(obj, name, None) is not None
except:
if is_related:
return None
else:
raise
if self.blank:
return None
elif self.attribute and has_non_null_attr(bundle.obj, self.attribute):
return getattr(bundle.obj, self.attribute)
elif self.instance_name and has_non_null_attr(bundle.obj, self.instance_name):
return getattr(bundle.obj, self.instance_name)
elif self.has_default():
if callable(self._default):
return self._default()
return self._default
elif self.null:
return None
else:
raise ApiFieldError("The '%s' field has no data and doesn't allow a default or null value." % self.instance_name)
bundle_val = bundle.data[self.instance_name]
if bundle_val is None and not self.null:
raise ApiFieldError("The '%s' field doesn't allow a null value." % self.instance_name)
else:
return bundle_val
def set_value_on_bundle_obj(self, bundle, value):
"""
Overrideable hook for writing a value into the object on a bundle. Enables the use of
custom setters in your app code if setattr() is too raw for your fancy ORM model.
"""
try:
setattr(bundle.obj, self.attribute, value)
except Exception, e:
raise ApiFieldError("The '%s' field couldn't set value '%s': %s" %
(self.instance_name, value, e))
class CharField(ApiField):
"""
A text field of arbitrary length.
Covers both ``models.CharField`` and ``models.TextField``.
"""
dehydrated_type = 'string'
help_text = 'Unicode string data. Ex: "Hello World"'
def convert(self, value):
if value is None:
return None
return unicode(value)
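# Editorial usage sketch (hypothetical ``Note`` object, not part of tastypie itself):
# a field pulls its value off ``bundle.obj`` via ``attribute`` when dehydrated.
def _example_charfield_dehydrate():
    class Note(object):
        title = u'First post'
    field = CharField(attribute='title', default=u'untitled')
    bundle = Bundle(obj=Note())
    return field.dehydrate(bundle)  # -> u'First post'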
class FileField(ApiField):
"""
A file-related field.
Covers both ``models.FileField`` and ``models.ImageField``.
"""
dehydrated_type = 'string'
help_text = 'A file URL as a string. Ex: "http://media.example.com/media/photos/my_photo.jpg"'
def convert(self, value):
if value is None:
return None
try:
# Try to return the URL if it's a ``File``, falling back to the string
# itself if it's been overridden or is a default.
return getattr(value, 'url', value)
except ValueError:
return None
class IntegerField(ApiField):
"""
An integer field.
Covers ``models.IntegerField``, ``models.PositiveIntegerField``,
``models.PositiveSmallIntegerField`` and ``models.SmallIntegerField``.
"""
dehydrated_type = 'integer'
help_text = 'Integer data. Ex: 2673'
def convert(self, value):
if value is None:
return None
return int(value)
class FloatField(ApiField):
"""
A floating point field.
"""
dehydrated_type = 'float'
help_text = 'Floating point numeric data. Ex: 26.73'
def convert(self, value):
if value is None:
return None
return float(value)
class DecimalField(ApiField):
"""
A decimal field.
"""
dehydrated_type = 'decimal'
help_text = 'Fixed precision numeric data. Ex: 26.73'
def convert(self, value):
if value is None:
return None
return Decimal(value)
def hydrate(self, bundle):
value = super(DecimalField, self).hydrate(bundle)
if value and not isinstance(value, Decimal):
value = Decimal(value)
return value
class BooleanField(ApiField):
"""
A boolean field.
Covers both ``models.BooleanField`` and ``models.NullBooleanField``.
"""
dehydrated_type = 'boolean'
help_text = 'Boolean data. Ex: True'
def convert(self, value):
if value is None:
return None
return bool(value)
class ListField(ApiField):
"""
A list field.
"""
dehydrated_type = 'list'
help_text = "A list of data. Ex: ['abc', 26.73, 8]"
def convert(self, value):
if value is None:
return None
return list(value)
class DictField(ApiField):
"""
A dictionary field.
"""
dehydrated_type = 'dict'
help_text = "A dictionary of data. Ex: {'price': 26.73, 'name': 'Daniel'}"
def convert(self, value):
if value is None:
return None
return dict(value)
class DateField(ApiField):
"""
A date field.
"""
dehydrated_type = 'date'
help_text = 'A date as a string. Ex: "2010-11-10"'
def convert(self, value):
if value is None:
return None
if isinstance(value, basestring):
match = DATE_REGEX.search(value)
if match:
data = match.groupdict()
return datetime_safe.date(int(data['year']), int(data['month']), int(data['day']))
else:
raise ApiFieldError("Date provided to '%s' field doesn't appear to be a valid date string: '%s'" % (self.instance_name, value))
return value
def hydrate(self, bundle):
value = super(DateField, self).hydrate(bundle)
if value and not hasattr(value, 'year'):
try:
# Try to rip a date/datetime out of it.
value = make_aware(parse(value))
if hasattr(value, 'hour'):
value = value.date()
except ValueError:
pass
return value
class DateTimeField(ApiField):
"""
A datetime field.
"""
dehydrated_type = 'datetime'
help_text = 'A date & time as a string. Ex: "2010-11-10T03:07:43"'
def convert(self, value):
if value is None:
return None
if isinstance(value, basestring):
match = DATETIME_REGEX.search(value)
if match:
data = match.groupdict()
return make_aware(datetime_safe.datetime(int(data['year']), int(data['month']), int(data['day']), int(data['hour']), int(data['minute']), int(data['second'])))
else:
raise ApiFieldError("Datetime provided to '%s' field doesn't appear to be a valid datetime string: '%s'" % (self.instance_name, value))
return value
def hydrate(self, bundle):
value = super(DateTimeField, self).hydrate(bundle)
if value and not hasattr(value, 'year'):
try:
# Try to rip a date/datetime out of it.
value = make_aware(parse(value))
except ValueError:
pass
return value
class RelatedField(ApiField):
"""
Provides access to data that is related within the database.
The ``RelatedField`` base class is not intended for direct use but provides
functionality that ``ToOneField`` and ``ToManyField`` build upon.
The contents of this field actually point to another ``Resource``,
rather than the related object. This allows the field to represent its data
in different ways.
The abstractions based around this are "leaky" in that, unlike the other
fields provided by ``tastypie``, these fields don't handle arbitrary objects
very well. The subclasses use Django's ORM layer to make things go, though
there is no ORM-specific code at this level.
"""
dehydrated_type = 'related'
is_related = True
self_referential = False
help_text = 'A related resource. Can be either a URI or set of nested resource data.'
def __init__(self, to, attribute, related_name=None, default=NOT_PROVIDED, null=False, blank=False, readonly=False, full=False, unique=False, help_text=None):
"""
Builds the field and prepares it to access to related data.
The ``to`` argument should point to a ``Resource`` class, NOT
to a ``Model``. Required.
The ``attribute`` argument should specify what field/callable points to
the related data on the instance object. Required.
Optionally accepts a ``related_name`` argument. Currently unused, as
unlike Django's ORM layer, reverse relations between ``Resource``
classes are not automatically created. Defaults to ``None``.
Optionally accepts a ``null``, which indicated whether or not a
``None`` is allowable data on the field. Defaults to ``False``.
Optionally accepts a ``blank``, which indicated whether or not
data may be omitted on the field. Defaults to ``False``.
Optionally accepts a ``readonly``, which indicates whether the field
is used during the ``hydrate`` or not. Defaults to ``False``.
Optionally accepts a ``full``, which indicates how the related
``Resource`` will appear post-``dehydrate``. If ``False``, the
related ``Resource`` will appear as a URL to the endpoint of that
resource. If ``True``, the result of the sub-resource's
``dehydrate`` will be included in full.
Optionally accepts a ``unique``, which indicates if the field is a
unique identifier for the object.
Optionally accepts ``help_text``, which lets you provide a
human-readable description of the field exposed at the schema level.
Defaults to the per-Field definition.
"""
self.instance_name = None
self._resource = None
self.to = to
self.attribute = attribute
self.related_name = related_name
self._default = default
self.null = null
self.blank = blank
self.readonly = readonly
self.full = full
self.api_name = None
self.resource_name = None
self.unique = unique
self._to_class = None
if self.to == 'self':
self.self_referential = True
self._to_class = self.__class__
if help_text:
self.help_text = help_text
def contribute_to_class(self, cls, name):
super(RelatedField, self).contribute_to_class(cls, name)
# Check if we're self-referential and hook it up.
# We can't do this quite like Django because there's no ``AppCache``
# here (which I think we should avoid as long as possible).
if self.self_referential or self.to == 'self':
self._to_class = cls
def get_related_resource(self, related_instance=None):
"""
Instantiates the related resource.
"""
instance = self.to_class(api_name=self.api_name)
instance.api_name = self.api_name
return instance
@property
def to_class(self):
# We need to be lazy here, because when the metaclass constructs the
# Resources, other classes may not exist yet.
# That said, memoize this so we never have to relookup/reimport.
if self._to_class:
return self._to_class
if not isinstance(self.to, basestring):
self._to_class = self.to
return self._to_class
# It's a string. Let's figure it out.
if '.' in self.to:
# Try to import.
module_bits = self.to.split('.')
module_path, class_name = '.'.join(module_bits[:-1]), module_bits[-1]
module = importlib.import_module(module_path)
else:
# We've got a bare class name here, which won't work (No AppCache
# to rely on). Try to throw a useful error.
raise ImportError("Tastypie requires a Python-style path (<module.module.Class>) to lazy load related resources. Only given '%s'." % self.to)
self._to_class = getattr(module, class_name, None)
if self._to_class is None:
raise ImportError("Module '%s' does not appear to have a class called '%s'." % (module_path, class_name))
return self._to_class
def dehydrate_related(self, bundle, related_resource, related_instance):
"""
Based on the ``full_resource``, returns either the endpoint or the data
from ``full_dehydrate`` for the related resource.
"""
if not self.full:
# Be a good netizen.
return related_resource.get_resource_uri(bundle)
else:
# ZOMG extra data and big payloads.
bundle = related_resource.build_bundle(obj=related_instance, request=bundle.request)
return related_resource.full_dehydrate(bundle)
def resource_from_uri(self, fk_resource, uri, request=None, related_obj=None, related_name=None):
"""
Given a URI is provided, the related resource is attempted to be
loaded based on the identifiers in the URI.
"""
try:
obj = fk_resource.get_via_uri(uri, request=request)
bundle = fk_resource.build_bundle(obj=obj, request=request)
return fk_resource.full_dehydrate(bundle)
except ObjectDoesNotExist:
raise ApiFieldError("Could not find the provided object via resource URI '%s'." % uri)
def resource_from_data(self, fk_resource, data, request=None, related_obj=None, related_name=None):
"""
Given a dictionary-like structure is provided, a fresh related
resource is created using that data.
"""
# Try to hydrate the data provided.
data = dict_strip_unicode_keys(data)
fk_bundle = fk_resource.build_bundle(data=data, request=request)
if related_obj:
fk_bundle.related_obj = related_obj
fk_bundle.related_name = related_name
# We need to check to see if updates are allowed on the FK
# resource. If not, we'll just return a populated bundle instead
# of mistakenly updating something that should be read-only.
if not fk_resource.can_update():
# If the resource already exists and the client specified where to find it, we look it up.
if 'resource_uri' in data:
obj = fk_resource.get_via_uri(data['resource_uri'], request=request)
fk_bundle.install_existing_obj( obj )
return fk_bundle
# If the resource supports creation, then we can full_hydrate() and create a new instance.
elif fk_resource.can_create():
return fk_resource.full_hydrate(fk_bundle)
else:
raise ApiFieldError("Resource %s does not support being created via POST" %
fk_resource._meta.resource_name)
try:
return fk_resource.obj_update(fk_bundle, **data)
except NotFound:
try:
# Attempt lookup by primary key
lookup_kwargs = dict((k, v) for k, v in data.iteritems() if getattr(fk_resource, k).unique)
if not lookup_kwargs:
raise NotFound()
return fk_resource.obj_update(fk_bundle, **lookup_kwargs)
except NotFound:
fk_bundle = fk_resource.full_hydrate(fk_bundle)
fk_resource.is_valid(fk_bundle, request)
return fk_bundle
except MultipleObjectsReturned:
return fk_resource.full_hydrate(fk_bundle)
def resource_from_pk(self, fk_resource, obj, request=None, related_obj=None, related_name=None):
"""
Given an object with a ``pk`` attribute, the related resource
is attempted to be loaded via that PK.
"""
bundle = fk_resource.build_bundle(obj=obj, request=request)
return fk_resource.full_dehydrate(bundle)
def build_related_resource(self, value, request=None, related_obj=None, related_name=None):
"""
Returns a bundle of data built by the related resource, usually via
``hydrate`` with the data provided.
Accepts either a URI, a data dictionary (or dictionary-like structure)
or an object with a ``pk``.
"""
self.fk_resource = self.to_class(api_name=self.api_name)
kwargs = {
'request': request,
'related_obj': related_obj,
'related_name': related_name,
}
if isinstance(value, basestring):
# We got a URI. Load the object and assign it.
return self.resource_from_uri(self.fk_resource, value, **kwargs)
elif isinstance(value, Bundle):
# We got a valid bundle object, the RelatedField had full=True
return value
elif isinstance(value, dict):
# We've got a data dictionary.
# Since this leads to creation, this is the only one of these
# methods that might care about "parent" data.
return self.resource_from_data(self.fk_resource, value, **kwargs)
elif hasattr(value, 'pk'):
# We've got an object with a primary key.
return self.resource_from_pk(self.fk_resource, value, **kwargs)
else:
raise ApiFieldError("The '%s' field was given data that was not a URI, not a dictionary-alike and does not have a 'pk' attribute: %s." % (self.instance_name, value))
class ToOneField(RelatedField):
"""
Provides access to related data via foreign key.
This subclass requires Django's ORM layer to work properly.
"""
help_text = 'A single related resource. Can be either a URI or set of nested resource data.'
def __init__(self, to, attribute, related_name=None, default=NOT_PROVIDED,
null=False, blank=False, readonly=False, full=False,
unique=False, help_text=None):
super(ToOneField, self).__init__(
to, attribute, related_name=related_name, default=default,
null=null, blank=blank, readonly=readonly, full=full,
unique=unique, help_text=help_text
)
self.fk_resource = None
def dehydrate(self, bundle):
foreign_obj = None
if isinstance(self.attribute, basestring):
attrs = self.attribute.split('__')
foreign_obj = bundle.obj
for attr in attrs:
previous_obj = foreign_obj
try:
foreign_obj = getattr(foreign_obj, attr, None)
except ObjectDoesNotExist:
foreign_obj = None
elif callable(self.attribute):
foreign_obj = self.attribute(bundle)
if not foreign_obj:
if not self.null:
raise ApiFieldError("The model '%r' has an empty attribute '%s' and doesn't allow a null value." % (previous_obj, attr))
return None
self.fk_resource = self.get_related_resource(foreign_obj)
fk_bundle = Bundle(obj=foreign_obj, request=bundle.request)
return self.dehydrate_related(fk_bundle, self.fk_resource, foreign_obj)
def hydrate(self, bundle):
value = super(ToOneField, self).hydrate(bundle)
if value is None:
return value
return self.build_related_resource(value, request=bundle.request)
class ForeignKey(ToOneField):
"""
A convenience subclass for those who prefer to mirror ``django.db.models``.
"""
pass
class OneToOneField(ToOneField):
"""
A convenience subclass for those who prefer to mirror ``django.db.models``.
"""
pass
class ToManyField(RelatedField):
"""
Provides access to related data via a join table.
This subclass requires Django's ORM layer to work properly.
Note that the ``hydrate`` portions of this field are quite different than
any other field. ``hydrate_m2m`` actually handles the data and relations.
This is due to the way Django implements M2M relationships.
"""
is_m2m = True
help_text = 'Many related resources. Can be either a list of URIs or list of individually nested resource data.'
def __init__(self, to, attribute, related_name=None, default=NOT_PROVIDED,
null=False, blank=False, readonly=False, full=False,
unique=False, help_text=None):
super(ToManyField, self).__init__(
to, attribute, related_name=related_name, default=default,
null=null, blank=blank, readonly=readonly, full=full,
unique=unique, help_text=help_text
)
self.m2m_bundles = []
def dehydrate(self, bundle):
if not bundle.obj or not bundle.obj.pk:
if not self.null:
raise ApiFieldError("The model '%r' does not have a primary key and can not be used in a ToMany context." % bundle.obj)
return []
the_m2ms = None
previous_obj = bundle.obj
attr = self.attribute
if isinstance(self.attribute, basestring):
attrs = self.attribute.split('__')
the_m2ms = bundle.obj
for attr in attrs:
previous_obj = the_m2ms
try:
the_m2ms = getattr(the_m2ms, attr, None)
except ObjectDoesNotExist:
the_m2ms = None
if not the_m2ms:
break
elif callable(self.attribute):
the_m2ms = self.attribute(bundle)
if not the_m2ms:
if not self.null:
raise ApiFieldError("The model '%r' has an empty attribute '%s' and doesn't allow a null value." % (previous_obj, attr))
return []
self.m2m_resources = []
m2m_dehydrated = []
# TODO: Also model-specific and leaky. Relies on there being a
# ``Manager`` there.
for m2m in the_m2ms.all():
m2m_resource = self.get_related_resource(m2m)
m2m_bundle = Bundle(obj=m2m, request=bundle.request)
self.m2m_resources.append(m2m_resource)
m2m_dehydrated.append(self.dehydrate_related(m2m_bundle, m2m_resource, m2m))
return m2m_dehydrated
def hydrate(self, bundle):
pass
def hydrate_m2m(self, bundle):
if self.readonly:
return None
if bundle.data.get(self.instance_name) is None:
if self.blank:
return []
elif self.null:
return []
else:
raise ApiFieldError("The '%s' field has no data and doesn't allow a null value." % self.instance_name)
m2m_hydrated = []
for value in bundle.data.get(self.instance_name):
if value is None:
continue
kwargs = {
'request': bundle.request,
}
if self.related_name:
kwargs['related_obj'] = bundle.obj
kwargs['related_name'] = self.related_name
m2m_hydrated.append(self.build_related_resource(value, **kwargs))
return m2m_hydrated
class ManyToManyField(ToManyField):
"""
A convenience subclass for those who prefer to mirror ``django.db.models``.
"""
pass
class OneToManyField(ToManyField):
"""
A convenience subclass for those who prefer to mirror ``django.db.models``.
"""
pass
class TimeField(ApiField):
dehydrated_type = 'time'
help_text = 'A time as string. Ex: "20:05:23"'
def dehydrate(self, obj):
return self.convert(super(TimeField, self).dehydrate(obj))
def convert(self, value):
if isinstance(value, basestring):
return self.to_time(value)
return value
def to_time(self, s):
try:
dt = parse(s)
except ValueError, e:
raise ApiFieldError(str(e))
else:
return datetime.time(dt.hour, dt.minute, dt.second)
def hydrate(self, bundle):
value = super(TimeField, self).hydrate(bundle)
if value and not isinstance(value, datetime.time):
value = self.to_time(value)
return value
| 34.659328 | 177 | 0.608137 | [
"BSD-3-Clause"
] | handshake/django-tastypie | tastypie/fields.py | 29,911 | Python |
from setuptools import setup, find_packages
setup(
name='bigquery-downloader',
version='3.0.0',
description="A script for downloading BigQuery data sets that are organized by day",
install_requires=[
'google-cloud-bigquery>=1.24.0',
'click>=6.0'
],
packages=find_packages(),
author='Mara contributors',
license='MIT',
entry_points={
'console_scripts': [
'download-bigquery-data=bigquery_downloader.cli:download_data'
]
},
python_requires='>=3.6'
)
| 20.846154 | 88 | 0.630996 | [
"MIT"
] | DeveloperGao16/bigquery-downloader | setup.py | 542 | Python |
# coding: utf-8
"""
UltraCart Rest API V2
UltraCart REST API Version 2
OpenAPI spec version: 2.0.0
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class CouponFreeItemAndShippingWithSubtotal(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'currency_code': 'str',
'items': 'list[str]',
'limit': 'int',
'shipping_methods': 'list[str]',
'subtotal_amount': 'float'
}
attribute_map = {
'currency_code': 'currency_code',
'items': 'items',
'limit': 'limit',
'shipping_methods': 'shipping_methods',
'subtotal_amount': 'subtotal_amount'
}
def __init__(self, currency_code=None, items=None, limit=None, shipping_methods=None, subtotal_amount=None):
"""
CouponFreeItemAndShippingWithSubtotal - a model defined in Swagger
"""
self._currency_code = None
self._items = None
self._limit = None
self._shipping_methods = None
self._subtotal_amount = None
self.discriminator = None
if currency_code is not None:
self.currency_code = currency_code
if items is not None:
self.items = items
if limit is not None:
self.limit = limit
if shipping_methods is not None:
self.shipping_methods = shipping_methods
if subtotal_amount is not None:
self.subtotal_amount = subtotal_amount
@property
def currency_code(self):
"""
Gets the currency_code of this CouponFreeItemAndShippingWithSubtotal.
The ISO-4217 three letter currency code the customer is viewing prices in
:return: The currency_code of this CouponFreeItemAndShippingWithSubtotal.
:rtype: str
"""
return self._currency_code
@currency_code.setter
def currency_code(self, currency_code):
"""
Sets the currency_code of this CouponFreeItemAndShippingWithSubtotal.
The ISO-4217 three letter currency code the customer is viewing prices in
:param currency_code: The currency_code of this CouponFreeItemAndShippingWithSubtotal.
:type: str
"""
if currency_code is not None and len(currency_code) > 3:
raise ValueError("Invalid value for `currency_code`, length must be less than or equal to `3`")
self._currency_code = currency_code
@property
def items(self):
"""
Gets the items of this CouponFreeItemAndShippingWithSubtotal.
A list of items that are eligible for this discount_price.
:return: The items of this CouponFreeItemAndShippingWithSubtotal.
:rtype: list[str]
"""
return self._items
@items.setter
def items(self, items):
"""
Sets the items of this CouponFreeItemAndShippingWithSubtotal.
A list of items that are eligible for this discount_price.
:param items: The items of this CouponFreeItemAndShippingWithSubtotal.
:type: list[str]
"""
self._items = items
@property
def limit(self):
"""
Gets the limit of this CouponFreeItemAndShippingWithSubtotal.
The limit of free items that may be received when purchasing multiple items
:return: The limit of this CouponFreeItemAndShippingWithSubtotal.
:rtype: int
"""
return self._limit
@limit.setter
def limit(self, limit):
"""
Sets the limit of this CouponFreeItemAndShippingWithSubtotal.
The limit of free items that may be received when purchasing multiple items
:param limit: The limit of this CouponFreeItemAndShippingWithSubtotal.
:type: int
"""
self._limit = limit
@property
def shipping_methods(self):
"""
Gets the shipping_methods of this CouponFreeItemAndShippingWithSubtotal.
One or more shipping methods that may be free
:return: The shipping_methods of this CouponFreeItemAndShippingWithSubtotal.
:rtype: list[str]
"""
return self._shipping_methods
@shipping_methods.setter
def shipping_methods(self, shipping_methods):
"""
Sets the shipping_methods of this CouponFreeItemAndShippingWithSubtotal.
One or more shipping methods that may be free
:param shipping_methods: The shipping_methods of this CouponFreeItemAndShippingWithSubtotal.
:type: list[str]
"""
self._shipping_methods = shipping_methods
@property
def subtotal_amount(self):
"""
Gets the subtotal_amount of this CouponFreeItemAndShippingWithSubtotal.
The amount of subtotal required to receive the discount percent
:return: The subtotal_amount of this CouponFreeItemAndShippingWithSubtotal.
:rtype: float
"""
return self._subtotal_amount
@subtotal_amount.setter
def subtotal_amount(self, subtotal_amount):
"""
Sets the subtotal_amount of this CouponFreeItemAndShippingWithSubtotal.
The amount of subtotal required to receive the discount percent
:param subtotal_amount: The subtotal_amount of this CouponFreeItemAndShippingWithSubtotal.
:type: float
"""
self._subtotal_amount = subtotal_amount
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, CouponFreeItemAndShippingWithSubtotal):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| 30.26556 | 112 | 0.621744 | [
"Apache-2.0"
] | gstingy/uc_python_api | ultracart/models/coupon_free_item_and_shipping_with_subtotal.py | 7,294 | Python |
import RPi.GPIO as GPIO
import time,sys, datetime, json, requests
from requests.exceptions import ConnectionError, Timeout, TooManyRedirects
'''
Configure raspberry
'''
GPIO.setmode(GPIO.BCM)
inpt = 13
GPIO.setup(inpt,GPIO.IN)
'''
Configure some global variables
'''
current_input = GPIO.input(inpt) # This is used to compare to the new_input later.
total_rotations = 0 # This is a counter. It gets reset after the number of seconds in rotation_downtime.
cup_movements = 200 # This is how many rotations occur as a cup of liquid passes through.
rotation_downtime = 5 # Sets the cut-off time for establishing a water-flow event.
last_movement_time = time.time() + rotation_downtime # This is used to determine if a new water-flow event should be created.
record_data = False # A flag used to trigger database insert.
data = []
print('Control C to exit')
def commit_data(data):
'''
    This POSTs the most recent reading to the local collection endpoint as a single row. It then resets/empties data.
'''
url = 'http://localhost:1880/sensor'
headers = {
'Accepts': 'application/json'
}
print(f"1: {data[0]}")
send_jsn = json.dumps({"Movements": data[0][1], "Cups": data[0][2], "Gallons": data[0][3], "Liters": data[0][4]})
try:
response = requests.post(url, data=send_jsn, headers=headers)
print(response.text)
except (ConnectionError, Timeout, TooManyRedirects) as e:
print(e)
data = []
return data
def prep_and_send(data,total_rotations):
'''
Calculates measurements (cups and gallons). Prepares the data into a database-friendly tuple. Appends that tuple to a list.
It then tries to connect to database. If it is not successful then it does nothing but saves the data; it will try to send
the list of data-tuples the next time there is a water-flow event.
Once the connection is successful data is emptied in commit_data().
'''
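    # Volume conversion: cup_movements pulses per cup (sensor calibration), 16 cups per US gallon, 3.78541 liters per gallon.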
total_cups = total_rotations/cup_movements
total_gallons = total_cups/16
total_liters = total_gallons*3.78541
now = datetime.datetime.now()
print('{}: Movements: {}. \nCups: {}. \nGallons: {}. \nLiters: {}'.format(now,total_rotations,total_cups,total_gallons,total_liters))
current_data = (
now,
round(total_rotations,2),
round(total_cups,2),
round(total_gallons,2),
round(total_liters,2),
)
data.append(current_data)
print(f"datos: {data}")
data = commit_data(data)
return data
while True:
'''
This is what actually runs the whole time.
It first checks to see if new_input is different from current_input. This would be the case if there was a rotation.
Once it detects that the input is different it knows water is flowing.
It starts tracking the total_rotations and when the last rotation occured.
After each rotation it refreshes the value of the last rotation time.
It waits a few seconds (rotation_downtime) after the last rotation time to make sure the water has stopped.
Once the water stops it passes the total_rotations to prep_and_send().
It also passes 'data' which is any previous water-flow events that were not successfully sent at the time they were recorded.
'''
new_input = GPIO.input(inpt)
if new_input != current_input:
total_rotations += 1
        if time.time() <= last_movement_time: # still within rotation_downtime of the previous rotation
record_data = True
current_input = new_input
last_movement_time = time.time() + rotation_downtime
else: #flow starts
last_movement_time = time.time() + rotation_downtime
elif record_data == True and time.time() > last_movement_time: #if it's been x seconds since last change
data = prep_and_send(data,total_rotations)
record_data = False
total_rotations = 0
last_movement_time = time.time() + rotation_downtime
current_input = new_input
try:
None
#print('New input: ',new_input, '. Current input: ', current_input, '. Movements: ', total_rotations)
except KeyboardInterrupt:
print('\nCTRL C - Exiting nicely')
GPIO.cleanup()
sys.exit()
| 36.758333 | 140 | 0.65382 | [
"Apache-2.0"
] | ggreeve/add-pwo | software/read-sensor-python/waterFlow/waterFlowMeter.py | 4,411 | Python |
import re
import traceback
import subprocess
from serviceDB import ServiceDB
class NeadmServiceWrapper:
_service_list_cmd = ['/opt/nedge/neadm/neadm', 'service', 'list']
# _status_cmd = ['/opt/nedge/neadm/fake-neadm-status.sh']
_service_list_header = re.compile("^.*TYPE.*NAME.*SERVERID.*STATUS.*$")
# unit_id key well be added during parsing of each line
_service_list_names = ['type', 'name', 'sid', 'status']
def __init__(self, db):
self.exit_code = 0
self.db = ServiceDB(db)
def get_exit_code(self):
return self.exit_code
def get_raw_output(self, command):
try:
output = subprocess.check_output(command, stderr=subprocess.STDOUT)
self.exit_code = 0
return output
except subprocess.CalledProcessError as ex:
self.exit_code = ex.returncode
return ex.output
except Exception as e:
self.exit_code = 1
return "Failed to start {0} command.' \
Exeption {1}".format(command, e.output)
def get_all_services(self):
output = self.get_raw_output(NeadmServiceWrapper._service_list_cmd)
# print(output)
result = NeadmServiceList()
# error exit code
if self.exit_code:
result.exit_code = self.exit_code
result.output = output
return result
output_array = output.split('\n')
for line in output_array:
# print(line)
if NeadmServiceWrapper._service_list_header.match(line):
continue
params = line.split()
# print(params)
# print(len(params))
if len(params) < 4:
continue
service_record = {}
for name in NeadmServiceWrapper._service_list_names:
service_record[name] = params[
NeadmServiceWrapper._service_list_names.index(name)]
# check ServiceDB for sid and unit_id already joined
# add unit_id key
db_record = self.db.find(sid=service_record['sid'],
service_name=service_record['name'])
if len(db_record) == 1:
service_record['unit_id'] = db_record[0]['unit_id']
else:
service_record['unit_id'] = ''
# print(node)
result.append(service_record)
# print(status)
return result
def exec_cmd(self, cmd_name, cmd):
try:
print("\t{0} cmd is {1}".format(cmd_name, ' '.join(cmd)))
subprocess.check_output(cmd)
except Exception as ex:
raise Exception('in {0}\nMessage:{1}\nTrace: {2}'.format(
self.__class__.__name__, ex.message, traceback.format_exc()))
# is node included into service nodes list
def is_node_exist(self, service_name, sid):
services = self.get_all_services()
return services.is_already_in_service(service_name, sid)
# is iscsi service already created
def is_service_exist(self, service_name):
services = self.get_all_services()
return services.is_service_exist(service_name)
# create new iscsi(cinder) service by name
def create_iscsi_service(self, service_name):
cmd = ['/opt/nedge/neadm/neadm', 'service', 'create', 'iscsi',
service_name]
if not self.is_service_exist(service_name):
self.exec_cmd('create_iscsi_service', cmd)
else:
print("create_iscsi_service: Service {} already exist!".format(
service_name))
# create new swift service by name
def create_swift_service(self, service_name):
cmd = ['/opt/nedge/neadm/neadm', 'service', 'create', 'swift',
service_name]
if not self.is_service_exist(service_name):
self.exec_cmd('create_swift_service', cmd)
else:
print("create_swift_service: Service {} already exist!".format(
service_name))
# remove iscsi service by name
def delete_service(self, service_name):
cmd = ['/opt/nedge/neadm/neadm', 'service', 'delete', service_name]
if self.is_service_exist(service_name):
self.exec_cmd('delete_service', cmd)
else:
print("remove_iscsi_service: {0} service does not exist".format(
service_name))
def is_service_enabled(self, service_name):
services = self.get_all_services()
return services.is_service_enabled(service_name)
# serve command, apply swift servie to cluster
def serve_service(self, service_name, cluster_name):
cmd = ['/opt/nedge/neadm/neadm', 'service', 'serve', service_name,
cluster_name]
if not self.is_service_exist(service_name):
print("serve_service: Service {} does not exist".format(
service_name))
return
self.exec_cmd('serve_service', cmd)
# enable service if exist
def enable_service(self, service_name):
cmd = ['/opt/nedge/neadm/neadm', 'service', 'enable', service_name]
if not self.is_service_exist(service_name):
print("enable_service: Service {} does not exist".format(
service_name))
return
if not self.is_service_enabled(service_name):
self.exec_cmd('enable_service', cmd)
else:
print("enable_service: Service {} already enabled".format(
service_name))
def disable_service(self, service_name):
cmd = ['/opt/nedge/neadm/neadm', 'service', 'disable', service_name]
if not self.is_service_exist(service_name):
print("disable_service: Service {} does not exist".format(
service_name))
return
if self.is_service_enabled(service_name):
self.exec_cmd('disable_service', cmd)
else:
print("disable_service: Service {} already disabled".format(
service_name))
def add_node_to_service(self, service_name, sid, unit_id):
cmd = ['/opt/nedge/neadm/neadm', 'service', 'add', service_name, sid]
if not self.is_node_exist(service_name, sid):
self.exec_cmd('add_node_to_service', cmd)
# add node to persistent db
# self.db.add(sid, unit_id, service_name)
else:
print("\tadd_node_to_service:"
"Node {0} already exist as service node".format(sid))
self.db.add(sid, unit_id, service_name)
def get_service_node_count(self, service_name):
services = self.get_all_services()
return len(services.get_service_nodes(service_name))
def remove_node_by_unit_id(self, unit_id):
service = self.db.find(unit_id=unit_id)
if len(service) > 0:
sid = service[0]['sid']
service_name = service[0]['service']
self.remove_node_from_service(service_name, sid, unit_id)
else:
print("Can't find service by unit_id:{}".format(unit_id))
def disable_service_by_unit_id(self, unit_id):
service = self.db.find(unit_id=unit_id)
if len(service) > 0:
service_name = service[0]['service']
print("service to disable is :{}".format(service_name))
self.disable_service(service_name)
else:
print("Can't find service by unit_id:{}".format(unit_id))
def remove_node_from_service(self, service_name, sid, unit_id):
cmd = ['/opt/nedge/neadm/neadm', 'service', 'remove', service_name,
sid]
if self.is_node_exist(service_name, sid):
self.exec_cmd('remove_node_from_service', cmd)
node_count = self.get_service_node_count(service_name)
if node_count == 0:
self.delete_service(service_name)
else:
print("\tremove_node_from_service: "
"Node {} does not exist to remove".format(sid))
# remove from persistent db
self.db.remove(sid, unit_id)
def print_services(self):
service_list = self.get_all_services()
service_list.show()
class NeadmServiceList:
def __init__(self):
# service records array
self.service_records = []
self.exit_code = 0
self.output = ""
def is_correct(self):
return True if self.exit_code == 0 else False
def get_all(self):
return self.service_records
    def get_service_nodes(self, service_name):
        return [s for s in self.service_records
                if s['name'] == service_name and s['sid'] != '-']
    def get_iscsi_nodes(self):
        return [s for s in self.service_records
                if s['type'] == 'iscsi' and s['sid'] != '-']
    def get_iscsi_nodes_by_service_name(self, service_name):
        return [s for s in self.service_records
                if s['type'] == 'iscsi' and
                s['name'] == service_name and s['sid'] != '-']
    def get_swift_nodes(self):
        return [s for s in self.service_records
                if s['type'] == 'swift' and s['sid'] != '-']
    def get_swift_nodes_by_service_name(self, service_name):
        return [s for s in self.service_records
                if s['type'] == 'swift' and
                s['name'] == service_name and s['sid'] != '-']
    # is the node present anywhere in the services list
    def is_already_listed(self, sid):
        return any(s['sid'] == sid for s in self.service_records)
    # is the node already part of the given service
    def is_already_in_service(self, service_name, sid):
        return any(s['sid'] == sid and s['name'] == service_name
                   for s in self.service_records)
    def is_service_exist(self, service_name):
        return any(s['name'] == service_name for s in self.service_records)
def is_service_enabled(self, service_name):
nodes = self.get_service_nodes(service_name)
print(nodes)
if len(nodes) > 0:
if nodes[0]['status'] == 'enabled':
return True
return False
def append(self, service_record):
self.service_records.append(service_record)
    def show(self):
        header = "{0:<8}{1:<20}{2:<36}{3:<12}{4:<16}".format(
            "TYPE", "NAME", "SERVERID", "STATUS", "UNIT_ID")
        print(header)
        for record in self.service_records:
            print("{0:<8}{1:<20}{2:<36}{3:<12}{4:<16}".format(
                record['type'],
                record['name'],
                record['sid'],
                record['status'],
                record['unit_id']))
        print("")
| 36.598706 | 79 | 0.58564 | [
"Apache-2.0"
] | Nexenta/JujuCharm | nexentaedge/neadmServiceWrapper.py | 11,309 | Python |
#!/usr/bin/env python3
#
# Copyright (c) 2019 LG Electronics, Inc.
#
# This software contains code licensed as described in LICENSE.
#
import os
import lgsvl
import random
import time
from pathlib import Path
import json
sim = lgsvl.Simulator(os.environ.get("SIMULATOR_HOST", "127.0.0.1"), 8181)
layer_mask = 0
layer_mask |= 1 << 0 # 0 is the layer for the road (default)
if sim.current_scene == "SanFrancisco":
sim.reset()
else:
sim.load("SanFrancisco")
# if sim.current_scene == "Testbed":
# sim.reset()
# else:
# sim.load("Testbed")
spawns = sim.get_spawn()
spawns[0].position.x = 705.6
spawns[0].position.y = 10.1
spawns[0].position.z = -308.7
spawns[0].rotation.y -= 95
forward = lgsvl.utils.transform_to_forward(spawns[0])
right = lgsvl.utils.transform_to_right(spawns[0])
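# forward/right are unit vectors in the spawn's frame; NPC positions below are expressed as offsets along them.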
state = lgsvl.AgentState()
# state.transform.position = spawns[0].position
state.transform.position = spawns[0].position
state.transform.rotation = spawns[0].rotation
ego = sim.add_agent("SingleLiDAR (Autoware)", lgsvl.AgentType.EGO, state)
ego.connect_bridge(os.environ.get("BRIDGE_HOST", "127.0.0.1"), 9090)
#------- Stand vehicle -------#
# set the stationary ("stand") NPC vehicles' initial positions
pose_arr = [
(-3, 5),
(-3, 10),
(-3, 15),
(-3, 20),
(-5, 25),
(3, 30),
(-1, 40),
(-6, 33)
]
sv_state_arr = []
for (x, y) in pose_arr:
sv_state_arr.append(lgsvl.AgentState())
sv_state_arr[-1].transform.position = spawns[0].position + y * forward + x * right
sv_state_arr[-1].transform.rotation = spawns[0].rotation
_ = sim.add_agent("Sedan", lgsvl.AgentType.NPC, sv_state_arr[-1])
# for i in range(30):
# sv_state_arr.append(lgsvl.AgentState())
# sv_state_arr[-1].transform.position = spawns[0].position + (150 + i * 7) * forward + 3.5 * right
# sv_state_arr[-1].transform.rotation = spawns[0].rotation
# _ = sim.add_agent("Sedan", lgsvl.AgentType.NPC, sv_state_arr[-1])
# for i in range(30):
# sv_state_arr.append(lgsvl.AgentState())
# sv_state_arr[-1].transform.position = spawns[0].position + (150 + i * 7) * forward - 6 * right
# sv_state_arr[-1].transform.rotation = spawns[0].rotation
# _ = sim.add_agent("Sedan", lgsvl.AgentType.NPC, sv_state_arr[-1])
sim.run()
| 24.977273 | 100 | 0.681984 | [
"MIT"
] | rubis-lab/Autoware_NDT | autoware.ai/autoware_files/lgsvl_file/scripts/testbed_scenario/sanfrancisco.py | 2,198 | Python |
"""
Space object.
Refer: https://developer.twitter.com/en/docs/twitter-api/data-dictionary/object-model/space
"""
from dataclasses import dataclass, field
from typing import List, Optional
from .base import BaseModel
@dataclass
class Space(BaseModel):
"""
A class representing the space object.
"""
id: Optional[str] = field(default=None)
state: Optional[str] = field(default=None)
created_at: Optional[str] = field(default=None, repr=False)
host_ids: Optional[List[str]] = field(default=None, repr=False)
lang: Optional[str] = field(default=None, repr=False)
is_ticketed: Optional[bool] = field(default=None, repr=False)
invited_user_ids: Optional[List[str]] = field(default=None, repr=False)
participant_count: Optional[int] = field(default=None, repr=False)
scheduled_start: Optional[str] = field(default=None, repr=False)
speaker_ids: Optional[List[str]] = field(default=None, repr=False)
started_at: Optional[str] = field(default=None, repr=False)
title: Optional[str] = field(default=None, repr=False)
updated_at: Optional[str] = field(default=None, repr=False)
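# Illustrative construction (field values are made up): Space(id="1DXxyRYNejbKM", state="live", title="Hello Spaces")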
| 35.8125 | 95 | 0.71466 | [
"MIT"
] | MerleLiuKun/python-twitter | pytwitter/models/space.py | 1,146 | Python |
"""Make / Download Telegram Sticker Packs without installing Third Party applications
Available Commands:
.kangsticker [Optional Emoji]
.packinfo
.getsticker"""
from telethon import events
from io import BytesIO
from PIL import Image
import asyncio
import datetime
from collections import defaultdict
import math
import os
import requests
import shutil
import zipfile
from telethon.errors.rpcerrorlist import StickersetInvalidError
from telethon.errors import MessageNotModifiedError
from telethon.tl.functions.account import UpdateNotifySettingsRequest
from telethon.tl.functions.messages import GetStickerSetRequest
from telethon.tl.types import (
DocumentAttributeFilename,
DocumentAttributeSticker,
InputMediaUploadedDocument,
InputPeerNotifySettings,
InputStickerSetID,
InputStickerSetShortName,
MessageMediaPhoto
)
from uniborg.util import admin_cmd
@borg.on(admin_cmd(pattern="kangsticker ?(.*)"))
async def _(event):
if event.fwd_from:
return
if not event.is_reply:
await event.edit("Reply to a photo to add to my personal sticker pack.")
return
reply_message = await event.get_reply_message()
sticker_emoji = "🔥"
input_str = event.pattern_match.group(1)
if input_str:
sticker_emoji = input_str
me = borg.me
userid = event.from_id
packname = f"{userid}'s @MC0917 Pack"
packshortname = f"MC_0917_{userid}" # format: Uni_Borg_userid
is_a_s = is_it_animated_sticker(reply_message)
file_ext_ns_ion = "@MC0917_Sticker.png"
file = await borg.download_file(reply_message.media)
uploaded_sticker = None
if is_a_s:
file_ext_ns_ion = "AnimatedSticker.tgs"
uploaded_sticker = await borg.upload_file(file, file_name=file_ext_ns_ion)
packname = f"{userid}'s @AnimatedStickersGroup"
packshortname = f"MC_0917_{userid}_as" # format: Uni_Borg_userid
elif not is_message_image(reply_message):
await event.edit("Invalid message type")
return
else:
with BytesIO(file) as mem_file, BytesIO() as sticker:
resize_image(mem_file, sticker)
sticker.seek(0)
uploaded_sticker = await borg.upload_file(sticker, file_name=file_ext_ns_ion)
await event.edit("Processing this sticker. Please Wait!")
async with borg.conversation("@Stickers") as bot_conv:
now = datetime.datetime.now()
dt = now + datetime.timedelta(minutes=1)
if not await stickerset_exists(bot_conv, packshortname):
await silently_send_message(bot_conv, "/cancel")
if is_a_s:
response = await silently_send_message(bot_conv, "/newanimated")
else:
response = await silently_send_message(bot_conv, "/newpack")
if "Yay!" not in response.text:
await event.edit(f"**FAILED**! @Stickers replied: {response.text}")
return
response = await silently_send_message(bot_conv, packname)
if not response.text.startswith("Alright!"):
await event.edit(f"**FAILED**! @Stickers replied: {response.text}")
return
w = await bot_conv.send_file(
file=uploaded_sticker,
allow_cache=False,
force_document=True
)
response = await bot_conv.get_response()
if "Sorry" in response.text:
await event.edit(f"**FAILED**! @Stickers replied: {response.text}")
return
await silently_send_message(bot_conv, sticker_emoji)
await silently_send_message(bot_conv, "/publish")
response = await silently_send_message(bot_conv, f"<{packname}>")
await silently_send_message(bot_conv, "/skip")
response = await silently_send_message(bot_conv, packshortname)
if response.text == "Sorry, this short name is already taken.":
await event.edit(f"**FAILED**! @Stickers replied: {response.text}")
return
else:
await silently_send_message(bot_conv, "/cancel")
await silently_send_message(bot_conv, "/addsticker")
await silently_send_message(bot_conv, packshortname)
await bot_conv.send_file(
file=uploaded_sticker,
allow_cache=False,
force_document=True
)
response = await bot_conv.get_response()
if "Sorry" in response.text:
await event.edit(f"**FAILED**! @Stickers replied: {response.text}")
return
await silently_send_message(bot_conv, sticker_emoji)
await silently_send_message(bot_conv, "/done")
await event.edit(f"sticker added! Your pack can be found [here](t.me/addstickers/{packshortname})")
@borg.on(admin_cmd(pattern="packinfo"))
async def _(event):
if event.fwd_from:
return
if not event.is_reply:
await event.edit("Reply to any sticker to get it's pack info.")
return
rep_msg = await event.get_reply_message()
if not rep_msg.document:
await event.edit("Reply to any sticker to get it's pack info.")
return
stickerset_attr_s = rep_msg.document.attributes
stickerset_attr = find_instance(stickerset_attr_s, DocumentAttributeSticker)
if not stickerset_attr.stickerset:
await event.edit("sticker does not belong to a pack.")
return
get_stickerset = await borg(
GetStickerSetRequest(
InputStickerSetID(
id=stickerset_attr.stickerset.id,
access_hash=stickerset_attr.stickerset.access_hash
)
)
)
pack_emojis = []
for document_sticker in get_stickerset.packs:
if document_sticker.emoticon not in pack_emojis:
pack_emojis.append(document_sticker.emoticon)
await event.edit(f"**Sticker Title:** `{get_stickerset.set.title}\n`"
f"**Sticker Short Name:** `{get_stickerset.set.short_name}`\n"
f"**Official:** `{get_stickerset.set.official}`\n"
f"**Archived:** `{get_stickerset.set.archived}`\n"
f"**Stickers In Pack:** `{len(get_stickerset.packs)}`\n"
f"**Emojis In Pack:** {' '.join(pack_emojis)}")
@borg.on(admin_cmd(pattern="getsticker ?(.*)"))
async def _(event):
if event.fwd_from:
return
input_str = event.pattern_match.group(1)
if not os.path.isdir(Config.TMP_DOWNLOAD_DIRECTORY):
os.makedirs(Config.TMP_DOWNLOAD_DIRECTORY)
if event.reply_to_msg_id:
reply_message = await event.get_reply_message()
# https://gist.github.com/udf/e4e3dbb2e831c8b580d8fddd312714f7
if not reply_message.sticker:
return
sticker = reply_message.sticker
sticker_attrib = find_instance(sticker.attributes, DocumentAttributeSticker)
if not sticker_attrib.stickerset:
await event.reply("This sticker is not part of a pack")
return
is_a_s = is_it_animated_sticker(reply_message)
file_ext_ns_ion = "webp"
file_caption = "https://t.me/RoseSupportChat/33801"
if is_a_s:
file_ext_ns_ion = "tgs"
file_caption = "Forward the ZIP file to @AnimatedStickersRoBot to get lottIE JSON containing the vector information."
sticker_set = await borg(GetStickerSetRequest(sticker_attrib.stickerset))
pack_file = os.path.join(Config.TMP_DOWNLOAD_DIRECTORY, sticker_set.set.short_name, "pack.txt")
if os.path.isfile(pack_file):
os.remove(pack_file)
# Sticker emojis are retrieved as a mapping of
# <emoji>: <list of document ids that have this emoji>
# So we need to build a mapping of <document id>: <list of emoji>
# Thanks, Durov
emojis = defaultdict(str)
for pack in sticker_set.packs:
for document_id in pack.documents:
emojis[document_id] += pack.emoticon
async def download(sticker, emojis, path, file):
await borg.download_media(sticker, file=os.path.join(path, file))
with open(pack_file, "a") as f:
f.write(f"{{'image_file': '{file}','emojis':{emojis[sticker.id]}}},")
pending_tasks = [
asyncio.ensure_future(
download(document, emojis, Config.TMP_DOWNLOAD_DIRECTORY + sticker_set.set.short_name, f"{i:03d}.{file_ext_ns_ion}")
) for i, document in enumerate(sticker_set.documents)
]
await event.edit(f"Downloading {sticker_set.set.count} sticker(s) to .{Config.TMP_DOWNLOAD_DIRECTORY}{sticker_set.set.short_name}...")
num_tasks = len(pending_tasks)
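        # Poll the pending downloads in 2.5 s slices so the progress message can be updated as tasks complete.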
while 1:
done, pending_tasks = await asyncio.wait(pending_tasks, timeout=2.5,
return_when=asyncio.FIRST_COMPLETED)
try:
await event.edit(
f"Downloaded {num_tasks - len(pending_tasks)}/{sticker_set.set.count}")
except MessageNotModifiedError:
pass
if not pending_tasks:
break
await event.edit("Downloading to my local completed")
# https://gist.github.com/udf/e4e3dbb2e831c8b580d8fddd312714f7
directory_name = Config.TMP_DOWNLOAD_DIRECTORY + sticker_set.set.short_name
zipf = zipfile.ZipFile(directory_name + ".zip", "w", zipfile.ZIP_DEFLATED)
zipdir(directory_name, zipf)
zipf.close()
await borg.send_file(
event.chat_id,
directory_name + ".zip",
caption=file_caption,
force_document=True,
allow_cache=False,
reply_to=event.message.id,
progress_callback=progress
)
        try:
            os.remove(directory_name + ".zip")
            shutil.rmtree(directory_name)
        except OSError:
            pass
await event.edit("task Completed")
await asyncio.sleep(3)
await event.delete()
else:
await event.edit("TODO: Not Implemented")
# Helpers
def is_it_animated_sticker(message):
try:
if message.media and message.media.document:
mime_type = message.media.document.mime_type
if "tgsticker" in mime_type:
return True
else:
return False
else:
return False
except:
return False
def is_message_image(message):
if message.media:
if isinstance(message.media, MessageMediaPhoto):
return True
if message.media.document:
if message.media.document.mime_type.split("/")[0] == "image":
return True
return False
return False
async def silently_send_message(conv, text):
await conv.send_message(text)
response = await conv.get_response()
await conv.mark_read(message=response)
return response
async def stickerset_exists(conv, setname):
try:
await borg(GetStickerSetRequest(InputStickerSetShortName(setname)))
response = await silently_send_message(conv, "/addsticker")
if response.text == "Invalid pack selected.":
await silently_send_message(conv, "/cancel")
return False
await silently_send_message(conv, "/cancel")
return True
except StickersetInvalidError:
return False
def resize_image(image, save_locaton):
""" Copyright Rhyse Simpson:
https://github.com/skittles9823/SkittBot/blob/master/tg_bot/modules/stickers.py
"""
im = Image.open(image)
maxsize = (512, 512)
if (im.width and im.height) < 512:
size1 = im.width
size2 = im.height
if im.width > im.height:
scale = 512 / size1
size1new = 512
size2new = size2 * scale
else:
scale = 512 / size2
size1new = size1 * scale
size2new = 512
size1new = math.floor(size1new)
size2new = math.floor(size2new)
sizenew = (size1new, size2new)
im = im.resize(sizenew)
else:
im.thumbnail(maxsize)
im.save(save_locaton, "PNG")
def progress(current, total):
logger.info("Uploaded: {} of {}\nCompleted {}".format(current, total, (current / total) * 100))
def find_instance(items, class_or_tuple):
for item in items:
if isinstance(item, class_or_tuple):
return item
return None
def zipdir(path, ziph):
# ziph is zipfile handle
for root, dirs, files in os.walk(path):
for file in files:
ziph.write(os.path.join(root, file))
os.remove(os.path.join(root, file))
| 38.401813 | 142 | 0.636771 | [
"MPL-2.0",
"MPL-2.0-no-copyleft-exception"
] | FaisAFG/faisafg | stdplugins/stickers.py | 12,714 | Python |
import json
from hail.expr.types import hail_type
from hail.typecheck import *
from hail.utils.java import escape_str, escape_id
from .base_ir import *
from .matrix_writer import MatrixWriter
class I32(IR):
@typecheck_method(x=int)
def __init__(self, x):
super().__init__()
self.x = x
def copy(self):
new_instance = self.__class__
return new_instance(self.x)
def render(self, r):
return '(I32 {})'.format(self.x)
def __eq__(self, other):
return isinstance(other, I32) and \
other.x == self.x
class I64(IR):
@typecheck_method(x=int)
def __init__(self, x):
super().__init__()
self.x = x
def copy(self):
new_instance = self.__class__
return new_instance(self.x)
def render(self, r):
return '(I64 {})'.format(self.x)
def __eq__(self, other):
return isinstance(other, I64) and \
other.x == self.x
class F32(IR):
@typecheck_method(x=numeric)
def __init__(self, x):
super().__init__()
self.x = x
def copy(self):
new_instance = self.__class__
return new_instance(self.x)
def render(self, r):
return '(F32 {})'.format(self.x)
def __eq__(self, other):
return isinstance(other, F32) and \
other.x == self.x
class F64(IR):
@typecheck_method(x=numeric)
def __init__(self, x):
super().__init__()
self.x = x
def copy(self):
new_instance = self.__class__
return new_instance(self.x)
def render(self, r):
return '(F64 {})'.format(self.x)
def __eq__(self, other):
return isinstance(other, F64) and \
other.x == self.x
class Str(IR):
@typecheck_method(x=str)
def __init__(self, x):
super().__init__()
self.x = x
def copy(self):
new_instance = self.__class__
return new_instance(self.x)
def render(self, r):
return '(Str "{}")'.format(escape_str(self.x))
def __eq__(self, other):
return isinstance(other, Str) and \
other.x == self.x
class FalseIR(IR):
def __init__(self):
super().__init__()
def copy(self):
new_instance = self.__class__
return new_instance()
def render(self, r):
return '(False)'
def __eq__(self, other):
return isinstance(other, FalseIR)
class TrueIR(IR):
def __init__(self):
super().__init__()
def copy(self):
new_instance = self.__class__
return new_instance()
def render(self, r):
return '(True)'
def __eq__(self, other):
return isinstance(other, TrueIR)
class Void(IR):
def __init__(self):
super().__init__()
def copy(self):
new_instance = self.__class__
return new_instance()
def render(self, r):
return '(Void)'
def __eq__(self, other):
return isinstance(other, Void)
class Cast(IR):
@typecheck_method(v=IR, typ=hail_type)
def __init__(self, v, typ):
super().__init__(v)
self.v = v
self.typ = typ
@typecheck_method(v=IR)
def copy(self, v):
new_instance = self.__class__
return new_instance(v, self.typ)
def render(self, r):
return '(Cast {} {})'.format(self.typ._parsable_string(), r(self.v))
def __eq__(self, other):
return isinstance(other, Cast) and \
other.v == self.v and \
other.typ == self.typ
class NA(IR):
@typecheck_method(typ=hail_type)
def __init__(self, typ):
super().__init__()
self.typ = typ
def copy(self):
new_instance = self.__class__
return new_instance(self.typ)
def render(self, r):
return '(NA {})'.format(self.typ._parsable_string())
def __eq__(self, other):
return isinstance(other, NA) and \
other.typ == self.typ
class IsNA(IR):
@typecheck_method(value=IR)
def __init__(self, value):
super().__init__(value)
self.value = value
@typecheck_method(value=IR)
def copy(self, value):
new_instance = self.__class__
return new_instance(value)
def render(self, r):
return '(IsNA {})'.format(r(self.value))
def __eq__(self, other):
return isinstance(other, IsNA) and \
other.value == self.value
class If(IR):
@typecheck_method(cond=IR, cnsq=IR, altr=IR)
def __init__(self, cond, cnsq, altr):
super().__init__(cond, cnsq, altr)
self.cond = cond
self.cnsq = cnsq
self.altr = altr
@typecheck_method(cond=IR, cnsq=IR, altr=IR)
def copy(self, cond, cnsq, altr):
new_instance = self.__class__
return new_instance(cond, cnsq, altr)
def render(self, r):
return '(If {} {} {})'.format(r(self.cond), r(self.cnsq), r(self.altr))
def __eq__(self, other):
return isinstance(other, If) and \
other.cond == self.cond and \
other.cnsq == self.cnsq and \
other.altr == self.altr
class Let(IR):
@typecheck_method(name=str, value=IR, body=IR)
def __init__(self, name, value, body):
super().__init__(value, body)
self.name = name
self.value = value
self.body = body
@typecheck_method(value=IR, body=IR)
def copy(self, value, body):
new_instance = self.__class__
return new_instance(self.name, value, body)
def render(self, r):
return '(Let {} {} {})'.format(escape_id(self.name), r(self.value), r(self.body))
@property
def bound_variables(self):
return {self.name} | super().bound_variables
def __eq__(self, other):
return isinstance(other, Let) and \
other.name == self.name and \
other.value == self.value and \
other.body == self.body
class Ref(IR):
@typecheck_method(name=str)
def __init__(self, name):
super().__init__()
self.name = name
def copy(self):
new_instance = self.__class__
return new_instance(self.name)
def render(self, r):
return '(Ref {})'.format(escape_id(self.name))
def __eq__(self, other):
return isinstance(other, Ref) and \
other.name == self.name
class TopLevelReference(Ref):
@typecheck_method(name=str)
def __init__(self, name):
super().__init__(name)
@property
def is_nested_field(self):
return True
def copy(self):
new_instance = self.__class__
return new_instance(self.name)
def __eq__(self, other):
return isinstance(other, TopLevelReference) and \
other.name == self.name
class ApplyBinaryOp(IR):
@typecheck_method(op=str, l=IR, r=IR)
def __init__(self, op, l, r):
super().__init__(l, r)
self.op = op
self.l = l
self.r = r
@typecheck_method(l=IR, r=IR)
def copy(self, l, r):
new_instance = self.__class__
return new_instance(self.op, l, r)
def render(self, r):
return '(ApplyBinaryPrimOp {} {} {})'.format(escape_id(self.op), r(self.l), r(self.r))
def __eq__(self, other):
return isinstance(other, ApplyBinaryOp) and \
other.op == self.op and \
other.l == self.l and \
other.r == self.r
class ApplyUnaryOp(IR):
@typecheck_method(op=str, x=IR)
def __init__(self, op, x):
super().__init__(x)
self.op = op
self.x = x
@typecheck_method(x=IR)
def copy(self, x):
new_instance = self.__class__
return new_instance(self.op, x)
def render(self, r):
return '(ApplyUnaryPrimOp {} {})'.format(escape_id(self.op), r(self.x))
def __eq__(self, other):
return isinstance(other, ApplyUnaryOp) and \
other.op == self.op and \
other.x == self.x
class ApplyComparisonOp(IR):
@typecheck_method(op=str, l=IR, r=IR)
def __init__(self, op, l, r):
super().__init__(l, r)
self.op = op
self.l = l
self.r = r
@typecheck_method(l=IR, r=IR)
def copy(self, l, r):
new_instance = self.__class__
return new_instance(self.op, l, r)
def render(self, r):
return '(ApplyComparisonOp {} {} {})'.format(escape_id(self.op), r(self.l), r(self.r))
def __eq__(self, other):
return isinstance(other, ApplyComparisonOp) and \
other.op == self.op and \
other.l == self.l and \
other.r == self.r
class MakeArray(IR):
@typecheck_method(args=sequenceof(IR), typ=nullable(hail_type))
def __init__(self, args, typ):
super().__init__(*args)
self.args = args
self.typ = typ
def copy(self, *args):
new_instance = self.__class__
return new_instance(list(args), self.typ)
def render(self, r):
return '(MakeArray {} {})'.format(
self.typ._parsable_string() if self.typ is not None else 'None',
' '.join([r(x) for x in self.args]))
def __eq__(self, other):
return isinstance(other, MakeArray) and \
other.args == self.args and \
other.typ == self.typ
class ArrayRef(IR):
@typecheck_method(a=IR, i=IR)
def __init__(self, a, i):
super().__init__(a, i)
self.a = a
self.i = i
@typecheck_method(a=IR, i=IR)
def copy(self, a, i):
new_instance = self.__class__
return new_instance(a, i)
def render(self, r):
return '(ArrayRef {} {})'.format(r(self.a), r(self.i))
def __eq__(self, other):
return isinstance(other, ArrayRef) and \
other.a == self.a and \
other.i == self.i
class ArrayLen(IR):
@typecheck_method(a=IR)
def __init__(self, a):
super().__init__(a)
self.a = a
@typecheck_method(a=IR)
def copy(self, a):
new_instance = self.__class__
return new_instance(a)
def render(self, r):
return '(ArrayLen {})'.format(r(self.a))
def __eq__(self, other):
return isinstance(other, ArrayLen) and \
other.a == self.a
class ArrayRange(IR):
@typecheck_method(start=IR, stop=IR, step=IR)
def __init__(self, start, stop, step):
super().__init__(start, stop, step)
self.start = start
self.stop = stop
self.step = step
@typecheck_method(start=IR, stop=IR, step=IR)
def copy(self, start, stop, step):
new_instance = self.__class__
return new_instance(start, stop, step)
def render(self, r):
return '(ArrayRange {} {} {})'.format(r(self.start), r(self.stop), r(self.step))
def __eq__(self, other):
return isinstance(other, ArrayRange) and \
other.start == self.start and \
other.stop == self.stop and \
other.step == self.step
class ArraySort(IR):
@typecheck_method(a=IR, ascending=IR, on_key=bool)
def __init__(self, a, ascending, on_key):
super().__init__(a, ascending)
self.a = a
self.ascending = ascending
self.on_key = on_key
@typecheck_method(a=IR, ascending=IR)
def copy(self, a, ascending):
new_instance = self.__class__
return new_instance(a, ascending, self.on_key)
def render(self, r):
return '(ArraySort {} {} {})'.format(self.on_key, r(self.a), r(self.ascending))
def __eq__(self, other):
return isinstance(other, ArraySort) and \
other.a == self.a and \
other.ascending == self.ascending and \
other.on_key == self.on_key
class ToSet(IR):
@typecheck_method(a=IR)
def __init__(self, a):
super().__init__(a)
self.a = a
@typecheck_method(a=IR)
def copy(self, a):
new_instance = self.__class__
return new_instance(a)
def render(self, r):
return '(ToSet {})'.format(r(self.a))
def __eq__(self, other):
return isinstance(other, ToSet) and \
other.a == self.a
class ToDict(IR):
@typecheck_method(a=IR)
def __init__(self, a):
super().__init__(a)
self.a = a
@typecheck_method(a=IR)
def copy(self, a):
new_instance = self.__class__
return new_instance(a)
def render(self, r):
return '(ToDict {})'.format(r(self.a))
def __eq__(self, other):
return isinstance(other, ToDict) and \
other.a == self.a
class ToArray(IR):
@typecheck_method(a=IR)
def __init__(self, a):
super().__init__(a)
self.a = a
@typecheck_method(a=IR)
def copy(self, a):
new_instance = self.__class__
return new_instance(a)
def render(self, r):
return '(ToArray {})'.format(r(self.a))
def __eq__(self, other):
return isinstance(other, ToArray) and \
other.a == self.a
class LowerBoundOnOrderedCollection(IR):
@typecheck_method(ordered_collection=IR, elem=IR, on_key=bool)
def __init__(self, ordered_collection, elem, on_key):
super().__init__(ordered_collection, elem)
self.ordered_collection = ordered_collection
self.elem = elem
self.on_key = on_key
@typecheck_method(ordered_collection=IR, elem=IR)
def copy(self, ordered_collection, elem):
new_instance = self.__class__
return new_instance(ordered_collection, elem, self.on_key)
def render(self, r):
return '(LowerBoundOnOrderedCollection {} {} {})'.format(self.on_key, r(self.ordered_collection), r(self.elem))
def __eq__(self, other):
return isinstance(other, LowerBoundOnOrderedCollection) and \
other.ordered_collection == self.ordered_collection and \
other.elem == self.elem and \
other.on_key == self.on_key
class GroupByKey(IR):
@typecheck_method(collection=IR)
def __init__(self, collection):
super().__init__(collection)
self.collection = collection
@typecheck_method(collection=IR)
def copy(self, collection):
new_instance = self.__class__
return new_instance(collection)
def render(self, r):
return '(GroupByKey {})'.format(r(self.collection))
def __eq__(self, other):
return isinstance(other, GroupByKey) and \
other.collection == self.collection
class ArrayMap(IR):
@typecheck_method(a=IR, name=str, body=IR)
def __init__(self, a, name, body):
super().__init__(a, body)
self.a = a
self.name = name
self.body = body
@typecheck_method(a=IR, body=IR)
def copy(self, a, body):
new_instance = self.__class__
return new_instance(a, self.name, body)
def render(self, r):
return '(ArrayMap {} {} {})'.format(escape_id(self.name), r(self.a), r(self.body))
@property
def bound_variables(self):
return {self.name} | super().bound_variables
def __eq__(self, other):
return isinstance(other, ArrayMap) and \
other.a == self.a and \
other.name == self.name and \
other.body == self.body
class ArrayFilter(IR):
@typecheck_method(a=IR, name=str, body=IR)
def __init__(self, a, name, body):
super().__init__(a, body)
self.a = a
self.name = name
self.body = body
@typecheck_method(a=IR, body=IR)
def copy(self, a, body):
new_instance = self.__class__
return new_instance(a, self.name, body)
def render(self, r):
return '(ArrayFilter {} {} {})'.format(escape_id(self.name), r(self.a), r(self.body))
@property
def bound_variables(self):
return {self.name} | super().bound_variables
def __eq__(self, other):
return isinstance(other, ArrayFilter) and \
other.a == self.a and \
other.name == self.name and \
other.body == self.body
class ArrayFlatMap(IR):
@typecheck_method(a=IR, name=str, body=IR)
def __init__(self, a, name, body):
super().__init__(a, body)
self.a = a
self.name = name
self.body = body
@typecheck_method(a=IR, body=IR)
def copy(self, a, body):
new_instance = self.__class__
return new_instance(a, self.name, body)
def render(self, r):
return '(ArrayFlatMap {} {} {})'.format(escape_id(self.name), r(self.a), r(self.body))
@property
def bound_variables(self):
return {self.name} | super().bound_variables
def __eq__(self, other):
return isinstance(other, ArrayFlatMap) and \
other.a == self.a and \
other.name == self.name and \
other.body == self.body
class ArrayFold(IR):
@typecheck_method(a=IR, zero=IR, accum_name=str, value_name=str, body=IR)
def __init__(self, a, zero, accum_name, value_name, body):
super().__init__(a, zero, body)
self.a = a
self.zero = zero
self.accum_name = accum_name
self.value_name = value_name
self.body = body
@typecheck_method(a=IR, zero=IR, body=IR)
def copy(self, a, zero, body):
new_instance = self.__class__
return new_instance(a, zero, self.accum_name, self.value_name, body)
def render(self, r):
return '(ArrayFold {} {} {} {} {})'.format(
escape_id(self.accum_name), escape_id(self.value_name),
r(self.a), r(self.zero), r(self.body))
@property
def bound_variables(self):
return {self.accum_name, self.value_name} | super().bound_variables
def __eq__(self, other):
return isinstance(other, ArrayFold) and \
other.a == self.a and \
other.zero == self.zero and \
other.accum_name == self.accum_name and \
other.value_name == self.value_name and \
other.body == self.body
class ArrayScan(IR):
@typecheck_method(a=IR, zero=IR, accum_name=str, value_name=str, body=IR)
def __init__(self, a, zero, accum_name, value_name, body):
super().__init__(a, zero, body)
self.a = a
self.zero = zero
self.accum_name = accum_name
self.value_name = value_name
self.body = body
@typecheck_method(a=IR, zero=IR, body=IR)
def copy(self, a, zero, body):
new_instance = self.__class__
return new_instance(a, zero, self.accum_name, self.value_name, body)
def render(self, r):
return '(ArrayScan {} {} {} {} {})'.format(
escape_id(self.accum_name), escape_id(self.value_name),
r(self.a), r(self.zero), r(self.body))
@property
def bound_variables(self):
return {self.accum_name, self.value_name} | super().bound_variables
def __eq__(self, other):
return isinstance(other, ArrayScan) and \
other.a == self.a and \
other.zero == self.zero and \
other.accum_name == self.accum_name and \
other.value_name == self.value_name and \
other.body == self.body
class ArrayFor(IR):
@typecheck_method(a=IR, value_name=str, body=IR)
def __init__(self, a, value_name, body):
super().__init__(a, body)
self.a = a
self.value_name = value_name
self.body = body
@typecheck_method(a=IR, body=IR)
def copy(self, a, body):
new_instance = self.__class__
return new_instance(a, self.value_name, body)
def render(self, r):
return '(ArrayFor {} {} {})'.format(escape_id(self.value_name), r(self.a), r(self.body))
@property
def bound_variables(self):
return {self.value_name} | super().bound_variables
def __eq__(self, other):
return isinstance(other, ArrayFor) and \
other.a == self.a and \
other.value_name == self.value_name and \
other.body == self.body
class AggFilter(IR):
@typecheck_method(cond=IR, agg_ir=IR)
def __init__(self, cond, agg_ir):
super().__init__(cond, agg_ir)
self.cond = cond
self.agg_ir = agg_ir
@typecheck_method(cond=IR, agg_ir=IR)
def copy(self, cond, agg_ir):
new_instance = self.__class__
return new_instance(cond, agg_ir)
def render(self, r):
return '(AggFilter {} {})'.format(r(self.cond), r(self.agg_ir))
def __eq__(self, other):
return isinstance(other, AggFilter) and \
other.cond == self.cond and \
other.agg_ir == self.agg_ir
class AggExplode(IR):
@typecheck_method(array=IR, name=str, agg_body=IR)
def __init__(self, array, name, agg_body):
super().__init__(array, agg_body)
self.name = name
self.array = array
self.agg_body = agg_body
@typecheck_method(array=IR, agg_body=IR)
def copy(self, array, agg_body):
new_instance = self.__class__
return new_instance(array, self.name, agg_body)
def render(self, r):
return '(AggExplode {} {} {})'.format(escape_id(self.name), r(self.array), r(self.agg_body))
@property
def bound_variables(self):
return {self.name} | super().bound_variables
def __eq__(self, other):
return isinstance(other, AggExplode) and \
other.array == self.array and \
other.name == self.name and \
other.agg_body == self.agg_body
class AggGroupBy(IR):
@typecheck_method(key=IR, agg_ir=IR)
def __init__(self, key, agg_ir):
super().__init__(key, agg_ir)
self.key = key
self.agg_ir = agg_ir
@typecheck_method(key=IR, agg_ir=IR)
def copy(self, key, agg_ir):
new_instance = self.__class__
return new_instance(key, agg_ir)
def render(self, r):
return '(AggGroupBy {} {})'.format(r(self.key), r(self.agg_ir))
def __eq__(self, other):
return isinstance(other, AggGroupBy) and \
other.key == self.key and \
other.agg_ir == self.agg_ir
class BaseApplyAggOp(IR):
@typecheck_method(agg_op=str,
constructor_args=sequenceof(IR),
init_op_args=nullable(sequenceof(IR)),
seq_op_args=sequenceof(IR))
def __init__(self, agg_op, constructor_args, init_op_args, seq_op_args):
init_op_children = [] if init_op_args is None else init_op_args
super().__init__(*constructor_args, *init_op_children, *seq_op_args)
self.agg_op = agg_op
self.constructor_args = constructor_args
self.init_op_args = init_op_args
self.seq_op_args = seq_op_args
def copy(self, *args):
new_instance = self.__class__
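        # __init__ flattened the children as constructor_args + init_op_args + seq_op_args; slice them back apart here.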
n_seq_op_args = len(self.seq_op_args)
n_constructor_args = len(self.constructor_args)
constr_args = args[:n_constructor_args]
init_op_args = args[n_constructor_args:-n_seq_op_args]
seq_op_args = args[-n_seq_op_args:]
return new_instance(self.agg_op, constr_args, init_op_args if len(init_op_args) != 0 else None, seq_op_args)
def render(self, r):
return '({} {} ({}) {} ({}))'.format(
self.__class__.__name__,
self.agg_op,
' '.join([r(x) for x in self.constructor_args]),
'(' + ' '.join([r(x) for x in self.init_op_args]) + ')' if self.init_op_args else 'None',
' '.join([r(x) for x in self.seq_op_args]))
@property
def aggregations(self):
assert all(map(lambda c: len(c.aggregations) == 0, self.children))
return [self]
def __eq__(self, other):
return isinstance(other, self.__class__) and \
other.agg_op == self.agg_op and \
other.constructor_args == self.constructor_args and \
other.init_op_args == self.init_op_args and \
other.seq_op_args == self.seq_op_args
class ApplyAggOp(BaseApplyAggOp):
@typecheck_method(agg_op=str,
constructor_args=sequenceof(IR),
init_op_args=nullable(sequenceof(IR)),
seq_op_args=sequenceof(IR))
def __init__(self, agg_op, constructor_args, init_op_args, seq_op_args):
super().__init__(agg_op, constructor_args, init_op_args, seq_op_args)
class ApplyScanOp(BaseApplyAggOp):
@typecheck_method(agg_op=str,
constructor_args=sequenceof(IR),
init_op_args=nullable(sequenceof(IR)),
seq_op_args=sequenceof(IR))
def __init__(self, agg_op, constructor_args, init_op_args, seq_op_args):
super().__init__(agg_op, constructor_args, init_op_args, seq_op_args)
class Begin(IR):
@typecheck_method(xs=sequenceof(IR))
def __init__(self, xs):
super().__init__(*xs)
self.xs = xs
def copy(self, *xs):
new_instance = self.__class__
return new_instance(list(xs))
def render(self, r):
return '(Begin {})'.format(' '.join([r(x) for x in self.xs]))
def __eq__(self, other):
return isinstance(other, Begin) \
and other.xs == self.xs
class MakeStruct(IR):
@typecheck_method(fields=sequenceof(sized_tupleof(str, IR)))
def __init__(self, fields):
super().__init__(*[ir for (n, ir) in fields])
self.fields = fields
def copy(self, *irs):
new_instance = self.__class__
assert len(irs) == len(self.fields)
return new_instance([(n, ir) for (n, _), ir in zip(self.fields, irs)])
def render(self, r):
return '(MakeStruct {})'.format(' '.join(['({} {})'.format(escape_id(f), r(x)) for (f, x) in self.fields]))
def __eq__(self, other):
return isinstance(other, MakeStruct) \
and other.fields == self.fields
class SelectFields(IR):
@typecheck_method(old=IR, fields=sequenceof(str))
def __init__(self, old, fields):
super().__init__(old)
self.old = old
self.fields = fields
@typecheck_method(old=IR)
def copy(self, old):
new_instance = self.__class__
return new_instance(old, self.fields)
def render(self, r):
return '(SelectFields ({}) {})'.format(' '.join(map(escape_id, self.fields)), r(self.old))
def __eq__(self, other):
return isinstance(other, SelectFields) and \
other.old == self.old and \
other.fields == self.fields
class InsertFields(IR):
@typecheck_method(old=IR, fields=sequenceof(sized_tupleof(str, IR)))
def __init__(self, old, fields):
super().__init__(old, *[ir for (f, ir) in fields])
self.old = old
self.fields = fields
def copy(self, *args):
new_instance = self.__class__
assert len(args) == len(self.fields) + 1
return new_instance(args[0], [(n, ir) for (n, _), ir in zip(self.fields, args[1:])])
def render(self, r):
return '(InsertFields {} {})'.format(
            r(self.old),
' '.join(['({} {})'.format(escape_id(f), r(x)) for (f, x) in self.fields]))
def __eq__(self, other):
return isinstance(other, InsertFields) and \
other.old == self.old and \
other.fields == self.fields
class GetField(IR):
@typecheck_method(o=IR, name=str)
def __init__(self, o, name):
super().__init__(o)
self.o = o
self.name = name
@typecheck_method(o=IR)
def copy(self, o):
new_instance = self.__class__
return new_instance(o, self.name)
def render(self, r):
return '(GetField {} {})'.format(escape_id(self.name), r(self.o))
@property
def is_nested_field(self):
return self.o.is_nested_field
def __eq__(self, other):
return isinstance(other, GetField) and \
other.o == self.o and \
other.name == self.name
class MakeTuple(IR):
@typecheck_method(elements=sequenceof(IR))
def __init__(self, elements):
super().__init__(*elements)
self.elements = elements
def copy(self, *args):
new_instance = self.__class__
return new_instance(list(args))
def render(self, r):
return '(MakeTuple {})'.format(' '.join([r(x) for x in self.elements]))
def __eq__(self, other):
return isinstance(other, MakeTuple) and \
other.elements == self.elements
class GetTupleElement(IR):
@typecheck_method(o=IR, idx=int)
def __init__(self, o, idx):
super().__init__(o)
self.o = o
self.idx = idx
@typecheck_method(o=IR)
def copy(self, o):
new_instance = self.__class__
return new_instance(o, self.idx)
def render(self, r):
return '(GetTupleElement {} {})'.format(self.idx, r(self.o))
def __eq__(self, other):
return isinstance(other, GetTupleElement) and \
other.o == self.o and \
other.idx == self.idx
class StringSlice(IR):
@typecheck_method(s=IR, start=IR, end=IR)
def __init__(self, s, start, end):
super().__init__(s, start, end)
self.s = s
self.start = start
self.end = end
@typecheck_method(s=IR, start=IR, end=IR)
def copy(self, s, start, end):
new_instance = self.__class__
return new_instance(s, start, end)
def render(self, r):
return '(StringSlice {} {} {})'.format(r(self.s), r(self.start), r(self.end))
def __eq__(self, other):
return isinstance(other, StringSlice) and \
other.s == self.s and \
other.start == self.start and \
other.end == self.end
class StringLength(IR):
@typecheck_method(s=IR)
def __init__(self, s):
super().__init__(s)
self.s = s
@typecheck_method(s=IR)
def copy(self, s):
new_instance = self.__class__
return new_instance(s)
def render(self, r):
return '(StringLength {})'.format(r(self.s))
def __eq__(self, other):
return isinstance(other, StringLength) and \
other.s == self.s
class In(IR):
@typecheck_method(i=int, typ=hail_type)
def __init__(self, i, typ):
super().__init__()
self.i = i
self.typ = typ
def copy(self):
new_instance = self.__class__
return new_instance(self.i, self.typ)
def render(self, r):
return '(In {} {})'.format(self.typ._parsable_string(), self.i)
def __eq__(self, other):
return isinstance(other, In) and \
other.i == self.i and \
other.typ == self.typ
class Die(IR):
@typecheck_method(message=IR, typ=hail_type)
def __init__(self, message, typ):
super().__init__()
self.message = message
self.typ = typ
def copy(self):
new_instance = self.__class__
return new_instance(self.message, self.typ)
def render(self, r):
return '(Die {} {})'.format(self.typ._parsable_string(), r(self.message))
def __eq__(self, other):
return isinstance(other, Die) and \
other.message == self.message and \
other.typ == self.typ
class Apply(IR):
@typecheck_method(function=str, args=IR)
def __init__(self, function, *args):
super().__init__(*args)
self.function = function
self.args = args
def copy(self, *args):
new_instance = self.__class__
return new_instance(self.function, *args)
def render(self, r):
return '(Apply {} {})'.format(escape_id(self.function), ' '.join([r(x) for x in self.args]))
def __eq__(self, other):
return isinstance(other, Apply) and \
other.function == self.function and \
other.args == self.args
class ApplySeeded(IR):
@typecheck_method(function=str, seed=int, args=IR)
def __init__(self, function, seed, *args):
super().__init__(*args)
self.function = function
self.args = args
self.seed = seed
def copy(self, *args):
new_instance = self.__class__
return new_instance(self.function, self.seed, *args)
def render(self, r):
return '(ApplySeeded {} {} {})'.format(
escape_id(self.function),
self.seed,
' '.join([r(x) for x in self.args]))
def __eq__(self, other):
return isinstance(other, Apply) and \
other.function == self.function and \
other.args == self.args
class Uniroot(IR):
@typecheck_method(argname=str, function=IR, min=IR, max=IR)
def __init__(self, argname, function, min, max):
super().__init__(function, min, max)
self.argname = argname
self.function = function
self.min = min
self.max = max
@typecheck_method(function=IR, min=IR, max=IR)
def copy(self, function, min, max):
new_instance = self.__class__
return new_instance(self.argname, function, min, max)
def render(self, r):
return '(Uniroot {} {} {} {})'.format(
escape_id(self.argname), r(self.function), r(self.min), r(self.max))
@property
def bound_variables(self):
return {self.argname} | super().bound_variables
def __eq__(self, other):
return isinstance(other, Uniroot) and \
other.argname == self.argname and \
other.function == self.function and \
other.min == self.min and \
other.max == self.max
class TableCount(IR):
@typecheck_method(child=TableIR)
def __init__(self, child):
super().__init__(child)
self.child = child
@typecheck_method(child=TableIR)
def copy(self, child):
new_instance = self.__class__
return new_instance(child)
def render(self, r):
return '(TableCount {})'.format(r(self.child))
def __eq__(self, other):
return isinstance(other, TableCount) and \
other.child == self.child
class TableAggregate(IR):
@typecheck_method(child=TableIR, query=IR)
def __init__(self, child, query):
super().__init__(child, query)
self.child = child
self.query = query
@typecheck_method(child=TableIR, query=IR)
def copy(self, child, query):
new_instance = self.__class__
return new_instance(child, query)
def render(self, r):
return '(TableAggregate {} {})'.format(r(self.child), r(self.query))
def __eq__(self, other):
return isinstance(other, TableAggregate) and \
other.child == self.child and \
other.query == self.query
class MatrixAggregate(IR):
@typecheck_method(child=MatrixIR, query=IR)
def __init__(self, child, query):
super().__init__(child, query)
self.child = child
self.query = query
@typecheck_method(child=MatrixIR, query=IR)
def copy(self, child, query):
new_instance = self.__class__
return new_instance(child, query)
def render(self, r):
return '(MatrixAggregate {} {})'.format(r(self.child), r(self.query))
def __eq__(self, other):
return isinstance(other, MatrixAggregate) and \
other.child == self.child and \
other.query == self.query
class TableWrite(IR):
@typecheck_method(child=TableIR, path=str, overwrite=bool, stage_locally=bool, _codec_spec=nullable(str))
def __init__(self, child, path, overwrite, stage_locally, _codec_spec):
super().__init__(child)
self.child = child
self.path = path
self.overwrite = overwrite
self.stage_locally = stage_locally
self._codec_spec = _codec_spec
@typecheck_method(child=TableIR)
def copy(self, child):
new_instance = self.__class__
return new_instance(child, self.path, self.overwrite, self.stage_locally, self._codec_spec)
def render(self, r):
return '(TableWrite "{}" {} {} {} {})'.format(escape_str(self.path), self.overwrite, self.stage_locally,
"\"" + escape_str(self._codec_spec) + "\"" if self._codec_spec else "None",
r(self.child))
def __eq__(self, other):
return isinstance(other, TableWrite) and \
other.child == self.child and \
other.path == self.path and \
other.overwrite == self.overwrite and \
other.stage_locally == self.stage_locally and \
other._codec_spec == self._codec_spec
class TableExport(IR):
@typecheck_method(child=TableIR,
path=str,
types_file=str,
header=bool,
export_type=hail_type)
def __init__(self, child, path, types_file, header, export_type):
super().__init__(child)
self.child = child
self.path = path
self.types_file = types_file
self.header = header
self.export_type = export_type
@typecheck_method(child=TableIR)
def copy(self, child):
new_instance = self.__class__
return new_instance(child, self.path, self.types_file, self.header, self.export_type)
def render(self, r):
return '(TableExport "{}" "{}" "{}" {} {})'.format(
escape_str(self.path),
escape_str(self.types_file),
escape_str(self.header),
self.export_type,
r(self.child))
def __eq__(self, other):
return isinstance(other, TableExport) and \
other.child == self.child and \
other.path == self.path and \
other.types_file == self.types_file and \
other.header == self.header and \
other.export_type == self.export_type
class MatrixWrite(IR):
@typecheck_method(child=MatrixIR, matrix_writer=MatrixWriter)
def __init__(self, child, matrix_writer):
super().__init__(child)
self.child = child
self.matrix_writer = matrix_writer
@typecheck_method(child=MatrixIR)
def copy(self, child):
new_instance = self.__class__
return new_instance(child, self.matrix_writer)
def render(self, r):
return '(MatrixWrite "{}" {})'.format(
r(self.matrix_writer),
r(self.child))
def __eq__(self, other):
return isinstance(other, MatrixWrite) and \
other.child == self.child and \
other.matrix_writer == self.matrix_writer
class Literal(IR):
@typecheck_method(dtype=hail_type,
value=anytype)
def __init__(self, dtype, value):
super(Literal, self).__init__()
self.dtype: 'hail.HailType' = dtype
self.value = value
def copy(self):
return Literal(self.dtype, self.value)
def render(self, r):
return f'(Literal {self.dtype._parsable_string()} ' \
f'"{escape_str(self.dtype._to_json(self.value))}")'
def __eq__(self, other):
return isinstance(other, Literal) and \
other.dtype == self.dtype and \
other.value == self.value
class Join(IR):
_idx = 0
@typecheck_method(virtual_ir=IR,
temp_vars=sequenceof(str),
join_exprs=sequenceof(anytype),
join_func=func_spec(1, anytype))
def __init__(self, virtual_ir, temp_vars, join_exprs, join_func):
super(Join, self).__init__(*(e._ir for e in join_exprs))
self.virtual_ir = virtual_ir
self.temp_vars = temp_vars
self.join_exprs = join_exprs
self.join_func = join_func
self.idx = Join._idx
Join._idx += 1
def render(self, r):
return r(self.virtual_ir)
class JavaIR(IR):
def __init__(self, jir):
self._jir = jir
def render(self, r):
return f'(JavaIR {r.add_jir(self._jir)})'
| 29.46363 | 129 | 0.590349 | [
"MIT"
] | chrisvittal/hail | hail/python/hail/ir/ir.py | 40,100 | Python |
#!/usr/bin/env python3
# Copyright (c) 2015-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test processing of unrequested blocks.
Setup: two nodes, node0+node1, not connected to each other. Node1 will have
nMinimumChainWork set to 0x10, so it won't process low-work unrequested blocks.
We have one P2PInterface connection to node0 called test_node, and one to node1
called min_work_node.
The test:
1. Generate one block on each node, to leave IBD.
2. Mine a new block on each tip, and deliver to each node from node's peer.
The tip should advance for node0, but node1 should skip processing due to
nMinimumChainWork.
Node1 is unused in tests 3-7:
3. Mine a block that forks from the genesis block, and deliver to test_node.
Node0 should not process this block (just accept the header), because it
is unrequested and doesn't have more or equal work to the tip.
4a,b. Send another two blocks that build on the forking block.
Node0 should process the second block but be stuck on the shorter chain,
because it's missing an intermediate block.
4c.Send 288 more blocks on the longer chain (the number of blocks ahead
we currently store).
Node0 should process all but the last block (too far ahead in height).
5. Send a duplicate of the block in #3 to Node0.
Node0 should not process the block because it is unrequested, and stay on
the shorter chain.
6. Send Node0 an inv for the height 3 block produced in #4 above.
Node0 should figure out that Node0 has the missing height 2 block and send a
getdata.
7. Send Node0 the missing block again.
Node0 should process and the tip should advance.
8. Create a fork which is invalid at a height longer than the current chain
(ie to which the node will try to reorg) but which has headers built on top
of the invalid block. Check that we get disconnected if we send more headers
on the chain the node now knows to be invalid.
9. Test Node1 is able to sync when connected to node0 (which should have sufficient
work on its chain).
"""
from test_framework.mininode import *
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
import time
from test_framework.blocktools import create_block, create_coinbase, create_transaction
class AcceptBlockTest(BitcoinTestFramework):
def add_options(self, parser):
parser.add_option("--testbinary", dest="testbinary",
default=os.getenv("BITCOIND", "uexd"),
help="uexd binary to test")
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 2
self.extra_args = [[], ["-minimumchainwork=0x10"]]
def setup_network(self):
        # Node0 will be used to test behavior of processing unrequested blocks
        # from peers which are not whitelisted, while Node1 (started with
        # -minimumchainwork=0x10) will be used to test rejection of low-work
        # unrequested blocks.
self.setup_nodes()
def run_test(self):
# Setup the p2p connections and start up the network thread.
# test_node connects to node0 (not whitelisted)
test_node = self.nodes[0].add_p2p_connection(P2PInterface())
        # min_work_node connects to node1 (which has nMinimumChainWork set)
min_work_node = self.nodes[1].add_p2p_connection(P2PInterface())
network_thread_start()
# Test logic begins here
test_node.wait_for_verack()
min_work_node.wait_for_verack()
# 1. Have nodes mine a block (leave IBD)
[ n.generate(1) for n in self.nodes ]
tips = [ int("0x" + n.getbestblockhash(), 0) for n in self.nodes ]
# 2. Send one block that builds on each tip.
# This should be accepted by node0
blocks_h2 = [] # the height 2 blocks on each node's chain
block_time = int(time.time()) + 1
for i in range(2):
blocks_h2.append(create_block(tips[i], create_coinbase(2), block_time))
blocks_h2[i].solve()
block_time += 1
test_node.send_message(msg_block(blocks_h2[0]))
min_work_node.send_message(msg_block(blocks_h2[1]))
for x in [test_node, min_work_node]:
x.sync_with_ping()
assert_equal(self.nodes[0].getblockcount(), 2)
assert_equal(self.nodes[1].getblockcount(), 1)
self.log.info("First height 2 block accepted by node0; correctly rejected by node1")
# 3. Send another block that builds on genesis.
block_h1f = create_block(int("0x" + self.nodes[0].getblockhash(0), 0), create_coinbase(1), block_time)
block_time += 1
block_h1f.solve()
test_node.send_message(msg_block(block_h1f))
test_node.sync_with_ping()
tip_entry_found = False
for x in self.nodes[0].getchaintips():
if x['hash'] == block_h1f.hash:
assert_equal(x['status'], "headers-only")
tip_entry_found = True
assert(tip_entry_found)
assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, block_h1f.hash)
        # 4. Send another two blocks that build on the fork.
block_h2f = create_block(block_h1f.sha256, create_coinbase(2), block_time)
block_time += 1
block_h2f.solve()
test_node.send_message(msg_block(block_h2f))
test_node.sync_with_ping()
# Since the earlier block was not processed by node, the new block
# can't be fully validated.
tip_entry_found = False
for x in self.nodes[0].getchaintips():
if x['hash'] == block_h2f.hash:
assert_equal(x['status'], "headers-only")
tip_entry_found = True
assert(tip_entry_found)
# But this block should be accepted by node since it has equal work.
self.nodes[0].getblock(block_h2f.hash)
self.log.info("Second height 2 block accepted, but not reorg'ed to")
# 4b. Now send another block that builds on the forking chain.
block_h3 = create_block(block_h2f.sha256, create_coinbase(3), block_h2f.nTime+1)
block_h3.solve()
test_node.send_message(msg_block(block_h3))
test_node.sync_with_ping()
# Since the earlier block was not processed by node, the new block
# can't be fully validated.
tip_entry_found = False
for x in self.nodes[0].getchaintips():
if x['hash'] == block_h3.hash:
assert_equal(x['status'], "headers-only")
tip_entry_found = True
assert(tip_entry_found)
self.nodes[0].getblock(block_h3.hash)
# But this block should be accepted by node since it has more work.
self.nodes[0].getblock(block_h3.hash)
self.log.info("Unrequested more-work block accepted")
# 4c. Now mine 288 more blocks and deliver; all should be processed but
        # the last (height-too-high) on node (as long as it's not missing any headers)
tip = block_h3
all_blocks = []
for i in range(288):
next_block = create_block(tip.sha256, create_coinbase(i + 4), tip.nTime+1)
next_block.solve()
all_blocks.append(next_block)
tip = next_block
# Now send the block at height 5 and check that it wasn't accepted (missing header)
test_node.send_message(msg_block(all_blocks[1]))
test_node.sync_with_ping()
assert_raises_rpc_error(-5, "Block not found", self.nodes[0].getblock, all_blocks[1].hash)
assert_raises_rpc_error(-5, "Block not found", self.nodes[0].getblockheader, all_blocks[1].hash)
# The block at height 5 should be accepted if we provide the missing header, though
headers_message = msg_headers()
headers_message.headers.append(CBlockHeader(all_blocks[0]))
test_node.send_message(headers_message)
test_node.send_message(msg_block(all_blocks[1]))
test_node.sync_with_ping()
self.nodes[0].getblock(all_blocks[1].hash)
# Now send the blocks in all_blocks
for i in range(288):
test_node.send_message(msg_block(all_blocks[i]))
test_node.sync_with_ping()
# Blocks 1-287 should be accepted, block 288 should be ignored because it's too far ahead
for x in all_blocks[:-1]:
self.nodes[0].getblock(x.hash)
assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, all_blocks[-1].hash)
# 5. Test handling of unrequested block on the node that didn't process
# Should still not be processed (even though it has a child that has more
# work).
# The node should have requested the blocks at some point, so
# disconnect/reconnect first
self.nodes[0].disconnect_p2ps()
self.nodes[1].disconnect_p2ps()
network_thread_join()
test_node = self.nodes[0].add_p2p_connection(P2PInterface())
network_thread_start()
test_node.wait_for_verack()
test_node.send_message(msg_block(block_h1f))
test_node.sync_with_ping()
assert_equal(self.nodes[0].getblockcount(), 2)
self.log.info("Unrequested block that would complete more-work chain was ignored")
# 6. Try to get node to request the missing block.
# Poke the node with an inv for block at height 3 and see if that
# triggers a getdata on block 2 (it should if block 2 is missing).
with mininode_lock:
# Clear state so we can check the getdata request
test_node.last_message.pop("getdata", None)
test_node.send_message(msg_inv([CInv(2, block_h3.sha256)]))
test_node.sync_with_ping()
with mininode_lock:
getdata = test_node.last_message["getdata"]
# Check that the getdata includes the right block
assert_equal(getdata.inv[0].hash, block_h1f.sha256)
self.log.info("Inv at tip triggered getdata for unprocessed block")
# 7. Send the missing block for the third time (now it is requested)
test_node.send_message(msg_block(block_h1f))
test_node.sync_with_ping()
assert_equal(self.nodes[0].getblockcount(), 290)
self.nodes[0].getblock(all_blocks[286].hash)
assert_equal(self.nodes[0].getbestblockhash(), all_blocks[286].hash)
assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, all_blocks[287].hash)
self.log.info("Successfully reorged to longer chain from non-whitelisted peer")
# 8. Create a chain which is invalid at a height longer than the
# current chain, but which has more blocks on top of that
block_289f = create_block(all_blocks[284].sha256, create_coinbase(289), all_blocks[284].nTime+1)
block_289f.solve()
block_290f = create_block(block_289f.sha256, create_coinbase(290), block_289f.nTime+1)
block_290f.solve()
block_291 = create_block(block_290f.sha256, create_coinbase(291), block_290f.nTime+1)
# block_291 spends a coinbase below maturity!
block_291.vtx.append(create_transaction(block_290f.vtx[0], 0, b"42", 1))
block_291.hashMerkleRoot = block_291.calc_merkle_root()
block_291.solve()
block_292 = create_block(block_291.sha256, create_coinbase(292), block_291.nTime+1)
block_292.solve()
# Now send all the headers on the chain and enough blocks to trigger reorg
headers_message = msg_headers()
headers_message.headers.append(CBlockHeader(block_289f))
headers_message.headers.append(CBlockHeader(block_290f))
headers_message.headers.append(CBlockHeader(block_291))
headers_message.headers.append(CBlockHeader(block_292))
test_node.send_message(headers_message)
test_node.sync_with_ping()
tip_entry_found = False
for x in self.nodes[0].getchaintips():
if x['hash'] == block_292.hash:
assert_equal(x['status'], "headers-only")
tip_entry_found = True
assert(tip_entry_found)
assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, block_292.hash)
test_node.send_message(msg_block(block_289f))
test_node.send_message(msg_block(block_290f))
test_node.sync_with_ping()
self.nodes[0].getblock(block_289f.hash)
self.nodes[0].getblock(block_290f.hash)
test_node.send_message(msg_block(block_291))
# At this point we've sent an obviously-bogus block, wait for full processing
# without assuming whether we will be disconnected or not
try:
# Only wait a short while so the test doesn't take forever if we do get
# disconnected
test_node.sync_with_ping(timeout=1)
except AssertionError:
test_node.wait_for_disconnect()
self.nodes[0].disconnect_p2ps()
test_node = self.nodes[0].add_p2p_connection(P2PInterface())
network_thread_start()
test_node.wait_for_verack()
# We should have failed reorg and switched back to 290 (but have block 291)
assert_equal(self.nodes[0].getblockcount(), 290)
assert_equal(self.nodes[0].getbestblockhash(), all_blocks[286].hash)
assert_equal(self.nodes[0].getblock(block_291.hash)["confirmations"], -1)
# Now send a new header on the invalid chain, indicating we're forked off, and expect to get disconnected
block_293 = create_block(block_292.sha256, create_coinbase(293), block_292.nTime+1)
block_293.solve()
headers_message = msg_headers()
headers_message.headers.append(CBlockHeader(block_293))
test_node.send_message(headers_message)
test_node.wait_for_disconnect()
# 9. Connect node1 to node0 and ensure it is able to sync
connect_nodes(self.nodes[0], 1)
sync_blocks([self.nodes[0], self.nodes[1]])
self.log.info("Successfully synced nodes 1 and 0")
if __name__ == '__main__':
AcceptBlockTest().main()
| 44.003086 | 113 | 0.675598 | [
"MIT"
] | ultraEX/UEX | test/functional/p2p_unrequested_blocks.py | 14,257 | Python |
import networkx
import random
def regularize_graph(graph, d):
    """Patch a directed graph until every node has in-degree d and out-degree d.

    Assumes each node starts with in-degree and out-degree at most d (so the
    total missing in-degree equals the total missing out-degree). Edges between
    "full" nodes are repeatedly re-routed onto nodes that still lack degree.
    """
    regularized = True
for node_id in list(graph.nodes()):
if graph.in_degree(node_id)!=d or graph.out_degree(node_id)!=d:
regularized = False
break
while not regularized:
lost_in_degree_ids = []
full_in_degree_ids = []
for node_id in list(graph.nodes()):
if graph.in_degree(node_id)<d:
lost_in_degree_ids.append(node_id)
elif graph.in_degree(node_id)==d:
full_in_degree_ids.append(node_id)
else:
raise Exception('In degree too large')
lost_in_degree_ids = random.sample(lost_in_degree_ids, len(lost_in_degree_ids))
lost_outdegree_ids = []
full_outdegree_ids = []
for node_id in list(graph.nodes()):
if graph.out_degree(node_id)<d:
lost_outdegree_ids.append(node_id)
elif graph.out_degree(node_id)==d:
full_outdegree_ids.append(node_id)
else:
raise Exception('Out degree too large')
lost_outdegree_ids = random.sample(lost_outdegree_ids, len(lost_outdegree_ids))
if len(lost_in_degree_ids)!=len(lost_outdegree_ids):
raise Exception('Number of missing in and out degrees do not match')
for i in range(len(lost_in_degree_ids)):
full_in_degree_ids = random.sample(full_in_degree_ids, len(full_in_degree_ids))
full_outdegree_ids = random.sample(full_outdegree_ids, len(full_outdegree_ids))
lost_in_degree_id = lost_in_degree_ids[i]
lost_outdegree_id = lost_outdegree_ids[i]
# Find appropriate (full_outdegree_id, full_in_degree_id) pair
full_in_degree_id = -1
full_outdegree_id = -1
for fod_id in full_outdegree_ids:
if fod_id!=lost_in_degree_id:
suc_ids = list(graph.successors(fod_id))
for suc_id in suc_ids:
if (suc_id in full_in_degree_ids) and (suc_id!=lost_outdegree_id):
full_in_degree_id = suc_id
full_outdegree_id = fod_id
break
if full_in_degree_id!=-1 and full_outdegree_id!=-1:
break
# Patch
graph.remove_edge(full_outdegree_id, full_in_degree_id)
graph.add_edge(full_outdegree_id, lost_in_degree_id)
graph.add_edge(lost_outdegree_id, full_in_degree_id)
regularized = True
for node_id in list(graph.nodes()):
if graph.in_degree(node_id)!=d or graph.out_degree(node_id)!=d:
regularized = False
break
return graph
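if __name__ == '__main__':
    # Hedged usage sketch, added for illustration only (not part of the original
    # module). Build a 2-regular directed circulant graph, drop two edges so a
    # few nodes lose an in- or out-degree, then patch it back. The repair loop
    # above is randomised, so the exact replacement edges vary between runs.
    d = 2
    n = 8
    demo = networkx.DiGraph()
    for u in range(n):
        for step in range(1, d + 1):
            demo.add_edge(u, (u + step) % n)
    demo.remove_edge(0, 1)
    demo.remove_edge(3, 4)
    demo = regularize_graph(demo, d)
    # Every node should report (in_degree, out_degree) == (2, 2) afterwards.
    print({v: (demo.in_degree(v), demo.out_degree(v)) for v in demo.nodes()})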
| 39.967213 | 85 | 0.694422 | [
"CC0-1.0"
] | mablem8/dandelion-simulations-py | exact-2-regular-k/regularize_graph.py | 2,438 | Python |
#!/usr/bin/env python
# Brief: This node subscribes to /tracked_humans and publishes the predicted goals of the tracked humans based on their trajectories
# Author: Phani Teja Singamaneni
import numpy as np
import rospy
import tf
from geometry_msgs.msg import Point, PoseStamped
from human_msgs.msg import TrackedHumans, TrackedHuman, TrackedSegmentType
from human_path_prediction.msg import PredictedGoal
from scipy.stats import multivariate_normal
from std_srvs.srv import SetBool, Trigger, TriggerResponse
EPS = 1e-12
class PredictGoal(object):
def __init__(self, human_num=1):
self.human_num = human_num
# laas_adream
self.goals_x = [1.5, 7.0, 9.0, 10.5, 1.5, 10.3, 8.5]
self.goals_y = [2.0, 8.0, 12.5, 15.0, 15.0, 1.5, -4.5]
self.goal_num = 7
# maze
# self.goals_x = [1.5,1.5,1.5,1.5,1.5,7.5,25,42,42,41.5,42,37,22,15.5,28.5,37,23.5,10.5,15.5,31.5,20,25.5,7]
# self.goals_y = [45,15,30,60,87,87,81.5,81.5,66,41.5,22,3,3,12.5,12.5,20.5,21.5,28.5,39.5,47,53,59,59]
self.predicted_goal = PoseStamped()
self.last_idx = 0
self.changed = False
self.current_poses = [[] for i in range(self.human_num)]
self.prev_poses = [[] for i in range(self.human_num)]
self.mv_nd = multivariate_normal(mean=0,cov=0.1)
self.theta_phi = [[0]*self.goal_num for i in range(self.human_num)]
self.window_size = 10
self.probability_goal = [np.array([1.0/self.goal_num]*self.goal_num) for i in range(self.human_num)]
self.probability_goal_window = [np.array([[1.0/self.goal_num]*self.goal_num]*self.window_size) for i in range(self.human_num)]
self.done = False
self.itr = 0
NODE_NAME = "human_goal_predict"
rospy.init_node(NODE_NAME)
self.humans_sub_ = rospy.Subscriber("/tracked_humans",TrackedHumans,self.tracked_humansCB)
self.goal_pub_ = rospy.Publisher(NODE_NAME+"/predicted_goal",PredictedGoal, queue_size=2)
self.goal_srv_ = rospy.Service("goal_changed", Trigger, self.goal_changed)
rospy.spin()
def tracked_humansCB(self,msg):
self.prev_poses = self.current_poses
self.current_poses = [[] for i in range(self.human_num)]
for human in msg.humans:
for segment in human.segments:
if segment.type == TrackedSegmentType.TORSO:
self.current_poses[human.track_id-1].append(segment.pose.pose)
if not self.done:
self.prev_poses = self.current_poses
for i in range(0,len(self.current_poses[0])):
diff = np.linalg.norm([self.current_poses[0][i].position.x - self.prev_poses[0][i].position.x, self.current_poses[0][i].position.y - self.prev_poses[0][i].position.y])
if diff > EPS or not self.done:
dist = []
for j in range(0,len(self.goals_x)):
vec1 = np.array([self.goals_x[j],self.goals_y[j],0.0]) - np.array([self.current_poses[0][i].position.x,self.current_poses[0][i].position.y,0.0]) #Vector from current position to a goal
rotation = (self.current_poses[0][i].orientation.x,self.current_poses[0][i].orientation.y,self.current_poses[0][i].orientation.z,self.current_poses[0][i].orientation.w)
roll,pitch,yaw = tf.transformations.euler_from_quaternion(rotation)
unit_vec = np.array([np.cos(yaw), np.sin(yaw),0.0])
self.theta_phi[i][j] = (np.arccos(np.dot(vec1,unit_vec)/np.linalg.norm(vec1)))
dist.append(np.linalg.norm([self.current_poses[0][i].position.x - self.goals_x[j],self.current_poses[0][i].position.y - self.goals_y[j]]))
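                # Explanatory note (comments added for clarity): theta_phi[i][j]
                # is the angle between the human's heading and the direction to
                # goal j, so the zero-mean normal pdf below turns small angles
                # into high likelihoods. The ring buffer of recent likelihoods is
                # then combined with exponential weights over the buffer index
                # (intended to discount older samples), scaled by 1/distance so
                # nearer goals are favoured, and finally min-max normalised.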
                self.probability_goal_window[i][self.itr] = self.mv_nd.pdf(np.array(self.theta_phi[i]))
self.probability_goal[i] = np.array([1.0]*self.goal_num)
for k in range(0,len(self.probability_goal_window[i])):
gf = np.exp((k-self.window_size)/5)
self.probability_goal[i] = np.power(self.probability_goal_window[i][k],gf)* np.array(self.probability_goal[i]) # Linear prediction of goal
for ln in range(0,len(self.goals_x)):
                    self.probability_goal[i][ln] = (1/dist[ln])*self.probability_goal[i][ln]
self.probability_goal[i] = (self.probability_goal[i]-np.min(self.probability_goal[i]))/(np.max(self.probability_goal[i])-np.min(self.probability_goal[i]))
self.itr = self.itr + 1
if self.itr == self.window_size:
self.itr = 0
self.done = True
self.predict_goal()
def predict_goal(self):
idx = 0
max_prob = 0.0
p_goal = PredictedGoal()
for i in range(0,len(self.current_poses[0])):
for j in range(0,len(self.goals_x)):
if(max_prob<self.probability_goal[i][j]):
idx = j
max_prob = self.probability_goal[i][j]
self.predicted_goal.header.stamp = rospy.Time.now()
self.predicted_goal.header.frame_id = 'map'
self.predicted_goal.pose.position.x = self.goals_x[idx]
self.predicted_goal.pose.position.y = self.goals_y[idx]
self.predicted_goal.pose.position.z = 0.0
self.predicted_goal.pose.orientation = self.current_poses[0][i].orientation
if self.last_idx != idx:
p_goal.changed = True
self.changed = True
self.last_idx = idx
p_goal.goal = self.predicted_goal
self.goal_pub_.publish(p_goal)
def goal_changed(self,req):
if self.changed:
self.changed = False
return TriggerResponse(True,"Goal Changed")
return TriggerResponse(False, "Goal not changed")
if __name__ == '__main__':
predict_srv = PredictGoal(60)
| 45.507692 | 205 | 0.624577 | [
"BSD-3-Clause"
] | sphanit/CoHAN | human_path_prediction/scripts/predict_goal.py | 5,916 | Python |
# Copyright (c) 2016 Google Inc. (under http://www.apache.org/licenses/LICENSE-2.0)
def f1(x):
return 1
def f1(x):
return 'foo'
def f2(x):
pass
def f2(x,y):
pass
def f3(x):
return 1+x
def f3(x):
return 'asd'+x
| 14.75 | 83 | 0.59322 | [
"Apache-2.0"
] | Arvind2222/pytype | pytype/tools/merge_pyi/test_data/redefine.py | 236 | Python |
# -*- coding: utf-8 -*-
"""Tests for various magic functions.
Needs to be run by nose (to make ipython session available).
"""
import io
import os
import re
import sys
import warnings
from unittest import TestCase
from importlib import invalidate_caches
from io import StringIO
import nose.tools as nt
import shlex
from IPython import get_ipython
from IPython.core import magic
from IPython.core.error import UsageError
from IPython.core.magic import (Magics, magics_class, line_magic,
cell_magic,
register_line_magic, register_cell_magic)
from IPython.core.magics import execution, script, code, logging, osm
from IPython.testing import decorators as dec
from IPython.testing import tools as tt
from IPython.utils.io import capture_output
from IPython.utils.tempdir import (TemporaryDirectory,
TemporaryWorkingDirectory)
from IPython.utils.process import find_cmd
_ip = get_ipython()
@magic.magics_class
class DummyMagics(magic.Magics): pass
def test_extract_code_ranges():
instr = "1 3 5-6 7-9 10:15 17: :10 10- -13 :"
expected = [(0, 1),
(2, 3),
(4, 6),
(6, 9),
(9, 14),
(16, None),
(None, 9),
(9, None),
(None, 13),
(None, None)]
actual = list(code.extract_code_ranges(instr))
nt.assert_equal(actual, expected)
def test_extract_symbols():
source = """import foo\na = 10\ndef b():\n return 42\n\n\nclass A: pass\n\n\n"""
symbols_args = ["a", "b", "A", "A,b", "A,a", "z"]
expected = [([], ['a']),
(["def b():\n return 42\n"], []),
(["class A: pass\n"], []),
(["class A: pass\n", "def b():\n return 42\n"], []),
(["class A: pass\n"], ['a']),
([], ['z'])]
for symbols, exp in zip(symbols_args, expected):
nt.assert_equal(code.extract_symbols(source, symbols), exp)
def test_extract_symbols_raises_exception_with_non_python_code():
source = ("=begin A Ruby program :)=end\n"
"def hello\n"
"puts 'Hello world'\n"
"end")
with nt.assert_raises(SyntaxError):
code.extract_symbols(source, "hello")
def test_magic_not_found():
# magic not found raises UsageError
with nt.assert_raises(UsageError):
_ip.magic('doesntexist')
# ensure result isn't success when a magic isn't found
result = _ip.run_cell('%doesntexist')
assert isinstance(result.error_in_exec, UsageError)
def test_cell_magic_not_found():
# magic not found raises UsageError
with nt.assert_raises(UsageError):
_ip.run_cell_magic('doesntexist', 'line', 'cell')
# ensure result isn't success when a magic isn't found
result = _ip.run_cell('%%doesntexist')
assert isinstance(result.error_in_exec, UsageError)
def test_magic_error_status():
def fail(shell):
1/0
_ip.register_magic_function(fail)
result = _ip.run_cell('%fail')
assert isinstance(result.error_in_exec, ZeroDivisionError)
def test_config():
""" test that config magic does not raise
can happen if Configurable init is moved too early into
Magics.__init__ as then a Config object will be registered as a
magic.
"""
## should not raise.
_ip.magic('config')
def test_config_available_configs():
""" test that config magic prints available configs in unique and
sorted order. """
with capture_output() as captured:
_ip.magic('config')
stdout = captured.stdout
config_classes = stdout.strip().split('\n')[1:]
nt.assert_list_equal(config_classes, sorted(set(config_classes)))
def test_config_print_class():
""" test that config with a classname prints the class's options. """
with capture_output() as captured:
_ip.magic('config TerminalInteractiveShell')
stdout = captured.stdout
if not re.match("TerminalInteractiveShell.* options", stdout.splitlines()[0]):
print(stdout)
raise AssertionError("1st line of stdout not like "
"'TerminalInteractiveShell.* options'")
def test_rehashx():
# clear up everything
_ip.alias_manager.clear_aliases()
del _ip.db['syscmdlist']
_ip.magic('rehashx')
# Practically ALL ipython development systems will have more than 10 aliases
nt.assert_true(len(_ip.alias_manager.aliases) > 10)
for name, cmd in _ip.alias_manager.aliases:
# we must strip dots from alias names
nt.assert_not_in('.', name)
# rehashx must fill up syscmdlist
scoms = _ip.db['syscmdlist']
nt.assert_true(len(scoms) > 10)
def test_magic_parse_options():
"""Test that we don't mangle paths when parsing magic options."""
ip = get_ipython()
path = 'c:\\x'
m = DummyMagics(ip)
opts = m.parse_options('-f %s' % path,'f:')[0]
# argv splitting is os-dependent
if os.name == 'posix':
expected = 'c:x'
else:
expected = path
nt.assert_equal(opts['f'], expected)
def test_magic_parse_long_options():
"""Magic.parse_options can handle --foo=bar long options"""
ip = get_ipython()
m = DummyMagics(ip)
opts, _ = m.parse_options('--foo --bar=bubble', 'a', 'foo', 'bar=')
nt.assert_in('foo', opts)
nt.assert_in('bar', opts)
nt.assert_equal(opts['bar'], "bubble")
@dec.skip_without('sqlite3')
def doctest_hist_f():
"""Test %hist -f with temporary filename.
In [9]: import tempfile
In [10]: tfile = tempfile.mktemp('.py','tmp-ipython-')
In [11]: %hist -nl -f $tfile 3
In [13]: import os; os.unlink(tfile)
"""
@dec.skip_without('sqlite3')
def doctest_hist_r():
"""Test %hist -r
XXX - This test is not recording the output correctly. For some reason, in
testing mode the raw history isn't getting populated. No idea why.
Disabling the output checking for now, though at least we do run it.
In [1]: 'hist' in _ip.lsmagic()
Out[1]: True
In [2]: x=1
In [3]: %hist -rl 2
x=1 # random
%hist -r 2
"""
@dec.skip_without('sqlite3')
def doctest_hist_op():
"""Test %hist -op
In [1]: class b(float):
...: pass
...:
In [2]: class s(object):
...: def __str__(self):
...: return 's'
...:
In [3]:
In [4]: class r(b):
...: def __repr__(self):
...: return 'r'
...:
In [5]: class sr(s,r): pass
...:
In [6]:
In [7]: bb=b()
In [8]: ss=s()
In [9]: rr=r()
In [10]: ssrr=sr()
In [11]: 4.5
Out[11]: 4.5
In [12]: str(ss)
Out[12]: 's'
In [13]:
In [14]: %hist -op
>>> class b:
... pass
...
>>> class s(b):
... def __str__(self):
... return 's'
...
>>>
>>> class r(b):
... def __repr__(self):
... return 'r'
...
>>> class sr(s,r): pass
>>>
>>> bb=b()
>>> ss=s()
>>> rr=r()
>>> ssrr=sr()
>>> 4.5
4.5
>>> str(ss)
's'
>>>
"""
def test_hist_pof():
ip = get_ipython()
ip.run_cell(u"1+2", store_history=True)
#raise Exception(ip.history_manager.session_number)
#raise Exception(list(ip.history_manager._get_range_session()))
with TemporaryDirectory() as td:
tf = os.path.join(td, 'hist.py')
ip.run_line_magic('history', '-pof %s' % tf)
assert os.path.isfile(tf)
@dec.skip_without('sqlite3')
def test_macro():
ip = get_ipython()
ip.history_manager.reset() # Clear any existing history.
cmds = ["a=1", "def b():\n return a**2", "print(a,b())"]
for i, cmd in enumerate(cmds, start=1):
ip.history_manager.store_inputs(i, cmd)
ip.magic("macro test 1-3")
nt.assert_equal(ip.user_ns["test"].value, "\n".join(cmds)+"\n")
# List macros
nt.assert_in("test", ip.magic("macro"))
@dec.skip_without('sqlite3')
def test_macro_run():
"""Test that we can run a multi-line macro successfully."""
ip = get_ipython()
ip.history_manager.reset()
cmds = ["a=10", "a+=1", "print(a)", "%macro test 2-3"]
for cmd in cmds:
ip.run_cell(cmd, store_history=True)
nt.assert_equal(ip.user_ns["test"].value, "a+=1\nprint(a)\n")
with tt.AssertPrints("12"):
ip.run_cell("test")
with tt.AssertPrints("13"):
ip.run_cell("test")
def test_magic_magic():
"""Test %magic"""
ip = get_ipython()
with capture_output() as captured:
ip.magic("magic")
stdout = captured.stdout
nt.assert_in('%magic', stdout)
nt.assert_in('IPython', stdout)
nt.assert_in('Available', stdout)
@dec.skipif_not_numpy
def test_numpy_reset_array_undec():
"Test '%reset array' functionality"
_ip.ex('import numpy as np')
_ip.ex('a = np.empty(2)')
nt.assert_in('a', _ip.user_ns)
_ip.magic('reset -f array')
nt.assert_not_in('a', _ip.user_ns)
def test_reset_out():
"Test '%reset out' magic"
_ip.run_cell("parrot = 'dead'", store_history=True)
# test '%reset -f out', make an Out prompt
_ip.run_cell("parrot", store_history=True)
nt.assert_true('dead' in [_ip.user_ns[x] for x in ('_','__','___')])
_ip.magic('reset -f out')
nt.assert_false('dead' in [_ip.user_ns[x] for x in ('_','__','___')])
nt.assert_equal(len(_ip.user_ns['Out']), 0)
def test_reset_in():
"Test '%reset in' magic"
# test '%reset -f in'
_ip.run_cell("parrot", store_history=True)
nt.assert_true('parrot' in [_ip.user_ns[x] for x in ('_i','_ii','_iii')])
_ip.magic('%reset -f in')
nt.assert_false('parrot' in [_ip.user_ns[x] for x in ('_i','_ii','_iii')])
nt.assert_equal(len(set(_ip.user_ns['In'])), 1)
def test_reset_dhist():
"Test '%reset dhist' magic"
_ip.run_cell("tmp = [d for d in _dh]") # copy before clearing
_ip.magic('cd ' + os.path.dirname(nt.__file__))
_ip.magic('cd -')
nt.assert_true(len(_ip.user_ns['_dh']) > 0)
_ip.magic('reset -f dhist')
nt.assert_equal(len(_ip.user_ns['_dh']), 0)
_ip.run_cell("_dh = [d for d in tmp]") #restore
def test_reset_in_length():
"Test that '%reset in' preserves In[] length"
_ip.run_cell("print 'foo'")
_ip.run_cell("reset -f in")
nt.assert_equal(len(_ip.user_ns['In']), _ip.displayhook.prompt_count+1)
def test_tb_syntaxerror():
"""test %tb after a SyntaxError"""
ip = get_ipython()
ip.run_cell("for")
# trap and validate stdout
save_stdout = sys.stdout
try:
sys.stdout = StringIO()
ip.run_cell("%tb")
out = sys.stdout.getvalue()
finally:
sys.stdout = save_stdout
# trim output, and only check the last line
last_line = out.rstrip().splitlines()[-1].strip()
nt.assert_equal(last_line, "SyntaxError: invalid syntax")
def test_time():
ip = get_ipython()
with tt.AssertPrints("Wall time: "):
ip.run_cell("%time None")
ip.run_cell("def f(kmjy):\n"
" %time print (2*kmjy)")
with tt.AssertPrints("Wall time: "):
with tt.AssertPrints("hihi", suppress=False):
ip.run_cell("f('hi')")
@dec.skip_win32
def test_time2():
ip = get_ipython()
with tt.AssertPrints("CPU times: user "):
ip.run_cell("%time None")
def test_time3():
"""Erroneous magic function calls, issue gh-3334"""
ip = get_ipython()
ip.user_ns.pop('run', None)
with tt.AssertNotPrints("not found", channel='stderr'):
ip.run_cell("%%time\n"
"run = 0\n"
"run += 1")
def test_doctest_mode():
"Toggle doctest_mode twice, it should be a no-op and run without error"
_ip.magic('doctest_mode')
_ip.magic('doctest_mode')
def test_parse_options():
"""Tests for basic options parsing in magics."""
# These are only the most minimal of tests, more should be added later. At
# the very least we check that basic text/unicode calls work OK.
m = DummyMagics(_ip)
nt.assert_equal(m.parse_options('foo', '')[1], 'foo')
nt.assert_equal(m.parse_options(u'foo', '')[1], u'foo')
def test_dirops():
"""Test various directory handling operations."""
# curpath = lambda :os.path.splitdrive(os.getcwd())[1].replace('\\','/')
curpath = os.getcwd
startdir = os.getcwd()
ipdir = os.path.realpath(_ip.ipython_dir)
try:
_ip.magic('cd "%s"' % ipdir)
nt.assert_equal(curpath(), ipdir)
_ip.magic('cd -')
nt.assert_equal(curpath(), startdir)
_ip.magic('pushd "%s"' % ipdir)
nt.assert_equal(curpath(), ipdir)
_ip.magic('popd')
nt.assert_equal(curpath(), startdir)
finally:
os.chdir(startdir)
def test_cd_force_quiet():
"""Test OSMagics.cd_force_quiet option"""
_ip.config.OSMagics.cd_force_quiet = True
osmagics = osm.OSMagics(shell=_ip)
startdir = os.getcwd()
ipdir = os.path.realpath(_ip.ipython_dir)
try:
with tt.AssertNotPrints(ipdir):
osmagics.cd('"%s"' % ipdir)
with tt.AssertNotPrints(startdir):
osmagics.cd('-')
finally:
os.chdir(startdir)
def test_xmode():
# Calling xmode three times should be a no-op
xmode = _ip.InteractiveTB.mode
for i in range(4):
_ip.magic("xmode")
nt.assert_equal(_ip.InteractiveTB.mode, xmode)
def test_reset_hard():
monitor = []
class A(object):
def __del__(self):
monitor.append(1)
def __repr__(self):
return "<A instance>"
_ip.user_ns["a"] = A()
_ip.run_cell("a")
nt.assert_equal(monitor, [])
_ip.magic("reset -f")
nt.assert_equal(monitor, [1])
class TestXdel(tt.TempFileMixin):
def test_xdel(self):
"""Test that references from %run are cleared by xdel."""
src = ("class A(object):\n"
" monitor = []\n"
" def __del__(self):\n"
" self.monitor.append(1)\n"
"a = A()\n")
self.mktmp(src)
# %run creates some hidden references...
_ip.magic("run %s" % self.fname)
# ... as does the displayhook.
_ip.run_cell("a")
monitor = _ip.user_ns["A"].monitor
nt.assert_equal(monitor, [])
_ip.magic("xdel a")
# Check that a's __del__ method has been called.
nt.assert_equal(monitor, [1])
def doctest_who():
"""doctest for %who
In [1]: %reset -f
In [2]: alpha = 123
In [3]: beta = 'beta'
In [4]: %who int
alpha
In [5]: %who str
beta
In [6]: %whos
Variable Type Data/Info
----------------------------
alpha int 123
beta str beta
In [7]: %who_ls
Out[7]: ['alpha', 'beta']
"""
def test_whos():
"""Check that whos is protected against objects where repr() fails."""
class A(object):
def __repr__(self):
raise Exception()
_ip.user_ns['a'] = A()
_ip.magic("whos")
def doctest_precision():
"""doctest for %precision
In [1]: f = get_ipython().display_formatter.formatters['text/plain']
In [2]: %precision 5
Out[2]: '%.5f'
In [3]: f.float_format
Out[3]: '%.5f'
In [4]: %precision %e
Out[4]: '%e'
In [5]: f(3.1415927)
Out[5]: '3.141593e+00'
"""
def test_psearch():
with tt.AssertPrints("dict.fromkeys"):
_ip.run_cell("dict.fr*?")
def test_timeit_shlex():
"""test shlex issues with timeit (#1109)"""
_ip.ex("def f(*a,**kw): pass")
_ip.magic('timeit -n1 "this is a bug".count(" ")')
_ip.magic('timeit -r1 -n1 f(" ", 1)')
_ip.magic('timeit -r1 -n1 f(" ", 1, " ", 2, " ")')
_ip.magic('timeit -r1 -n1 ("a " + "b")')
_ip.magic('timeit -r1 -n1 f("a " + "b")')
_ip.magic('timeit -r1 -n1 f("a " + "b ")')
def test_timeit_special_syntax():
"Test %%timeit with IPython special syntax"
@register_line_magic
def lmagic(line):
ip = get_ipython()
ip.user_ns['lmagic_out'] = line
# line mode test
_ip.run_line_magic('timeit', '-n1 -r1 %lmagic my line')
nt.assert_equal(_ip.user_ns['lmagic_out'], 'my line')
# cell mode test
_ip.run_cell_magic('timeit', '-n1 -r1', '%lmagic my line2')
nt.assert_equal(_ip.user_ns['lmagic_out'], 'my line2')
def test_timeit_return():
"""
test whether timeit -o return object
"""
res = _ip.run_line_magic('timeit','-n10 -r10 -o 1')
assert(res is not None)
def test_timeit_quiet():
"""
test quiet option of timeit magic
"""
with tt.AssertNotPrints("loops"):
_ip.run_cell("%timeit -n1 -r1 -q 1")
def test_timeit_return_quiet():
with tt.AssertNotPrints("loops"):
res = _ip.run_line_magic('timeit', '-n1 -r1 -q -o 1')
assert (res is not None)
def test_timeit_invalid_return():
with nt.assert_raises_regex(SyntaxError, "outside function"):
_ip.run_line_magic('timeit', 'return')
@dec.skipif(execution.profile is None)
def test_prun_special_syntax():
"Test %%prun with IPython special syntax"
@register_line_magic
def lmagic(line):
ip = get_ipython()
ip.user_ns['lmagic_out'] = line
# line mode test
_ip.run_line_magic('prun', '-q %lmagic my line')
nt.assert_equal(_ip.user_ns['lmagic_out'], 'my line')
# cell mode test
_ip.run_cell_magic('prun', '-q', '%lmagic my line2')
nt.assert_equal(_ip.user_ns['lmagic_out'], 'my line2')
@dec.skipif(execution.profile is None)
def test_prun_quotes():
"Test that prun does not clobber string escapes (GH #1302)"
_ip.magic(r"prun -q x = '\t'")
nt.assert_equal(_ip.user_ns['x'], '\t')
def test_extension():
# Debugging information for failures of this test
print('sys.path:')
for p in sys.path:
print(' ', p)
print('CWD', os.getcwd())
nt.assert_raises(ImportError, _ip.magic, "load_ext daft_extension")
daft_path = os.path.join(os.path.dirname(__file__), "daft_extension")
sys.path.insert(0, daft_path)
try:
_ip.user_ns.pop('arq', None)
invalidate_caches() # Clear import caches
_ip.magic("load_ext daft_extension")
nt.assert_equal(_ip.user_ns['arq'], 185)
_ip.magic("unload_ext daft_extension")
assert 'arq' not in _ip.user_ns
finally:
sys.path.remove(daft_path)
def test_notebook_export_json():
_ip = get_ipython()
_ip.history_manager.reset() # Clear any existing history.
cmds = [u"a=1", u"def b():\n return a**2", u"print('noël, été', b())"]
for i, cmd in enumerate(cmds, start=1):
_ip.history_manager.store_inputs(i, cmd)
with TemporaryDirectory() as td:
outfile = os.path.join(td, "nb.ipynb")
_ip.magic("notebook -e %s" % outfile)
class TestEnv(TestCase):
def test_env(self):
env = _ip.magic("env")
self.assertTrue(isinstance(env, dict))
def test_env_get_set_simple(self):
env = _ip.magic("env var val1")
self.assertEqual(env, None)
self.assertEqual(os.environ['var'], 'val1')
self.assertEqual(_ip.magic("env var"), 'val1')
env = _ip.magic("env var=val2")
self.assertEqual(env, None)
self.assertEqual(os.environ['var'], 'val2')
def test_env_get_set_complex(self):
env = _ip.magic("env var 'val1 '' 'val2")
self.assertEqual(env, None)
self.assertEqual(os.environ['var'], "'val1 '' 'val2")
self.assertEqual(_ip.magic("env var"), "'val1 '' 'val2")
env = _ip.magic('env var=val2 val3="val4')
self.assertEqual(env, None)
self.assertEqual(os.environ['var'], 'val2 val3="val4')
def test_env_set_bad_input(self):
self.assertRaises(UsageError, lambda: _ip.magic("set_env var"))
def test_env_set_whitespace(self):
self.assertRaises(UsageError, lambda: _ip.magic("env var A=B"))
class CellMagicTestCase(TestCase):
def check_ident(self, magic):
# Manually called, we get the result
out = _ip.run_cell_magic(magic, 'a', 'b')
nt.assert_equal(out, ('a','b'))
# Via run_cell, it goes into the user's namespace via displayhook
_ip.run_cell('%%' + magic +' c\nd\n')
nt.assert_equal(_ip.user_ns['_'], ('c','d\n'))
def test_cell_magic_func_deco(self):
"Cell magic using simple decorator"
@register_cell_magic
def cellm(line, cell):
return line, cell
self.check_ident('cellm')
def test_cell_magic_reg(self):
"Cell magic manually registered"
def cellm(line, cell):
return line, cell
_ip.register_magic_function(cellm, 'cell', 'cellm2')
self.check_ident('cellm2')
def test_cell_magic_class(self):
"Cell magics declared via a class"
@magics_class
class MyMagics(Magics):
@cell_magic
def cellm3(self, line, cell):
return line, cell
_ip.register_magics(MyMagics)
self.check_ident('cellm3')
def test_cell_magic_class2(self):
"Cell magics declared via a class, #2"
@magics_class
class MyMagics2(Magics):
@cell_magic('cellm4')
def cellm33(self, line, cell):
return line, cell
_ip.register_magics(MyMagics2)
self.check_ident('cellm4')
# Check that nothing is registered as 'cellm33'
c33 = _ip.find_cell_magic('cellm33')
nt.assert_equal(c33, None)
def test_file():
"""Basic %%writefile"""
ip = get_ipython()
with TemporaryDirectory() as td:
fname = os.path.join(td, 'file1')
ip.run_cell_magic("writefile", fname, u'\n'.join([
'line1',
'line2',
]))
with open(fname) as f:
s = f.read()
nt.assert_in('line1\n', s)
nt.assert_in('line2', s)
@dec.skip_win32
def test_file_single_quote():
"""Basic %%writefile with embedded single quotes"""
ip = get_ipython()
with TemporaryDirectory() as td:
fname = os.path.join(td, '\'file1\'')
ip.run_cell_magic("writefile", fname, u'\n'.join([
'line1',
'line2',
]))
with open(fname) as f:
s = f.read()
nt.assert_in('line1\n', s)
nt.assert_in('line2', s)
@dec.skip_win32
def test_file_double_quote():
"""Basic %%writefile with embedded double quotes"""
ip = get_ipython()
with TemporaryDirectory() as td:
fname = os.path.join(td, '"file1"')
ip.run_cell_magic("writefile", fname, u'\n'.join([
'line1',
'line2',
]))
with open(fname) as f:
s = f.read()
nt.assert_in('line1\n', s)
nt.assert_in('line2', s)
def test_file_var_expand():
"""%%writefile $filename"""
ip = get_ipython()
with TemporaryDirectory() as td:
fname = os.path.join(td, 'file1')
ip.user_ns['filename'] = fname
ip.run_cell_magic("writefile", '$filename', u'\n'.join([
'line1',
'line2',
]))
with open(fname) as f:
s = f.read()
nt.assert_in('line1\n', s)
nt.assert_in('line2', s)
def test_file_unicode():
"""%%writefile with unicode cell"""
ip = get_ipython()
with TemporaryDirectory() as td:
fname = os.path.join(td, 'file1')
ip.run_cell_magic("writefile", fname, u'\n'.join([
u'liné1',
u'liné2',
]))
with io.open(fname, encoding='utf-8') as f:
s = f.read()
nt.assert_in(u'liné1\n', s)
nt.assert_in(u'liné2', s)
def test_file_amend():
"""%%writefile -a amends files"""
ip = get_ipython()
with TemporaryDirectory() as td:
fname = os.path.join(td, 'file2')
ip.run_cell_magic("writefile", fname, u'\n'.join([
'line1',
'line2',
]))
ip.run_cell_magic("writefile", "-a %s" % fname, u'\n'.join([
'line3',
'line4',
]))
with open(fname) as f:
s = f.read()
nt.assert_in('line1\n', s)
nt.assert_in('line3\n', s)
def test_file_spaces():
"""%%file with spaces in filename"""
ip = get_ipython()
with TemporaryWorkingDirectory() as td:
fname = "file name"
ip.run_cell_magic("file", '"%s"'%fname, u'\n'.join([
'line1',
'line2',
]))
with open(fname) as f:
s = f.read()
nt.assert_in('line1\n', s)
nt.assert_in('line2', s)
def test_script_config():
ip = get_ipython()
ip.config.ScriptMagics.script_magics = ['whoda']
sm = script.ScriptMagics(shell=ip)
nt.assert_in('whoda', sm.magics['cell'])
@dec.skip_win32
def test_script_out():
ip = get_ipython()
ip.run_cell_magic("script", "--out output sh", "echo 'hi'")
nt.assert_equal(ip.user_ns['output'], 'hi\n')
@dec.skip_win32
def test_script_err():
ip = get_ipython()
ip.run_cell_magic("script", "--err error sh", "echo 'hello' >&2")
nt.assert_equal(ip.user_ns['error'], 'hello\n')
@dec.skip_win32
def test_script_out_err():
ip = get_ipython()
ip.run_cell_magic("script", "--out output --err error sh", "echo 'hi'\necho 'hello' >&2")
nt.assert_equal(ip.user_ns['output'], 'hi\n')
nt.assert_equal(ip.user_ns['error'], 'hello\n')
@dec.skip_win32
def test_script_bg_out():
ip = get_ipython()
ip.run_cell_magic("script", "--bg --out output sh", "echo 'hi'")
nt.assert_equal(ip.user_ns['output'].read(), b'hi\n')
ip.user_ns['output'].close()
@dec.skip_win32
def test_script_bg_err():
ip = get_ipython()
ip.run_cell_magic("script", "--bg --err error sh", "echo 'hello' >&2")
nt.assert_equal(ip.user_ns['error'].read(), b'hello\n')
ip.user_ns['error'].close()
@dec.skip_win32
def test_script_bg_out_err():
ip = get_ipython()
ip.run_cell_magic("script", "--bg --out output --err error sh", "echo 'hi'\necho 'hello' >&2")
nt.assert_equal(ip.user_ns['output'].read(), b'hi\n')
nt.assert_equal(ip.user_ns['error'].read(), b'hello\n')
ip.user_ns['output'].close()
ip.user_ns['error'].close()
def test_script_defaults():
ip = get_ipython()
for cmd in ['sh', 'bash', 'perl', 'ruby']:
try:
find_cmd(cmd)
except Exception:
pass
else:
nt.assert_in(cmd, ip.magics_manager.magics['cell'])
@magics_class
class FooFoo(Magics):
"""class with both %foo and %%foo magics"""
@line_magic('foo')
def line_foo(self, line):
"I am line foo"
pass
@cell_magic("foo")
def cell_foo(self, line, cell):
"I am cell foo, not line foo"
pass
def test_line_cell_info():
"""%%foo and %foo magics are distinguishable to inspect"""
ip = get_ipython()
ip.magics_manager.register(FooFoo)
oinfo = ip.object_inspect('foo')
nt.assert_true(oinfo['found'])
nt.assert_true(oinfo['ismagic'])
oinfo = ip.object_inspect('%%foo')
nt.assert_true(oinfo['found'])
nt.assert_true(oinfo['ismagic'])
nt.assert_equal(oinfo['docstring'], FooFoo.cell_foo.__doc__)
oinfo = ip.object_inspect('%foo')
nt.assert_true(oinfo['found'])
nt.assert_true(oinfo['ismagic'])
nt.assert_equal(oinfo['docstring'], FooFoo.line_foo.__doc__)
def test_multiple_magics():
ip = get_ipython()
foo1 = FooFoo(ip)
foo2 = FooFoo(ip)
mm = ip.magics_manager
mm.register(foo1)
nt.assert_true(mm.magics['line']['foo'].__self__ is foo1)
mm.register(foo2)
nt.assert_true(mm.magics['line']['foo'].__self__ is foo2)
def test_alias_magic():
"""Test %alias_magic."""
ip = get_ipython()
mm = ip.magics_manager
# Basic operation: both cell and line magics are created, if possible.
ip.run_line_magic('alias_magic', 'timeit_alias timeit')
nt.assert_in('timeit_alias', mm.magics['line'])
nt.assert_in('timeit_alias', mm.magics['cell'])
# --cell is specified, line magic not created.
ip.run_line_magic('alias_magic', '--cell timeit_cell_alias timeit')
nt.assert_not_in('timeit_cell_alias', mm.magics['line'])
nt.assert_in('timeit_cell_alias', mm.magics['cell'])
# Test that line alias is created successfully.
ip.run_line_magic('alias_magic', '--line env_alias env')
nt.assert_equal(ip.run_line_magic('env', ''),
ip.run_line_magic('env_alias', ''))
# Test that line alias with parameters passed in is created successfully.
ip.run_line_magic('alias_magic', '--line history_alias history --params ' + shlex.quote('3'))
nt.assert_in('history_alias', mm.magics['line'])
def test_save():
"""Test %save."""
ip = get_ipython()
ip.history_manager.reset() # Clear any existing history.
cmds = [u"a=1", u"def b():\n return a**2", u"print(a, b())"]
for i, cmd in enumerate(cmds, start=1):
ip.history_manager.store_inputs(i, cmd)
with TemporaryDirectory() as tmpdir:
file = os.path.join(tmpdir, "testsave.py")
ip.run_line_magic("save", "%s 1-10" % file)
with open(file) as f:
content = f.read()
nt.assert_equal(content.count(cmds[0]), 1)
nt.assert_in('coding: utf-8', content)
ip.run_line_magic("save", "-a %s 1-10" % file)
with open(file) as f:
content = f.read()
nt.assert_equal(content.count(cmds[0]), 2)
nt.assert_in('coding: utf-8', content)
def test_store():
"""Test %store."""
ip = get_ipython()
ip.run_line_magic('load_ext', 'storemagic')
# make sure the storage is empty
ip.run_line_magic('store', '-z')
ip.user_ns['var'] = 42
ip.run_line_magic('store', 'var')
ip.user_ns['var'] = 39
ip.run_line_magic('store', '-r')
nt.assert_equal(ip.user_ns['var'], 42)
ip.run_line_magic('store', '-d var')
ip.user_ns['var'] = 39
ip.run_line_magic('store' , '-r')
nt.assert_equal(ip.user_ns['var'], 39)
def _run_edit_test(arg_s, exp_filename=None,
exp_lineno=-1,
exp_contents=None,
exp_is_temp=None):
ip = get_ipython()
M = code.CodeMagics(ip)
last_call = ['','']
opts,args = M.parse_options(arg_s,'prxn:')
filename, lineno, is_temp = M._find_edit_target(ip, args, opts, last_call)
if exp_filename is not None:
nt.assert_equal(exp_filename, filename)
if exp_contents is not None:
with io.open(filename, 'r', encoding='utf-8') as f:
contents = f.read()
nt.assert_equal(exp_contents, contents)
if exp_lineno != -1:
nt.assert_equal(exp_lineno, lineno)
if exp_is_temp is not None:
nt.assert_equal(exp_is_temp, is_temp)
def test_edit_interactive():
"""%edit on interactively defined objects"""
ip = get_ipython()
n = ip.execution_count
ip.run_cell(u"def foo(): return 1", store_history=True)
try:
_run_edit_test("foo")
except code.InteractivelyDefined as e:
nt.assert_equal(e.index, n)
else:
raise AssertionError("Should have raised InteractivelyDefined")
def test_edit_cell():
"""%edit [cell id]"""
ip = get_ipython()
ip.run_cell(u"def foo(): return 1", store_history=True)
# test
_run_edit_test("1", exp_contents=ip.user_ns['In'][1], exp_is_temp=True)
def test_bookmark():
ip = get_ipython()
ip.run_line_magic('bookmark', 'bmname')
with tt.AssertPrints('bmname'):
ip.run_line_magic('bookmark', '-l')
ip.run_line_magic('bookmark', '-d bmname')
def test_ls_magic():
ip = get_ipython()
json_formatter = ip.display_formatter.formatters['application/json']
json_formatter.enabled = True
lsmagic = ip.magic('lsmagic')
with warnings.catch_warnings(record=True) as w:
j = json_formatter(lsmagic)
nt.assert_equal(sorted(j), ['cell', 'line'])
nt.assert_equal(w, []) # no warnings
def test_strip_initial_indent():
def sii(s):
lines = s.splitlines()
return '\n'.join(code.strip_initial_indent(lines))
nt.assert_equal(sii(" a = 1\nb = 2"), "a = 1\nb = 2")
nt.assert_equal(sii(" a\n b\nc"), "a\n b\nc")
nt.assert_equal(sii("a\n b"), "a\n b")
def test_logging_magic_quiet_from_arg():
_ip.config.LoggingMagics.quiet = False
lm = logging.LoggingMagics(shell=_ip)
with TemporaryDirectory() as td:
try:
with tt.AssertNotPrints(re.compile("Activating.*")):
lm.logstart('-q {}'.format(
os.path.join(td, "quiet_from_arg.log")))
finally:
_ip.logger.logstop()
def test_logging_magic_quiet_from_config():
_ip.config.LoggingMagics.quiet = True
lm = logging.LoggingMagics(shell=_ip)
with TemporaryDirectory() as td:
try:
with tt.AssertNotPrints(re.compile("Activating.*")):
lm.logstart(os.path.join(td, "quiet_from_config.log"))
finally:
_ip.logger.logstop()
def test_logging_magic_not_quiet():
_ip.config.LoggingMagics.quiet = False
lm = logging.LoggingMagics(shell=_ip)
with TemporaryDirectory() as td:
try:
with tt.AssertPrints(re.compile("Activating.*")):
lm.logstart(os.path.join(td, "not_quiet.log"))
finally:
_ip.logger.logstop()
def test_time_no_var_expand():
_ip.user_ns['a'] = 5
_ip.user_ns['b'] = []
_ip.magic('time b.append("{a}")')
assert _ip.user_ns['b'] == ['{a}']
# this is slow, put at the end for local testing.
def test_timeit_arguments():
"Test valid timeit arguments, should not cause SyntaxError (GH #1269)"
if sys.version_info < (3,7):
_ip.magic("timeit ('#')")
else:
        # Python 3.7 optimizes a no-op statement like the one above out of the
        # loop body and complains that there is nothing in the for loop.
_ip.magic("timeit a=('#')")
| 29.739774 | 98 | 0.596207 | [
"MIT"
] | Eviekim/waning-keyboard | env/lib/python3.6/site-packages/IPython/core/tests/test_magic.py | 34,178 | Python |
# Copyright 2021 DAI Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at: http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Dict, Tuple
from ethtx.models.semantics_model import (
ParameterSemantics,
EventSemantics,
FunctionSemantics,
TransformationSemantics,
)
def _decode_parameters_list(raw_parameters_list: list) -> List[ParameterSemantics]:
parameters_list = []
if not raw_parameters_list:
return parameters_list
for raw_parameter_semantics in raw_parameters_list:
if "indexed" in raw_parameter_semantics:
indexed = raw_parameter_semantics["indexed"]
else:
indexed = False
if "dynamic" in raw_parameter_semantics:
dynamic = raw_parameter_semantics["dynamic"]
else:
dynamic = False
if raw_parameter_semantics["type"] == "tuple":
components = _decode_parameters_list(raw_parameter_semantics["components"])
else:
components = []
parameters_list.append(
ParameterSemantics(
raw_parameter_semantics["name"],
raw_parameter_semantics["type"],
components,
indexed,
dynamic,
)
)
return parameters_list
def decode_events_and_functions(
abi: dict,
) -> Tuple[Dict[str, EventSemantics], Dict[str, FunctionSemantics]]:
events = dict()
for signature, raw_event_semantics in abi.get("events", {}).items():
parameters = _decode_parameters_list(raw_event_semantics.get("parameters"))
events[signature] = EventSemantics(
signature,
raw_event_semantics["anonymous"],
raw_event_semantics["name"],
parameters,
)
functions = dict()
for signature, raw_function_semantics in abi.get("functions", {}).items():
if raw_function_semantics:
inputs = _decode_parameters_list(raw_function_semantics.get("inputs"))
outputs = _decode_parameters_list(raw_function_semantics.get("outputs"))
name = raw_function_semantics["name"]
else:
inputs = outputs = []
name = signature
functions[signature] = FunctionSemantics(signature, name, inputs, outputs)
return events, functions
def decode_transformations(
raw_transformations: dict,
) -> Dict[str, Dict[str, TransformationSemantics]]:
transformations = dict()
if raw_transformations:
for signature, transformation in raw_transformations.items():
transformations[signature] = dict()
for parameter_name, parameter_transformation in transformation.get(
"arguments", dict()
).items():
transformations[signature][parameter_name] = TransformationSemantics(
parameter_transformation.get("name"),
parameter_transformation.get("type"),
parameter_transformation.get("value"),
)
return transformations
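if __name__ == "__main__":
    # Hedged usage sketch, added for illustration only (not part of the original
    # module). The ABI fragment below just follows the shape consumed by the
    # helpers above; the selector strings are made up.
    sample_abi = {
        "events": {
            "0xddf252ad": {
                "anonymous": False,
                "name": "Transfer",
                "parameters": [
                    {"name": "src", "type": "address", "indexed": True},
                    {"name": "dst", "type": "address", "indexed": True},
                    {"name": "wad", "type": "uint256"},
                ],
            }
        },
        "functions": {
            "0xa9059cbb": {
                "name": "transfer",
                "inputs": [
                    {"name": "dst", "type": "address"},
                    {"name": "wad", "type": "uint256"},
                ],
                "outputs": [{"name": "", "type": "bool"}],
            }
        },
    }
    events, functions = decode_events_and_functions(sample_abi)
    transformations = decode_transformations(
        {"0xa9059cbb": {"arguments": {"wad": {"name": "amount"}}}}
    )
    print(sorted(events), sorted(functions), sorted(transformations))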
| 34.627451 | 87 | 0.65402 | [
"Apache-2.0"
] | tmierzwa/Ethtx | ethtx/decoders/decoders/semantics.py | 3,532 | Python |
"""
Support for Xiaomi Yeelight Wifi color bulb.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/light.yeelight/
"""
import logging
import voluptuous as vol
from homeassistant.util.color import (
color_temperature_mired_to_kelvin as mired_to_kelvin,
color_temperature_kelvin_to_mired as kelvin_to_mired)
from homeassistant.const import CONF_DEVICES, CONF_NAME
from homeassistant.components.light import (
ATTR_BRIGHTNESS, ATTR_HS_COLOR, ATTR_TRANSITION, ATTR_COLOR_TEMP,
ATTR_FLASH, FLASH_SHORT, FLASH_LONG, ATTR_EFFECT, SUPPORT_BRIGHTNESS,
SUPPORT_COLOR, SUPPORT_TRANSITION, SUPPORT_COLOR_TEMP, SUPPORT_FLASH,
SUPPORT_EFFECT, Light, PLATFORM_SCHEMA, ATTR_ENTITY_ID, DOMAIN)
import homeassistant.helpers.config_validation as cv
import homeassistant.util.color as color_util
REQUIREMENTS = ['yeelight==0.4.0']
_LOGGER = logging.getLogger(__name__)
LEGACY_DEVICE_TYPE_MAP = {
'color1': 'rgb',
'mono1': 'white',
'strip1': 'strip',
'bslamp1': 'bedside',
'ceiling1': 'ceiling',
}
DEFAULT_NAME = 'Yeelight'
DEFAULT_TRANSITION = 350
CONF_TRANSITION = 'transition'
CONF_SAVE_ON_CHANGE = 'save_on_change'
CONF_MODE_MUSIC = 'use_music_mode'
DATA_KEY = 'light.yeelight'
DEVICE_SCHEMA = vol.Schema({
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_TRANSITION, default=DEFAULT_TRANSITION): cv.positive_int,
vol.Optional(CONF_MODE_MUSIC, default=False): cv.boolean,
vol.Optional(CONF_SAVE_ON_CHANGE, default=True): cv.boolean,
})
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{vol.Optional(CONF_DEVICES, default={}): {cv.string: DEVICE_SCHEMA}, })
SUPPORT_YEELIGHT = (SUPPORT_BRIGHTNESS |
SUPPORT_TRANSITION |
SUPPORT_FLASH)
SUPPORT_YEELIGHT_RGB = (SUPPORT_YEELIGHT |
SUPPORT_COLOR |
SUPPORT_EFFECT |
SUPPORT_COLOR_TEMP)
YEELIGHT_MIN_KELVIN = YEELIGHT_MAX_KELVIN = 2700
YEELIGHT_RGB_MIN_KELVIN = 1700
YEELIGHT_RGB_MAX_KELVIN = 6500
EFFECT_DISCO = "Disco"
EFFECT_TEMP = "Slow Temp"
EFFECT_STROBE = "Strobe epilepsy!"
EFFECT_STROBE_COLOR = "Strobe color"
EFFECT_ALARM = "Alarm"
EFFECT_POLICE = "Police"
EFFECT_POLICE2 = "Police2"
EFFECT_CHRISTMAS = "Christmas"
EFFECT_RGB = "RGB"
EFFECT_RANDOM_LOOP = "Random Loop"
EFFECT_FAST_RANDOM_LOOP = "Fast Random Loop"
EFFECT_SLOWDOWN = "Slowdown"
EFFECT_WHATSAPP = "WhatsApp"
EFFECT_FACEBOOK = "Facebook"
EFFECT_TWITTER = "Twitter"
EFFECT_STOP = "Stop"
YEELIGHT_EFFECT_LIST = [
EFFECT_DISCO,
EFFECT_TEMP,
EFFECT_STROBE,
EFFECT_STROBE_COLOR,
EFFECT_ALARM,
EFFECT_POLICE,
EFFECT_POLICE2,
EFFECT_CHRISTMAS,
EFFECT_RGB,
EFFECT_RANDOM_LOOP,
EFFECT_FAST_RANDOM_LOOP,
EFFECT_SLOWDOWN,
EFFECT_WHATSAPP,
EFFECT_FACEBOOK,
EFFECT_TWITTER,
EFFECT_STOP]
SERVICE_SET_MODE = 'yeelight_set_mode'
ATTR_MODE = 'mode'
YEELIGHT_SERVICE_SCHEMA = vol.Schema({
vol.Optional(ATTR_ENTITY_ID): cv.entity_ids,
})
def _cmd(func):
"""Define a wrapper to catch exceptions from the bulb."""
def _wrap(self, *args, **kwargs):
import yeelight
try:
_LOGGER.debug("Calling %s with %s %s", func, args, kwargs)
return func(self, *args, **kwargs)
except yeelight.BulbException as ex:
_LOGGER.error("Error when calling %s: %s", func, ex)
return _wrap
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Set up the Yeelight bulbs."""
from yeelight.enums import PowerMode
if DATA_KEY not in hass.data:
hass.data[DATA_KEY] = {}
lights = []
if discovery_info is not None:
_LOGGER.debug("Adding autodetected %s", discovery_info['hostname'])
device_type = discovery_info['device_type']
device_type = LEGACY_DEVICE_TYPE_MAP.get(device_type, device_type)
# Not using hostname, as it seems to vary.
name = "yeelight_%s_%s" % (device_type,
discovery_info['properties']['mac'])
host = discovery_info['host']
device = {'name': name, 'ipaddr': host}
light = YeelightLight(device, DEVICE_SCHEMA({}))
lights.append(light)
hass.data[DATA_KEY][host] = light
else:
for host, device_config in config[CONF_DEVICES].items():
device = {'name': device_config[CONF_NAME], 'ipaddr': host}
light = YeelightLight(device, device_config)
lights.append(light)
hass.data[DATA_KEY][host] = light
add_devices(lights, True)
def service_handler(service):
"""Dispatch service calls to target entities."""
params = {key: value for key, value in service.data.items()
if key != ATTR_ENTITY_ID}
entity_ids = service.data.get(ATTR_ENTITY_ID)
if entity_ids:
target_devices = [dev for dev in hass.data[DATA_KEY].values()
if dev.entity_id in entity_ids]
else:
target_devices = hass.data[DATA_KEY].values()
for target_device in target_devices:
if service.service == SERVICE_SET_MODE:
target_device.set_mode(**params)
service_schema_set_mode = YEELIGHT_SERVICE_SCHEMA.extend({
vol.Required(ATTR_MODE):
vol.In([mode.name.lower() for mode in PowerMode])
})
hass.services.register(
DOMAIN, SERVICE_SET_MODE, service_handler,
schema=service_schema_set_mode)
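# Example service call for the mode service registered above (entity id and
# mode are illustrative; valid modes are the lower-cased names of
# yeelight.enums.PowerMode, and DOMAIN here is the light domain):
#
#   service: light.yeelight_set_mode
#   data:
#     entity_id: light.bedroom
#     mode: moonlight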
class YeelightLight(Light):
"""Representation of a Yeelight light."""
def __init__(self, device, config):
"""Initialize the Yeelight light."""
self.config = config
self._name = device['name']
self._ipaddr = device['ipaddr']
self._supported_features = SUPPORT_YEELIGHT
self._available = False
self._bulb_device = None
self._brightness = None
self._color_temp = None
self._is_on = None
self._hs = None
@property
def available(self) -> bool:
"""Return if bulb is available."""
return self._available
@property
def supported_features(self) -> int:
"""Flag supported features."""
return self._supported_features
@property
def effect_list(self):
"""Return the list of supported effects."""
return YEELIGHT_EFFECT_LIST
@property
def color_temp(self) -> int:
"""Return the color temperature."""
return self._color_temp
@property
def name(self) -> str:
"""Return the name of the device if any."""
return self._name
@property
def is_on(self) -> bool:
"""Return true if device is on."""
return self._is_on
@property
def brightness(self) -> int:
"""Return the brightness of this light between 1..255."""
return self._brightness
@property
def min_mireds(self):
"""Return minimum supported color temperature."""
if self.supported_features & SUPPORT_COLOR_TEMP:
return kelvin_to_mired(YEELIGHT_RGB_MAX_KELVIN)
return kelvin_to_mired(YEELIGHT_MAX_KELVIN)
@property
def max_mireds(self):
"""Return maximum supported color temperature."""
if self.supported_features & SUPPORT_COLOR_TEMP:
return kelvin_to_mired(YEELIGHT_RGB_MIN_KELVIN)
return kelvin_to_mired(YEELIGHT_MIN_KELVIN)
def _get_hs_from_properties(self):
rgb = self._properties.get('rgb', None)
color_mode = self._properties.get('color_mode', None)
if not rgb or not color_mode:
return None
color_mode = int(color_mode)
if color_mode == 2: # color temperature
temp_in_k = mired_to_kelvin(self._color_temp)
return color_util.color_temperature_to_hs(temp_in_k)
if color_mode == 3: # hsv
hue = int(self._properties.get('hue'))
sat = int(self._properties.get('sat'))
return (hue / 360 * 65536, sat / 100 * 255)
rgb = int(rgb)
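        # The bulb reports 'rgb' as a single 24-bit integer (0xRRGGBB);
        # unpack it into its red, green and blue channels below.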
blue = rgb & 0xff
green = (rgb >> 8) & 0xff
red = (rgb >> 16) & 0xff
return color_util.color_RGB_to_hs(red, green, blue)
@property
def hs_color(self) -> tuple:
"""Return the color property."""
return self._hs
@property
def _properties(self) -> dict:
return self._bulb.last_properties
@property
def _bulb(self) -> 'yeelight.Bulb':
import yeelight
if self._bulb_device is None:
try:
self._bulb_device = yeelight.Bulb(self._ipaddr)
self._bulb_device.get_properties() # force init for type
self._available = True
except yeelight.BulbException as ex:
self._available = False
_LOGGER.error("Failed to connect to bulb %s, %s: %s",
self._ipaddr, self._name, ex)
return self._bulb_device
def set_music_mode(self, mode) -> None:
"""Set the music mode on or off."""
if mode:
self._bulb.start_music()
else:
self._bulb.stop_music()
def update(self) -> None:
"""Update properties from the bulb."""
import yeelight
try:
self._bulb.get_properties()
if self._bulb_device.bulb_type == yeelight.BulbType.Color:
self._supported_features = SUPPORT_YEELIGHT_RGB
self._is_on = self._properties.get('power') == 'on'
bright = self._properties.get('bright', None)
if bright:
self._brightness = round(255 * (int(bright) / 100))
temp_in_k = self._properties.get('ct', None)
if temp_in_k:
self._color_temp = kelvin_to_mired(int(temp_in_k))
self._hs = self._get_hs_from_properties()
self._available = True
except yeelight.BulbException as ex:
if self._available: # just inform once
_LOGGER.error("Unable to update bulb status: %s", ex)
self._available = False
@_cmd
def set_brightness(self, brightness, duration) -> None:
"""Set bulb brightness."""
if brightness:
_LOGGER.debug("Setting brightness: %s", brightness)
self._bulb.set_brightness(brightness / 255 * 100,
duration=duration)
@_cmd
def set_rgb(self, rgb, duration) -> None:
"""Set bulb's color."""
if rgb and self.supported_features & SUPPORT_COLOR:
_LOGGER.debug("Setting RGB: %s", rgb)
self._bulb.set_rgb(rgb[0], rgb[1], rgb[2], duration=duration)
@_cmd
def set_colortemp(self, colortemp, duration) -> None:
"""Set bulb's color temperature."""
if colortemp and self.supported_features & SUPPORT_COLOR_TEMP:
temp_in_k = mired_to_kelvin(colortemp)
_LOGGER.debug("Setting color temp: %s K", temp_in_k)
self._bulb.set_color_temp(temp_in_k, duration=duration)
@_cmd
def set_default(self) -> None:
"""Set current options as default."""
self._bulb.set_default()
@_cmd
def set_flash(self, flash) -> None:
"""Activate flash."""
if flash:
from yeelight import (RGBTransition, SleepTransition, Flow,
BulbException)
if self._bulb.last_properties["color_mode"] != 1:
_LOGGER.error("Flash supported currently only in RGB mode.")
return
transition = int(self.config[CONF_TRANSITION])
if flash == FLASH_LONG:
count = 1
duration = transition * 5
if flash == FLASH_SHORT:
count = 1
duration = transition * 2
red, green, blue = color_util.color_hs_to_RGB(*self._hs)
transitions = list()
transitions.append(
RGBTransition(255, 0, 0, brightness=10, duration=duration))
transitions.append(SleepTransition(
duration=transition))
transitions.append(
RGBTransition(red, green, blue, brightness=self.brightness,
duration=duration))
flow = Flow(count=count, transitions=transitions)
try:
self._bulb.start_flow(flow)
except BulbException as ex:
_LOGGER.error("Unable to set flash: %s", ex)
@_cmd
def set_effect(self, effect) -> None:
"""Activate effect."""
if effect:
from yeelight import (Flow, BulbException)
from yeelight.transitions import (disco, temp, strobe, pulse,
strobe_color, alarm, police,
police2, christmas, rgb,
randomloop, slowdown)
if effect == EFFECT_STOP:
self._bulb.stop_flow()
return
if effect == EFFECT_DISCO:
flow = Flow(count=0, transitions=disco())
if effect == EFFECT_TEMP:
flow = Flow(count=0, transitions=temp())
if effect == EFFECT_STROBE:
flow = Flow(count=0, transitions=strobe())
if effect == EFFECT_STROBE_COLOR:
flow = Flow(count=0, transitions=strobe_color())
if effect == EFFECT_ALARM:
flow = Flow(count=0, transitions=alarm())
if effect == EFFECT_POLICE:
flow = Flow(count=0, transitions=police())
if effect == EFFECT_POLICE2:
flow = Flow(count=0, transitions=police2())
if effect == EFFECT_CHRISTMAS:
flow = Flow(count=0, transitions=christmas())
if effect == EFFECT_RGB:
flow = Flow(count=0, transitions=rgb())
if effect == EFFECT_RANDOM_LOOP:
flow = Flow(count=0, transitions=randomloop())
if effect == EFFECT_FAST_RANDOM_LOOP:
flow = Flow(count=0, transitions=randomloop(duration=250))
if effect == EFFECT_SLOWDOWN:
flow = Flow(count=0, transitions=slowdown())
if effect == EFFECT_WHATSAPP:
flow = Flow(count=2, transitions=pulse(37, 211, 102))
if effect == EFFECT_FACEBOOK:
flow = Flow(count=2, transitions=pulse(59, 89, 152))
if effect == EFFECT_TWITTER:
flow = Flow(count=2, transitions=pulse(0, 172, 237))
try:
self._bulb.start_flow(flow)
except BulbException as ex:
_LOGGER.error("Unable to set effect: %s", ex)
def turn_on(self, **kwargs) -> None:
"""Turn the bulb on."""
import yeelight
brightness = kwargs.get(ATTR_BRIGHTNESS)
colortemp = kwargs.get(ATTR_COLOR_TEMP)
hs_color = kwargs.get(ATTR_HS_COLOR)
rgb = color_util.color_hs_to_RGB(*hs_color) if hs_color else None
flash = kwargs.get(ATTR_FLASH)
effect = kwargs.get(ATTR_EFFECT)
duration = int(self.config[CONF_TRANSITION]) # in ms
if ATTR_TRANSITION in kwargs: # passed kwarg overrides config
duration = int(kwargs.get(ATTR_TRANSITION) * 1000) # kwarg in s
try:
self._bulb.turn_on(duration=duration)
except yeelight.BulbException as ex:
_LOGGER.error("Unable to turn the bulb on: %s", ex)
return
if self.config[CONF_MODE_MUSIC] and not self._bulb.music_mode:
try:
self.set_music_mode(self.config[CONF_MODE_MUSIC])
except yeelight.BulbException as ex:
_LOGGER.error("Unable to turn on music mode,"
"consider disabling it: %s", ex)
try:
# values checked for none in methods
self.set_rgb(rgb, duration)
self.set_colortemp(colortemp, duration)
self.set_brightness(brightness, duration)
self.set_flash(flash)
self.set_effect(effect)
except yeelight.BulbException as ex:
_LOGGER.error("Unable to set bulb properties: %s", ex)
return
# save the current state if we had a manual change.
if self.config[CONF_SAVE_ON_CHANGE] and (brightness
or colortemp
or rgb):
try:
self.set_default()
except yeelight.BulbException as ex:
_LOGGER.error("Unable to set the defaults: %s", ex)
return
def turn_off(self, **kwargs) -> None:
"""Turn off."""
import yeelight
duration = int(self.config[CONF_TRANSITION]) # in ms
if ATTR_TRANSITION in kwargs: # passed kwarg overrides config
duration = int(kwargs.get(ATTR_TRANSITION) * 1000) # kwarg in s
try:
self._bulb.turn_off(duration=duration)
except yeelight.BulbException as ex:
_LOGGER.error("Unable to turn the bulb off: %s", ex)
def set_mode(self, mode: str):
"""Set a power mode."""
import yeelight
try:
self._bulb.set_power_mode(yeelight.enums.PowerMode[mode.upper()])
except yeelight.BulbException as ex:
_LOGGER.error("Unable to set the power mode: %s", ex)
| 34.885149 | 79 | 0.604586 | [ "Apache-2.0" ] | DevRGT/home-assistant | homeassistant/components/light/yeelight.py | 17,617 | Python |
import numpy as np
import sys
class RBF():
def __init__(self, Input, Output, Ptypes, Nclasses):
self.input = Input
self.hidden = Ptypes * Nclasses
self.output = Output
self.ptypes = Ptypes
self.nclasses = Nclasses
self.protos = 0
self.weights = 0
self.spread = 0
def createPrototypes(self, data):
groups = np.random.randint(0, data.shape[0], size = (self.hidden))
prototypes = np.zeros((self.hidden, data.shape[1]))
i = 0
for element in groups:
prototypes[i] = data[element, :]
i += 1
self.protos = prototypes
def sigma(self):
temp = 0
for i in range(self.hidden):
for j in range(self.hidden):
distance = np.square(np.linalg.norm(self.protos[i] - self.protos[j]))
if distance > temp:
temp = distance
self.spread = temp/np.sqrt(self.hidden)
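        # Spread heuristic used here: the largest squared distance between any
        # two prototypes, divided by the square root of the hidden-layer size.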
def train(self, data, classes):
self.createPrototypes(data)
self.sigma()
hidden_out = np.zeros(shape=(0,self.hidden))
        for sample in data:
            output = []
            for proto in self.protos:
                distance = np.square(np.linalg.norm(sample - proto))
neuron_output = np.exp(-(distance)/(np.square(self.spread)))
output.append(neuron_output)
hidden_out = np.vstack([hidden_out,np.array(output)])
self.weights = np.dot(np.linalg.pinv(hidden_out), classes)
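        # Output weights are solved in closed form: the least-squares solution
        # of hidden_out @ weights ~= classes via the Moore-Penrose pseudoinverse.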
def test(self, data, classes):
right = 0
for i in range(len(data)):
d = data[i]
output = []
for proto in self.protos:
distance = np.square(np.linalg.norm(d-proto))
neuron_output = np.exp(-(distance)/np.square(self.spread))
output.append(neuron_output)
network_output = np.dot(np.array(output),self.weights)
print ("Expected: ", classes[i].argmax(axis=0) +1)
print ("Result: ", network_output.argmax(axis=0) + 1)
print ()
if network_output.argmax(axis=0) + 1 == classes[i].argmax(axis=0) +1:
right += 1
print ("Accuracy(%): ", (right * 100) / len(data))
def read_iris(percentage):
dataset = np.loadtxt('iris.data', delimiter=',', skiprows=0)
np.random.shuffle(dataset)
q = int(dataset.shape[0] * percentage) + 2
X_training = dataset[0:q, 0:4]
Y_training = dataset[0:q, 4]
X_test = dataset[q:150, 0:4]
Y_test = dataset[q:150, 4]
return X_training, Y_training, X_test, Y_test
def process_iris_data(data):
p_data = np.zeros((data.shape[0], data.shape[1]))
max_col1 = np.amax(data[:,0])
max_col2 = np.amax(data[:,1])
max_col3 = np.amax(data[:,2])
max_col4 = np.amax(data[:,3])
for n in range(len(data)):
p_data[n, 0] = data[n,0] / max_col1
p_data[n, 1] = data[n,1] / max_col2
p_data[n, 2] = data[n,2] / max_col3
p_data[n, 3] = data[n,3] / max_col4
return p_data
def process_iris_labels(labels, operation):
if operation == 0:
p_labels = np.zeros((labels.shape[0], 3))
for n in range(len(labels)):
p_labels[n, int(labels[n])] = 1
return p_labels
else:
p_labels = np.argmax(labels, axis=1)
return p_labels
if __name__ == '__main__':
# input params
# percentage
parameters = (sys.argv)
print(parameters)
x1, y1, x2, y2 = read_iris(float(parameters[1]))
xp = process_iris_data(x1)
yp = process_iris_labels(y1,0)
nn = RBF(xp.shape[1], y1.shape[0], xp.shape[1], 3)
nn.train(xp, yp)
xp = process_iris_data(x2)
yp = process_iris_labels(y2,0)
    nn.test(xp, yp)
| 26.614379 | 85 | 0.525049 | [ "MIT" ] | LucasAntognoni/SCC0270 | Assignment 3/rbf.py | 4,072 | Python |
#-------------------------------------------------------------------------------
# Name: GUI Calculator
# Purpose: Simple calculator with GUI using tkinter
#
# Author: Daniel Campos
#
# Created: Monday Dec 1st, 2014
#-------------------------------------------------------------------------------
from tkinter import *
import math
class Calculator:
'''GUI for the calculator'''
def __init__(self, master):
self.master = master
self.stringContents = ''
self.displayStr = StringVar(self.stringContents)
self.display = Label(master, textvariable=self.displayStr, width=25, anchor=E, relief=SUNKEN).grid(row=0, columnspan=4)
self.seven = Button(master, width=3, text='7', command=lambda: self.addSymbol('7')).grid(row=1, column=0)
self.eight = Button(master, width=3, text='8', command=lambda: self.addSymbol('8')).grid(row=1, column=1)
self.nine = Button(master, width=3, text='9', command=lambda: self.addSymbol('9')).grid(row=1, column=2)
self.div = Button(master, width=3, text='/', command=lambda: self.addSymbol('/')).grid(row=1, column=3)
self.master.bind('7', self.addKeyboardSymbol)
self.master.bind('8', self.addKeyboardSymbol)
self.master.bind('9', self.addKeyboardSymbol)
self.master.bind('/', self.addKeyboardSymbol)
self.four = Button(master, width=3, text='4', command=lambda: self.addSymbol('4')).grid(row=3, column=0)
self.five = Button(master, width=3, text='5', command=lambda: self.addSymbol('5')).grid(row=3, column=1)
self.six = Button(master, width=3, text='6', command=lambda: self.addSymbol('6')).grid(row=3, column=2)
self.times = Button(master, width=3, text='*', command=lambda: self.addSymbol('*')).grid(row=3, column=3)
self.master.bind('4', self.addKeyboardSymbol)
self.master.bind('5', self.addKeyboardSymbol)
self.master.bind('6', self.addKeyboardSymbol)
self.master.bind('*', self.addKeyboardSymbol)
self.one = Button(master, width=3, text='1', command=lambda: self.addSymbol('1')).grid(row=4, column=0)
self.two = Button(master, width=3, text='2', command=lambda: self.addSymbol('2')).grid(row=4, column=1)
self.three = Button(master, width=3, text='3', command=lambda: self.addSymbol('3')).grid(row=4, column=2)
self.minus = Button(master, width=3, text='-', command=lambda: self.addSymbol('-')).grid(row=4, column=3)
self.master.bind('1', self.addKeyboardSymbol)
self.master.bind('2', self.addKeyboardSymbol)
self.master.bind('3', self.addKeyboardSymbol)
self.master.bind('-', self.addKeyboardSymbol)
self.zero = Button(master, width=3, text='0', command=lambda: self.addSymbol('0')).grid(row=5, column=0)
self.point = Button(master, width=3, text='.', command=lambda: self.addSymbol('.')).grid(row=5, column=1)
self.equals = Button(master, width=3, text='=', command=lambda: self.evaluate()).grid(row=5, column=2)
self.plus = Button(master, width=3, text='+', command=lambda: self.addSymbol('+')).grid(row=5, column=3)
self.master.bind('0', self.addKeyboardSymbol)
self.master.bind('.', self.addKeyboardSymbol)
self.master.bind('<Return>', self.evaluate)
self.master.bind('+', self.addKeyboardSymbol)
self.c = Button(master, width=3, text='C', command=lambda: self.clear()).grid(row=6, column=0)
self.d = Button(master, width=3, text='D', command=lambda: self.backSpace()).grid(row=6, column=1)
self.lparren = Button(master, width=3, text='(', command=lambda: self.addSymbol('(')).grid(row=6, column=2)
self.rparren = Button(master, width=3, text=')', command=lambda: self.addSymbol(')')).grid(row=6, column=3)
self.master.bind('C', self.clear)
self.master.bind('c', self.clear)
self.master.bind('<BackSpace>', self.backSpace)
self.master.bind('(', self.addKeyboardSymbol)
self.master.bind(')', self.addKeyboardSymbol)
def addSymbol(self, char):
'''Displays the inputted char onto the display'''
self.stringContents += char
self.displayStr.set(self.stringContents)
def addKeyboardSymbol(self,event):
'''Displays the inputted char onto the display'''
self.stringContents += str(repr(event.char))[1:-1]
self.displayStr.set(self.stringContents)
def evaluate(self, evt=None):
        '''Evaluates the expression'''
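        # eval() keeps the implementation short; input is limited to the
        # characters wired to the calculator's buttons and key bindings.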
try:
self.displayStr.set(eval(self.stringContents))
self.stringContents = str(eval(self.stringContents))
except Exception as e:
self.displayStr.set('Error')
self.stringContents = ''
def clear(self, evt=None):
'''Clears the expression'''
self.stringContents = ''
self.displayStr.set(self.stringContents)
def backSpace(self, evt=None):
'''Backspace on expression'''
self.stringContents = self.stringContents[:-1]
self.displayStr.set(self.stringContents)
def Main():
master = Tk()
calculator = Calculator(master)
calculator.master.title('Calculator')
calculator.master.resizable(False, False)
master.mainloop()
if __name__ == '__main__':
    Main()
| 54.360825 | 127 | 0.62564 | [ "MIT" ] | spacemanidol/RPICS | ProgrammingInPython/proj08_daniel_campos.py | 5,273 | Python |
"""
URLConf for Satchmo Contacts.
"""
from django.conf.urls.defaults import patterns
from signals_ahoy.signals import collect_urls
from satchmo_store import contact
from satchmo_store.shop.satchmo_settings import get_satchmo_setting
ssl = get_satchmo_setting('SSL', default_value=False)
urlpatterns = patterns('satchmo_store.contact.views',
(r'^$', 'view', {}, 'satchmo_account_info'),
(r'^update/$', 'update', {}, 'satchmo_profile_update'),
(r'^ajax_state/$', 'ajax_get_state', {'SSL': ssl}, 'satchmo_contact_ajax_state'),
)
collect_urls.send(sender=contact, patterns=urlpatterns)
| 31.473684 | 85 | 0.752508 | [ "BSD-3-Clause" ] | twidi/satchmo | satchmo/apps/satchmo_store/contact/urls.py | 598 | Python |
from pathlib import Path
CONFIG_ENV_NAME = "CYBERBOX_CONFIG_FILE"
CYBERBOX_DIR = Path(__file__).parent.resolve()
CYBERBOX_TEST_DB_URL = "CYBERBOX_TEST_DB_URL"
| 20.25 | 46 | 0.820988 | [ "MIT" ] | artslob/cyberbox | cyberbox/const.py | 162 | Python |
import sys
import requests
import argparse
import json
import os
import configparser
import arrow
from colorama import init
import traceback
def get_color(color_code):
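    # Builds an ANSI SGR escape sequence, e.g. get_color(91) -> '\x1b[91m'
    # (bright red foreground).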
return '\x1b[%sm' % color_code
def parse_brief(brief):
sentences = None
if args.news:
sentences = json.loads(
requests.get(
"https://corpus.vocabulary.com/api/1.0/examples.json?maxResults=5&query=" + args.word).text)[
'result']['sentences']
word = WORD_COLOR + brief['wordOut'] + ": "
if 'relation' in brief['lemma']:
word += TEXT_COLOR + (
"%s为%s的%s" % (
brief['wordOut'], brief['lemma']['lemma'],
brief['lemma']['relation']))
print(word)
pron = ""
if 'usPron' in brief:
pron += HINT_COLOR + " 美音 " + TEXT_COLOR + "/%s/" % brief['usPron'][
'ps']
if 'ukPron' in brief:
pron += HINT_COLOR + " 英音 " + TEXT_COLOR + "/%s/" % brief['ukPron'][
'ps']
if pron:
print(pron)
if 'chnDefinitions' in brief:
print(SECTION_COLOR + "中文释义")
for chn_def in brief['chnDefinitions']:
if 'pos' in chn_def:
print(
" " + HINT_COLOR + chn_def['pos'].ljust(8) + TEXT_COLOR +
chn_def[
'meaning'])
else:
print(" " + "".ljust(8) + TEXT_COLOR + chn_def['meaning'])
if 'engDefinitions' in brief:
print(SECTION_COLOR + "英文释义")
for eng_def in brief['engDefinitions']:
if 'pos' in eng_def:
print(
" " + HINT_COLOR + eng_def['pos'].ljust(8) + TEXT_COLOR +
eng_def[
'meaning'])
else:
print(" " + "".ljust(8) + TEXT_COLOR + eng_def['meaning'])
if sentences:
print(SECTION_COLOR + "新闻例句")
for i, sentence in enumerate(sentences):
print(TEXT_COLOR,
"".ljust(4) + (str(i + 1) + ".").ljust(3) + sentence[
'sentence'])
print(SOURCE_COLOR,
"".ljust(7) + sentence['volume']['corpus']['name'] + "".ljust(
4) +
arrow.get(sentence['volume']['dateAdded']).format(
"MMM DD, YYYY"))
def parse_source(sentence_group):
if 'source' not in sentence_group:
return "牛津高阶英汉双解词典"
else:
return sourceDict[sentence_group['source']]
def parse_detail(detail):
parse_brief(detail['wordBrief'])
if 'sentenceLists' in detail:
print(SECTION_COLOR + "双语例句")
for sentenceGroup in detail['sentenceLists']:
count = 1
print("".ljust(4) + HINT_COLOR + parse_source(sentenceGroup))
for sentence in sentenceGroup['sentences']:
print(TEXT_COLOR + "".ljust(8) + ("%s." % str(count)).ljust(3) +
sentence['eng'])
print("".ljust(8) + "".ljust(3) + sentence['chn'])
if count >= default_sent:
break
count += 1
init()
sourceDict = {"CAMBRIDGE": "剑桥高阶英汉双解词典", "LONGMAN": "朗文当代高级英语词典",
"COLLINS": "柯林斯英汉双解大词典", "ONLINE": "金山词霸"}
parser = argparse.ArgumentParser(description='manual to this script')
parser.add_argument('word', type=str, help="The word you want to query")
parser.add_argument('--detail', '-d', action='store', default=0, const=2,
nargs='?', type=int, dest='detail',
help="Show the detailed meaning of the word")
parser.add_argument('--brief', '-b', action='store_true', default=True,
help="Show the brief meaning of the word", )
parser.add_argument('--news', '-n', action='store_true', default=False,
help="Whether show sentence examples from news")
args = parser.parse_args()
if getattr(sys, 'frozen', False):
# we are running in a bundle
bundle_dir = os.path.split(sys.executable)[0]
else:
# we are running in a normal Python environment
bundle_dir = os.path.dirname(os.path.abspath(__file__))
config_path = os.path.join(bundle_dir, "color.ini")
config = configparser.ConfigParser()
config.read(config_path)
WORD_COLOR = get_color(
config.getint('COLOR', 'word_color') if config.getint('COLOR',
'word_color') else 91)
HINT_COLOR = get_color(
config.getint('COLOR', 'hint_color') if config.getint('COLOR',
'hint_color') else 92)
SECTION_COLOR = get_color(
config.getint('COLOR', 'section_color') if config.getint('COLOR',
'section_color') else 93)
TEXT_COLOR = get_color(
config.getint('COLOR', 'text_color') if config.getint('COLOR',
'text_color') else 97)
SOURCE_COLOR = get_color(
config.getint('COLOR', 'source_color') if config.getint('COLOR',
'source_color') else 90)
ENDPOINT = config.get("CONFIG", "endpoint")
detail = json.loads(
requests.get(ENDPOINT + "/word/detail?json=true&word=" + args.word).text)
default_sent = args.detail
try:
if args.detail:
parse_detail(detail)
else:
parse_brief(detail['wordBrief'])
except Exception as e:
traceback.print_exc()
print("该单词不存在")
| 36.653333 | 109 | 0.541288 | [ "MIT" ] | AntiSomnus/iDict-cmd | win_python/idict.py | 5,642 | Python |
# Python
import unittest
from copy import deepcopy
from unittest.mock import Mock
# ATS
from ats.topology import Device
# Genie
from genie.libs.ops.igmp.iosxe.igmp import Igmp
from genie.libs.ops.igmp.iosxe.tests.igmp_output import IgmpOutput
# Parser
from genie.libs.parser.iosxe.show_igmp import ShowIpIgmpInterface, \
ShowIpIgmpGroupsDetail, \
ShowIpIgmpSsmMapping
# iosxe show_vrf
from genie.libs.parser.iosxe.show_vrf import ShowVrfDetail
outputs = {}
outputs['show ip igmp interface'] = IgmpOutput.ShowIpIgmpInterface_default
outputs['show ip igmp vrf VRF1 interface'] = IgmpOutput.ShowIpIgmpInterface_VRF1
outputs['show ip igmp groups detail'] = IgmpOutput.ShowIpIgmpGroupsDetail_default
outputs['show ip igmp vrf VRF1 groups detail'] = IgmpOutput.ShowIpIgmpGroupsDetail_VRF1
outputs['show ip igmp ssm-mapping 239.1.1.1'] = IgmpOutput.ShowIpIgmpSsmMapping_default_1
outputs['show ip igmp ssm-mapping 239.2.2.2'] = IgmpOutput.ShowIpIgmpSsmMapping_default_2
outputs['show ip igmp ssm-mapping 239.3.3.3'] = IgmpOutput.ShowIpIgmpSsmMapping_default_3
outputs['show ip igmp ssm-mapping 239.4.4.4'] = IgmpOutput.ShowIpIgmpSsmMapping_default_4
outputs['show ip igmp ssm-mapping 239.5.5.5'] = IgmpOutput.ShowIpIgmpSsmMapping_default_5
outputs['show ip igmp ssm-mapping 239.6.6.6'] = IgmpOutput.ShowIpIgmpSsmMapping_default_6
outputs['show ip igmp ssm-mapping 239.7.7.7'] = IgmpOutput.ShowIpIgmpSsmMapping_default_7
outputs['show ip igmp ssm-mapping 239.8.8.8'] = IgmpOutput.ShowIpIgmpSsmMapping_default_8
outputs['show ip igmp ssm-mapping 239.9.9.9'] = IgmpOutput.ShowIpIgmpSsmMapping_default_9
outputs['show ip igmp ssm-mapping 224.0.1.40'] = IgmpOutput.ShowIpIgmpSsmMapping_default_10
outputs['show ip igmp vrf VRF1 ssm-mapping 239.1.1.1'] = IgmpOutput.ShowIpIgmpSsmMapping_VRF1_1
outputs['show ip igmp vrf VRF1 ssm-mapping 239.2.2.2'] = IgmpOutput.ShowIpIgmpSsmMapping_VRF1_2
outputs['show ip igmp vrf VRF1 ssm-mapping 239.3.3.3'] = IgmpOutput.ShowIpIgmpSsmMapping_VRF1_3
outputs['show ip igmp vrf VRF1 ssm-mapping 239.4.4.4'] = IgmpOutput.ShowIpIgmpSsmMapping_VRF1_4
outputs['show ip igmp vrf VRF1 ssm-mapping 239.5.5.5'] = IgmpOutput.ShowIpIgmpSsmMapping_VRF1_5
outputs['show ip igmp vrf VRF1 ssm-mapping 239.6.6.6'] = IgmpOutput.ShowIpIgmpSsmMapping_VRF1_6
outputs['show ip igmp vrf VRF1 ssm-mapping 239.7.7.7'] = IgmpOutput.ShowIpIgmpSsmMapping_VRF1_7
outputs['show ip igmp vrf VRF1 ssm-mapping 239.8.8.8'] = IgmpOutput.ShowIpIgmpSsmMapping_VRF1_8
outputs['show ip igmp vrf VRF1 ssm-mapping 224.0.1.40'] = IgmpOutput.ShowIpIgmpSsmMapping_VRF1_10
def mapper(key):
return outputs[key]
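# mapper is wired to the mocked device.execute below, so each CLI command
# string returns its canned output from the dictionary above.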
class test_igmp(unittest.TestCase):
def setUp(self):
self.device = Device(name='aDevice')
self.device.os = 'iosxe'
self.device.mapping={}
self.device.mapping['cli']='cli'
# Give the device as a connection type
# This is done in order to call the parser on the output provided
self.device.connectionmgr.connections['cli'] = self.device
def test_complete_output(self):
self.maxDiff = None
igmp = Igmp(device=self.device)
# Get outputs
igmp.maker.outputs[ShowVrfDetail] = \
{'': IgmpOutput.ShowVrfDetail}
# Return outputs above as inputs to parser when called
self.device.execute = Mock()
self.device.execute.side_effect = mapper
# Learn the feature
igmp.learn()
# Verify Ops was created successfully
self.assertEqual(igmp.info, IgmpOutput.Igmp_info)
def test_empty_output(self):
self.maxDiff = None
igmp = Igmp(device=self.device)
# Get outputs
igmp.maker.outputs[ShowVrfDetail] = \
{'': {}}
# Return outputs above as inputs to parser when called
self.device.execute = Mock()
outputs['show ip igmp interface'] = ''
outputs['show ip igmp vrf VRF1 interface'] = ''
outputs['show ip igmp groups detail'] = ''
outputs['show ip igmp vrf VRF1 groups detail'] = ''
outputs['show ip igmp ssm-mapping 239.1.1.1'] = ''
outputs['show ip igmp vrf VRF1 ssm-mapping 239.1.1.1'] = ''
self.device.execute.side_effect = mapper
# Learn the feature
igmp.learn()
# revert the outputs
outputs['show ip igmp interface'] = IgmpOutput.ShowIpIgmpInterface_default
outputs['show ip igmp vrf VRF1 interface'] = IgmpOutput.ShowIpIgmpInterface_VRF1
outputs['show ip igmp groups detail'] = IgmpOutput.ShowIpIgmpGroupsDetail_default
outputs['show ip igmp vrf VRF1 groups detail'] = IgmpOutput.ShowIpIgmpGroupsDetail_VRF1
outputs['show ip igmp ssm-mapping 239.1.1.1'] = IgmpOutput.ShowIpIgmpSsmMapping_default_1
outputs['show ip igmp vrf VRF1 ssm-mapping 239.1.1.1'] = IgmpOutput.ShowIpIgmpSsmMapping_VRF1_1
# Check no attribute not found
with self.assertRaises(AttributeError):
igmp.info['vrfs']
def test_selective_attribute(self):
self.maxDiff = None
igmp = Igmp(device=self.device)
# Get outputs
igmp.maker.outputs[ShowVrfDetail] = \
{'': IgmpOutput.ShowVrfDetail}
# Return outputs above as inputs to parser when called
self.device.execute = Mock()
self.device.execute.side_effect = mapper
# Learn the feature
igmp.learn()
# Check specific attribute values
# info - default vrf
self.assertEqual(igmp.info['vrfs']['default']['max_groups'], 20)
# info - vrf VRF1
self.assertEqual(igmp.info['vrfs']['VRF1']['interfaces']\
['GigabitEthernet2']['querier'], '20.1.2.1')
def test_incomplete_output(self):
self.maxDiff = None
igmp = Igmp(device=self.device)
# Get outputs
igmp.maker.outputs[ShowVrfDetail] = \
{'': IgmpOutput.ShowVrfDetail}
# Return outputs above as inputs to parser when called
self.device.execute = Mock()
# overwrite output with empty output
outputs['show ip igmp vrf VRF1 groups detail'] = '''\
show ip igmp vrf VRF1 groups detail
'''
self.device.execute.side_effect = mapper
# Learn the feature
igmp.learn()
# Delete missing specific attribute values
expect_dict = deepcopy(IgmpOutput.Igmp_info)
del(expect_dict['vrfs']['VRF1']['interfaces']['GigabitEthernet2']['join_group'])
del(expect_dict['vrfs']['VRF1']['interfaces']['GigabitEthernet2']['static_group'])
del(expect_dict['vrfs']['VRF1']['interfaces']['GigabitEthernet2']['group'])
del(expect_dict['vrfs']['VRF1']['ssm_map'])
# Verify Ops was created successfully
self.assertEqual(igmp.info, expect_dict)
if __name__ == '__main__':
unittest.main()
| 41.369048 | 103 | 0.683597 | [ "Apache-2.0" ] | kecorbin/genielibs | pkgs/ops-pkg/src/genie/libs/ops/igmp/iosxe/tests/test_igmp.py | 6,950 | Python |
#!/usr/bin/python
#
# linearize-hashes.py: List blocks in a linear, no-fork version of the chain.
#
# Copyright (c) 2013 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import json
import struct
import re
import base64
import httplib
import sys
settings = {}
class BitcoinRPC:
OBJID = 1
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def rpc(self, method, params=None):
self.OBJID += 1
obj = { 'version' : '1.1',
'method' : method,
'id' : self.OBJID }
if params is None:
obj['params'] = []
else:
obj['params'] = params
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print "JSON-RPC: no response"
return None
body = resp.read()
resp_obj = json.loads(body)
if resp_obj is None:
print "JSON-RPC: cannot JSON-decode body"
return None
if 'error' in resp_obj and resp_obj['error'] != None:
return resp_obj['error']
if 'result' not in resp_obj:
print "JSON-RPC: no result in object"
return None
return resp_obj['result']
def getblock(self, hash, verbose=True):
return self.rpc('getblock', [hash, verbose])
def getblockhash(self, index):
return self.rpc('getblockhash', [index])
def get_block_hashes(settings):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpassword'])
for height in xrange(settings['min_height'], settings['max_height']+1):
hash = rpc.getblockhash(height)
print(hash)
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: linearize-hashes.py CONFIG-FILE"
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 4242
if 'min_height' not in settings:
settings['min_height'] = 0
if 'max_height' not in settings:
settings['max_height'] = 319000
if 'rpcuser' not in settings or 'rpcpassword' not in settings:
print "Missing username and/or password in cfg file"
sys.exit(1)
settings['port'] = int(settings['port'])
settings['min_height'] = int(settings['min_height'])
settings['max_height'] = int(settings['max_height'])
get_block_hashes(settings)
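# Example CONFIG-FILE contents (key=value lines, '#' lines are ignored).
# rpcuser/rpcpassword are placeholders; the other values are the defaults
# applied above when a key is missing:
#
#   host=127.0.0.1
#   port=4242
#   rpcuser=someuser
#   rpcpassword=somepassword
#   min_height=0
#   max_height=319000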
| 25.803738 | 78 | 0.673669 | [ "MIT" ] | empyrean-project/Empyrean | contrib/linearize/linearize-hashes.py | 2,761 | Python |
import os
import yaml
import hashlib
from .util import json_dumps
class PhishingTrackerFile:
def __init__(self, __logger=None):
global logger
logger = __logger
def load_config(self, filename=None):
config = {}
try:
with open(filename, 'r') as f:
config = yaml.safe_load(f.read())
except Exception as e:
logger.fatal(e)
exit(1)
logger.info('Loaded {} items from {}'.format(len(config), filename))
return config
def save_datafile(self, data, timestamp, pathname=None):
subpath = os.path.join(data['meta']['domain_name'])
if pathname is not None:
subpath = os.path.join(pathname, data['meta']['domain_name'])
if not os.path.isdir(subpath):
os.mkdir(subpath)
md5prefix = hashlib.md5(data['meta']['reference'].encode('utf-8')).hexdigest()[0:4]
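        # The first four hex characters of the reference's MD5 keep filenames
        # distinct per reference without embedding the full URL in the name.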
filename = os.path.join(subpath, '{}_{}_{}.json'.format(data['meta']['host_name'], md5prefix, timestamp))
with open(filename, 'w') as f:
f.write(json_dumps(data, indent=' '))
logger.info('Phish data written to {}'.format(filename))
| 27.837209 | 113 | 0.591479 | [ "BSD-2-Clause" ] | ndejong/phishing-tracker | PhishingTracker/file.py | 1,197 | Python |
from itertools import chain
import logging
import sys
from pyspark.sql import functions as f
from pyspark.sql.session import SparkSession
from pyspark.sql.window import Window
from pyspark.sql.types import ArrayType, StringType
spark = SparkSession.builder.getOrCreate()
spark.conf.set("spark.sql.execution.arrow.enabled", "true")
logger = logging.getLogger(__name__)
log_handler = logging.StreamHandler(sys.stdout)
log_handler.setFormatter(logging.Formatter(
"%(asctime)s - %(name)s - %(levelname)s - %(message)s"))
log_handler.setLevel(logging.DEBUG)
logger.addHandler(log_handler)
logger.setLevel(logging.DEBUG)
input_path = '/data/raw/'
output_path = '/data/data_science/powerBI/'
def write_output(df):
logger.info("CREATING MASTER DATASET")
logger.info("WRITING: {}".format(output_path + "data_validation_with_diag.parquet"))
df.write.mode('overwrite').parquet(output_path + 'data_validation_with_diag.parquet')
return df
def main():
pcp_hcc_dropped = spark.read.csv('wasbs://[email protected]/PCP_HCC_dropped.csv', header=True, sep='|')
NW_diab = spark.read.parquet("/data/data_science/powerBI/NW_diab_cmd_memb_level.parquet")
forever = spark.read.csv('wasbs://[email protected]/ICD10-ForeverCodes.csv', header=True)
forever = forever.withColumnRenamed('ICD10 Code', 'ICD10_Code')
forever = forever.withColumnRenamed('Forever Code', 'Forever_Code')
NW_diab = NW_diab.select('BENE_MBI_ID', f.explode(f.col('diagnosis_list')).alias('diagnosis_code'), 'claim_year')
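    # explode() flattens the diagnosis_list array so each diagnosis code gets
    # its own row per member and claim year.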
diag_forever = NW_diab.join(forever, NW_diab.diagnosis_code == forever.ICD10_Code, how='left')
diag_forever = diag_forever.select('BENE_MBI_ID', 'diagnosis_code', 'claim_year', 'Forever_Code')
diag_forever = diag_forever.filter(f.col('claim_year')>='2018')
diag_forever = diag_forever.filter(~(f.col('diagnosis_code')==''))
icd_hcc = spark.read.csv('wasbs://[email protected]/ICD10-HCC.csv', header=True)
icd_hcc = icd_hcc.select('ICD10', 'HCC')
diag_for_hcc = diag_forever.join(icd_hcc, diag_forever.diagnosis_code == icd_hcc.ICD10, how='left').drop(icd_hcc.ICD10)
diag_for_hcc = diag_for_hcc.filter(~(f.col('HCC').isNull()))
pcp_hcc_dropped = pcp_hcc_dropped.select('BENE_MBI_ID', 'claim_year', 'FINAL_PCP_NPI')
df_final = pcp_hcc_dropped.join(diag_for_hcc, on=['BENE_MBI_ID', 'claim_year'], how='left')
df_final = df_final.drop_duplicates()
    write_output(df_final)
    df_final.coalesce(1).write.mode('overwrite').option("header", "true").csv('wasbs://[email protected]/data_validation_with_diag.csv')
if __name__ == "__main__":
logger.info('START')
main()
logger.info('END')
| 43.138462 | 164 | 0.740371 | [ "Apache-2.0" ] | gma-coretechs/hccpy | hccpy/misc_scripts/forever_codes_dropped.py | 2,804 | Python |
{"filter":false,"title":"kabutan_scraping.py","tooltip":"/kabutan_scraping.py","undoManager":{"mark":100,"position":100,"stack":[[{"start":{"row":3,"column":0},"end":{"row":4,"column":0},"action":"insert","lines":["",""],"id":2},{"start":{"row":4,"column":0},"end":{"row":5,"column":0},"action":"insert","lines":["",""]}],[{"start":{"row":4,"column":0},"end":{"row":4,"column":1},"action":"insert","lines":["K"],"id":3},{"start":{"row":4,"column":1},"end":{"row":4,"column":2},"action":"insert","lines":["a"]},{"start":{"row":4,"column":2},"end":{"row":4,"column":3},"action":"insert","lines":["b"]},{"start":{"row":4,"column":3},"end":{"row":4,"column":4},"action":"insert","lines":["u"]},{"start":{"row":4,"column":4},"end":{"row":4,"column":5},"action":"insert","lines":["t"]},{"start":{"row":4,"column":5},"end":{"row":4,"column":6},"action":"insert","lines":["a"]}],[{"start":{"row":4,"column":6},"end":{"row":4,"column":7},"action":"insert","lines":["n"],"id":4},{"start":{"row":4,"column":7},"end":{"row":4,"column":8},"action":"insert","lines":["S"]},{"start":{"row":4,"column":8},"end":{"row":4,"column":9},"action":"insert","lines":["c"]},{"start":{"row":4,"column":9},"end":{"row":4,"column":10},"action":"insert","lines":["r"]},{"start":{"row":4,"column":10},"end":{"row":4,"column":11},"action":"insert","lines":["a"]}],[{"start":{"row":4,"column":11},"end":{"row":4,"column":12},"action":"insert","lines":["p"],"id":5},{"start":{"row":4,"column":12},"end":{"row":4,"column":13},"action":"insert","lines":["i"]},{"start":{"row":4,"column":13},"end":{"row":4,"column":14},"action":"insert","lines":["n"]},{"start":{"row":4,"column":14},"end":{"row":4,"column":15},"action":"insert","lines":["g"]}],[{"start":{"row":4,"column":15},"end":{"row":4,"column":17},"action":"insert","lines":["()"],"id":6}],[{"start":{"row":4,"column":17},"end":{"row":4,"column":18},"action":"insert","lines":[":"],"id":7}],[{"start":{"row":5,"column":0},"end":{"row":5,"column":4},"action":"insert","lines":[" "],"id":8},{"start":{"row":6,"column":0},"end":{"row":6,"column":4},"action":"insert","lines":[" "]},{"start":{"row":7,"column":0},"end":{"row":7,"column":4},"action":"insert","lines":[" "]},{"start":{"row":8,"column":0},"end":{"row":8,"column":4},"action":"insert","lines":[" "]},{"start":{"row":9,"column":0},"end":{"row":9,"column":4},"action":"insert","lines":[" "]},{"start":{"row":10,"column":0},"end":{"row":10,"column":4},"action":"insert","lines":[" "]},{"start":{"row":11,"column":0},"end":{"row":11,"column":4},"action":"insert","lines":[" "]},{"start":{"row":12,"column":0},"end":{"row":12,"column":4},"action":"insert","lines":[" "]},{"start":{"row":13,"column":0},"end":{"row":13,"column":4},"action":"insert","lines":[" "]},{"start":{"row":14,"column":0},"end":{"row":14,"column":4},"action":"insert","lines":[" "]},{"start":{"row":15,"column":0},"end":{"row":15,"column":4},"action":"insert","lines":[" "]},{"start":{"row":16,"column":0},"end":{"row":16,"column":4},"action":"insert","lines":[" "]},{"start":{"row":17,"column":0},"end":{"row":17,"column":4},"action":"insert","lines":[" "]},{"start":{"row":18,"column":0},"end":{"row":18,"column":4},"action":"insert","lines":[" "]},{"start":{"row":19,"column":0},"end":{"row":19,"column":4},"action":"insert","lines":[" "]},{"start":{"row":20,"column":0},"end":{"row":20,"column":4},"action":"insert","lines":[" "]},{"start":{"row":21,"column":0},"end":{"row":21,"column":4},"action":"insert","lines":[" 
"]},{"start":{"row":22,"column":0},"end":{"row":22,"column":4},"action":"insert","lines":[" "]},{"start":{"row":23,"column":0},"end":{"row":23,"column":4},"action":"insert","lines":[" "]},{"start":{"row":24,"column":0},"end":{"row":24,"column":4},"action":"insert","lines":[" "]},{"start":{"row":25,"column":0},"end":{"row":25,"column":4},"action":"insert","lines":[" "]},{"start":{"row":26,"column":0},"end":{"row":26,"column":4},"action":"insert","lines":[" "]}],[{"start":{"row":4,"column":18},"end":{"row":5,"column":0},"action":"insert","lines":["",""],"id":9},{"start":{"row":5,"column":0},"end":{"row":5,"column":4},"action":"insert","lines":[" "]},{"start":{"row":5,"column":4},"end":{"row":6,"column":0},"action":"insert","lines":["",""]},{"start":{"row":6,"column":0},"end":{"row":6,"column":4},"action":"insert","lines":[" "]}],[{"start":{"row":5,"column":4},"end":{"row":6,"column":0},"action":"insert","lines":["",""],"id":10},{"start":{"row":6,"column":0},"end":{"row":6,"column":4},"action":"insert","lines":[" "]}],[{"start":{"row":4,"column":0},"end":{"row":4,"column":1},"action":"insert","lines":["C"],"id":11},{"start":{"row":4,"column":1},"end":{"row":4,"column":2},"action":"insert","lines":["l"]},{"start":{"row":4,"column":2},"end":{"row":4,"column":3},"action":"insert","lines":["a"]},{"start":{"row":4,"column":3},"end":{"row":4,"column":4},"action":"insert","lines":["s"]},{"start":{"row":4,"column":4},"end":{"row":4,"column":5},"action":"insert","lines":["s"]}],[{"start":{"row":4,"column":5},"end":{"row":4,"column":6},"action":"insert","lines":[" "],"id":12}],[{"start":{"row":4,"column":0},"end":{"row":4,"column":5},"action":"remove","lines":["Class"],"id":13},{"start":{"row":4,"column":0},"end":{"row":4,"column":1},"action":"insert","lines":["c"]},{"start":{"row":4,"column":1},"end":{"row":4,"column":2},"action":"insert","lines":["l"]},{"start":{"row":4,"column":2},"end":{"row":4,"column":3},"action":"insert","lines":["a"]},{"start":{"row":4,"column":3},"end":{"row":4,"column":4},"action":"insert","lines":["s"]},{"start":{"row":4,"column":4},"end":{"row":4,"column":5},"action":"insert","lines":["s"]}],[{"start":{"row":6,"column":4},"end":{"row":6,"column":5},"action":"insert","lines":["d"],"id":14},{"start":{"row":6,"column":5},"end":{"row":6,"column":6},"action":"insert","lines":["e"]},{"start":{"row":6,"column":6},"end":{"row":6,"column":7},"action":"insert","lines":["f"]}],[{"start":{"row":6,"column":7},"end":{"row":6,"column":8},"action":"insert","lines":[" 
"],"id":15},{"start":{"row":6,"column":8},"end":{"row":6,"column":9},"action":"insert","lines":["_"]},{"start":{"row":6,"column":9},"end":{"row":6,"column":10},"action":"insert","lines":["_"]},{"start":{"row":6,"column":10},"end":{"row":6,"column":11},"action":"insert","lines":["i"]},{"start":{"row":6,"column":11},"end":{"row":6,"column":12},"action":"insert","lines":["n"]},{"start":{"row":6,"column":12},"end":{"row":6,"column":13},"action":"insert","lines":["t"]}],[{"start":{"row":6,"column":12},"end":{"row":6,"column":13},"action":"remove","lines":["t"],"id":16}],[{"start":{"row":6,"column":12},"end":{"row":6,"column":13},"action":"insert","lines":["i"],"id":17},{"start":{"row":6,"column":13},"end":{"row":6,"column":14},"action":"insert","lines":["t"]},{"start":{"row":6,"column":14},"end":{"row":6,"column":15},"action":"insert","lines":["_"]},{"start":{"row":6,"column":15},"end":{"row":6,"column":16},"action":"insert","lines":["_"]}],[{"start":{"row":6,"column":16},"end":{"row":6,"column":18},"action":"insert","lines":["()"],"id":18}],[{"start":{"row":6,"column":18},"end":{"row":6,"column":19},"action":"insert","lines":[":"],"id":19}],[{"start":{"row":8,"column":0},"end":{"row":8,"column":4},"action":"insert","lines":[" "],"id":20},{"start":{"row":9,"column":0},"end":{"row":9,"column":4},"action":"insert","lines":[" "]},{"start":{"row":10,"column":0},"end":{"row":10,"column":4},"action":"insert","lines":[" "]},{"start":{"row":11,"column":0},"end":{"row":11,"column":4},"action":"insert","lines":[" "]},{"start":{"row":12,"column":0},"end":{"row":12,"column":4},"action":"insert","lines":[" "]},{"start":{"row":13,"column":0},"end":{"row":13,"column":4},"action":"insert","lines":[" "]},{"start":{"row":14,"column":0},"end":{"row":14,"column":4},"action":"insert","lines":[" "]},{"start":{"row":15,"column":0},"end":{"row":15,"column":4},"action":"insert","lines":[" "]},{"start":{"row":16,"column":0},"end":{"row":16,"column":4},"action":"insert","lines":[" "]},{"start":{"row":17,"column":0},"end":{"row":17,"column":4},"action":"insert","lines":[" "]},{"start":{"row":18,"column":0},"end":{"row":18,"column":4},"action":"insert","lines":[" "]},{"start":{"row":19,"column":0},"end":{"row":19,"column":4},"action":"insert","lines":[" "]},{"start":{"row":20,"column":0},"end":{"row":20,"column":4},"action":"insert","lines":[" "]},{"start":{"row":21,"column":0},"end":{"row":21,"column":4},"action":"insert","lines":[" "]},{"start":{"row":22,"column":0},"end":{"row":22,"column":4},"action":"insert","lines":[" "]},{"start":{"row":23,"column":0},"end":{"row":23,"column":4},"action":"insert","lines":[" "]},{"start":{"row":24,"column":0},"end":{"row":24,"column":4},"action":"insert","lines":[" "]},{"start":{"row":25,"column":0},"end":{"row":25,"column":4},"action":"insert","lines":[" "]},{"start":{"row":26,"column":0},"end":{"row":26,"column":4},"action":"insert","lines":[" "]},{"start":{"row":27,"column":0},"end":{"row":27,"column":4},"action":"insert","lines":[" "]},{"start":{"row":28,"column":0},"end":{"row":28,"column":4},"action":"insert","lines":[" "]},{"start":{"row":29,"column":0},"end":{"row":29,"column":4},"action":"insert","lines":[" 
"]}],[{"start":{"row":6,"column":17},"end":{"row":6,"column":18},"action":"insert","lines":["s"],"id":21},{"start":{"row":6,"column":18},"end":{"row":6,"column":19},"action":"insert","lines":["e"]},{"start":{"row":6,"column":19},"end":{"row":6,"column":20},"action":"insert","lines":["l"]},{"start":{"row":6,"column":20},"end":{"row":6,"column":21},"action":"insert","lines":["f"]}],[{"start":{"row":5,"column":4},"end":{"row":6,"column":0},"action":"insert","lines":["",""],"id":22},{"start":{"row":6,"column":0},"end":{"row":6,"column":4},"action":"insert","lines":[" "]}],[{"start":{"row":5,"column":4},"end":{"row":5,"column":6},"action":"insert","lines":["''"],"id":23}],[{"start":{"row":5,"column":6},"end":{"row":5,"column":7},"action":"insert","lines":["'"],"id":24}],[{"start":{"row":5,"column":7},"end":{"row":6,"column":0},"action":"insert","lines":["",""],"id":25},{"start":{"row":6,"column":0},"end":{"row":6,"column":4},"action":"insert","lines":[" "]},{"start":{"row":6,"column":4},"end":{"row":6,"column":5},"action":"insert","lines":["'"]},{"start":{"row":6,"column":5},"end":{"row":6,"column":6},"action":"insert","lines":["'"]},{"start":{"row":6,"column":6},"end":{"row":6,"column":7},"action":"insert","lines":["'"]}],[{"start":{"row":5,"column":7},"end":{"row":6,"column":0},"action":"insert","lines":["",""],"id":26},{"start":{"row":6,"column":0},"end":{"row":6,"column":4},"action":"insert","lines":[" "]},{"start":{"row":6,"column":4},"end":{"row":6,"column":5},"action":"insert","lines":["@"]},{"start":{"row":6,"column":5},"end":{"row":6,"column":6},"action":"insert","lines":["b"]},{"start":{"row":6,"column":6},"end":{"row":6,"column":7},"action":"insert","lines":["r"]},{"start":{"row":6,"column":7},"end":{"row":6,"column":8},"action":"insert","lines":["i"]},{"start":{"row":6,"column":8},"end":{"row":6,"column":9},"action":"insert","lines":["e"]}],[{"start":{"row":6,"column":9},"end":{"row":6,"column":10},"action":"insert","lines":["f"],"id":27}],[{"start":{"row":6,"column":10},"end":{"row":6,"column":11},"action":"insert","lines":[" "],"id":28},{"start":{"row":6,"column":11},"end":{"row":6,"column":12},"action":"insert","lines":[":"]}],[{"start":{"row":6,"column":12},"end":{"row":6,"column":13},"action":"insert","lines":[" "],"id":29},{"start":{"row":6,"column":13},"end":{"row":6,"column":15},"action":"insert","lines":["株価"]}],[{"start":{"row":6,"column":15},"end":{"row":6,"column":16},"action":"insert","lines":["を"],"id":30},{"start":{"row":6,"column":16},"end":{"row":6,"column":18},"action":"insert","lines":["取得"]}],[{"start":{"row":6,"column":18},"end":{"row":6,"column":20},"action":"insert","lines":["する"],"id":31},{"start":{"row":6,"column":20},"end":{"row":6,"column":23},"action":"insert","lines":["クラス"]}],[{"start":{"row":32,"column":28},"end":{"row":33,"column":0},"action":"insert","lines":["",""],"id":32},{"start":{"row":33,"column":0},"end":{"row":33,"column":8},"action":"insert","lines":[" "]},{"start":{"row":33,"column":8},"end":{"row":34,"column":0},"action":"insert","lines":["",""]},{"start":{"row":34,"column":0},"end":{"row":34,"column":8},"action":"insert","lines":[" "]}],[{"start":{"row":34,"column":4},"end":{"row":34,"column":8},"action":"remove","lines":[" "],"id":33},{"start":{"row":34,"column":0},"end":{"row":34,"column":4},"action":"remove","lines":[" 
"]}],[{"start":{"row":34,"column":0},"end":{"row":34,"column":1},"action":"insert","lines":["k"],"id":34},{"start":{"row":34,"column":1},"end":{"row":34,"column":2},"action":"insert","lines":["a"]},{"start":{"row":34,"column":2},"end":{"row":34,"column":3},"action":"insert","lines":["b"]},{"start":{"row":34,"column":3},"end":{"row":34,"column":4},"action":"insert","lines":["u"]}],[{"start":{"row":34,"column":4},"end":{"row":34,"column":5},"action":"insert","lines":[" "],"id":35},{"start":{"row":34,"column":5},"end":{"row":34,"column":6},"action":"insert","lines":["="]}],[{"start":{"row":34,"column":6},"end":{"row":34,"column":7},"action":"insert","lines":[" "],"id":36},{"start":{"row":34,"column":7},"end":{"row":34,"column":8},"action":"insert","lines":["k"]},{"start":{"row":34,"column":8},"end":{"row":34,"column":9},"action":"insert","lines":["a"]}],[{"start":{"row":34,"column":8},"end":{"row":34,"column":9},"action":"remove","lines":["a"],"id":37},{"start":{"row":34,"column":7},"end":{"row":34,"column":8},"action":"remove","lines":["k"]}],[{"start":{"row":34,"column":7},"end":{"row":34,"column":8},"action":"insert","lines":["K"],"id":38},{"start":{"row":34,"column":8},"end":{"row":34,"column":9},"action":"insert","lines":["a"]}],[{"start":{"row":34,"column":7},"end":{"row":34,"column":9},"action":"remove","lines":["Ka"],"id":39},{"start":{"row":34,"column":7},"end":{"row":34,"column":22},"action":"insert","lines":["KabutanScraping"]}],[{"start":{"row":34,"column":22},"end":{"row":34,"column":24},"action":"insert","lines":["()"],"id":40}],[{"start":{"row":34,"column":24},"end":{"row":35,"column":0},"action":"insert","lines":["",""],"id":41}],[{"start":{"row":32,"column":28},"end":{"row":33,"column":0},"action":"insert","lines":["",""],"id":42},{"start":{"row":33,"column":0},"end":{"row":33,"column":8},"action":"insert","lines":[" "]},{"start":{"row":33,"column":8},"end":{"row":34,"column":0},"action":"insert","lines":["",""]},{"start":{"row":34,"column":0},"end":{"row":34,"column":8},"action":"insert","lines":[" "]}],[{"start":{"row":34,"column":4},"end":{"row":34,"column":8},"action":"remove","lines":[" "],"id":43}],[{"start":{"row":34,"column":4},"end":{"row":34,"column":5},"action":"insert","lines":["d"],"id":44},{"start":{"row":34,"column":5},"end":{"row":34,"column":6},"action":"insert","lines":["e"]},{"start":{"row":34,"column":6},"end":{"row":34,"column":7},"action":"insert","lines":["f"]}],[{"start":{"row":34,"column":7},"end":{"row":34,"column":8},"action":"insert","lines":[" 
"],"id":45},{"start":{"row":34,"column":8},"end":{"row":34,"column":9},"action":"insert","lines":["d"]},{"start":{"row":34,"column":9},"end":{"row":34,"column":10},"action":"insert","lines":["i"]},{"start":{"row":34,"column":10},"end":{"row":34,"column":11},"action":"insert","lines":["s"]},{"start":{"row":34,"column":11},"end":{"row":34,"column":12},"action":"insert","lines":["p"]}],[{"start":{"row":34,"column":12},"end":{"row":34,"column":13},"action":"insert","lines":["l"],"id":46},{"start":{"row":34,"column":13},"end":{"row":34,"column":14},"action":"insert","lines":["a"]},{"start":{"row":34,"column":14},"end":{"row":34,"column":15},"action":"insert","lines":["y"]}],[{"start":{"row":34,"column":15},"end":{"row":34,"column":17},"action":"insert","lines":["()"],"id":47}],[{"start":{"row":34,"column":16},"end":{"row":34,"column":17},"action":"insert","lines":["s"],"id":48},{"start":{"row":34,"column":17},"end":{"row":34,"column":18},"action":"insert","lines":["e"]},{"start":{"row":34,"column":18},"end":{"row":34,"column":19},"action":"insert","lines":["l"]},{"start":{"row":34,"column":19},"end":{"row":34,"column":20},"action":"insert","lines":["f"]}],[{"start":{"row":34,"column":21},"end":{"row":34,"column":22},"action":"insert","lines":[":"],"id":49}],[{"start":{"row":34,"column":22},"end":{"row":35,"column":0},"action":"insert","lines":["",""],"id":50},{"start":{"row":35,"column":0},"end":{"row":35,"column":8},"action":"insert","lines":[" "]},{"start":{"row":35,"column":8},"end":{"row":35,"column":9},"action":"insert","lines":["d"]},{"start":{"row":35,"column":9},"end":{"row":35,"column":10},"action":"insert","lines":["i"]},{"start":{"row":35,"column":10},"end":{"row":35,"column":11},"action":"insert","lines":["s"]}],[{"start":{"row":35,"column":11},"end":{"row":35,"column":12},"action":"insert","lines":["p"],"id":51},{"start":{"row":35,"column":12},"end":{"row":35,"column":13},"action":"insert","lines":["l"]},{"start":{"row":35,"column":13},"end":{"row":35,"column":14},"action":"insert","lines":["a"]},{"start":{"row":35,"column":14},"end":{"row":35,"column":15},"action":"insert","lines":["y"]}],[{"start":{"row":35,"column":15},"end":{"row":35,"column":17},"action":"insert","lines":["()"],"id":52}],[{"start":{"row":35,"column":16},"end":{"row":35,"column":17},"action":"insert","lines":["d"],"id":53},{"start":{"row":35,"column":17},"end":{"row":35,"column":18},"action":"insert","lines":["e"]}],[{"start":{"row":35,"column":17},"end":{"row":35,"column":18},"action":"remove","lines":["e"],"id":54}],[{"start":{"row":35,"column":17},"end":{"row":35,"column":18},"action":"insert","lines":["f"],"id":55},{"start":{"row":35,"column":18},"end":{"row":35,"column":19},"action":"insert","lines":["A"]},{"start":{"row":35,"column":19},"end":{"row":35,"column":20},"action":"insert","lines":["l"]},{"start":{"row":35,"column":20},"end":{"row":35,"column":21},"action":"insert","lines":["l"]}],[{"start":{"row":35,"column":21},"end":{"row":35,"column":22},"action":"insert","lines":["B"],"id":56},{"start":{"row":35,"column":22},"end":{"row":35,"column":23},"action":"insert","lines":["r"]},{"start":{"row":35,"column":23},"end":{"row":35,"column":24},"action":"insert","lines":["a"]},{"start":{"row":35,"column":24},"end":{"row":35,"column":25},"action":"insert","lines":["n"]},{"start":{"row":35,"column":25},"end":{"row":35,"column":26},"action":"insert","lines":["d"]},{"start":{"row":35,"column":26},"end":{"row":35,"column":27},"action":"insert","lines":["s"]}],[{"start":{"row":38,"column":0},"end":{"row":38,"colu
mn":1},"action":"insert","lines":["k"],"id":57},{"start":{"row":38,"column":1},"end":{"row":38,"column":2},"action":"insert","lines":["a"]},{"start":{"row":38,"column":2},"end":{"row":38,"column":3},"action":"insert","lines":["b"]},{"start":{"row":38,"column":3},"end":{"row":38,"column":4},"action":"insert","lines":["u"]},{"start":{"row":38,"column":4},"end":{"row":38,"column":5},"action":"insert","lines":["."]},{"start":{"row":38,"column":5},"end":{"row":38,"column":6},"action":"insert","lines":["d"]},{"start":{"row":38,"column":6},"end":{"row":38,"column":7},"action":"insert","lines":["i"]},{"start":{"row":38,"column":7},"end":{"row":38,"column":8},"action":"insert","lines":["s"]}],[{"start":{"row":38,"column":8},"end":{"row":38,"column":9},"action":"insert","lines":["p"],"id":58},{"start":{"row":38,"column":9},"end":{"row":38,"column":10},"action":"insert","lines":["l"]},{"start":{"row":38,"column":10},"end":{"row":38,"column":11},"action":"insert","lines":["a"]},{"start":{"row":38,"column":11},"end":{"row":38,"column":12},"action":"insert","lines":["y"]}],[{"start":{"row":32,"column":8},"end":{"row":32,"column":9},"action":"insert","lines":["#"],"id":59}],[{"start":{"row":35,"column":16},"end":{"row":35,"column":17},"action":"insert","lines":["s"],"id":60},{"start":{"row":35,"column":17},"end":{"row":35,"column":18},"action":"insert","lines":["e"]},{"start":{"row":35,"column":18},"end":{"row":35,"column":19},"action":"insert","lines":["l"]},{"start":{"row":35,"column":19},"end":{"row":35,"column":20},"action":"insert","lines":["f"]},{"start":{"row":35,"column":20},"end":{"row":35,"column":21},"action":"insert","lines":["."]}],[{"start":{"row":35,"column":21},"end":{"row":36,"column":0},"action":"insert","lines":["",""],"id":61},{"start":{"row":36,"column":0},"end":{"row":36,"column":8},"action":"insert","lines":[" "]},{"start":{"row":36,"column":8},"end":{"row":36,"column":9},"action":"insert","lines":["."]}],[{"start":{"row":36,"column":8},"end":{"row":36,"column":9},"action":"remove","lines":["."],"id":62},{"start":{"row":36,"column":4},"end":{"row":36,"column":8},"action":"remove","lines":[" "]},{"start":{"row":36,"column":0},"end":{"row":36,"column":4},"action":"remove","lines":[" "]},{"start":{"row":35,"column":21},"end":{"row":36,"column":0},"action":"remove","lines":["",""]},{"start":{"row":35,"column":20},"end":{"row":35,"column":21},"action":"remove","lines":["."]}],[{"start":{"row":35,"column":20},"end":{"row":35,"column":21},"action":"insert","lines":["."],"id":63}],[{"start":{"row":22,"column":16},"end":{"row":22,"column":17},"action":"insert","lines":["#"],"id":64}],[{"start":{"row":7,"column":7},"end":{"row":8,"column":0},"action":"insert","lines":["",""],"id":65},{"start":{"row":8,"column":0},"end":{"row":8,"column":4},"action":"insert","lines":[" 
"]}],[{"start":{"row":8,"column":4},"end":{"row":8,"column":5},"action":"insert","lines":["d"],"id":66},{"start":{"row":8,"column":5},"end":{"row":8,"column":6},"action":"insert","lines":["f"]}],[{"start":{"row":8,"column":6},"end":{"row":8,"column":7},"action":"insert","lines":["A"],"id":67},{"start":{"row":8,"column":7},"end":{"row":8,"column":8},"action":"insert","lines":["l"]},{"start":{"row":8,"column":8},"end":{"row":8,"column":9},"action":"insert","lines":["l"]},{"start":{"row":8,"column":9},"end":{"row":8,"column":10},"action":"insert","lines":["B"]},{"start":{"row":8,"column":10},"end":{"row":8,"column":11},"action":"insert","lines":["r"]},{"start":{"row":8,"column":11},"end":{"row":8,"column":12},"action":"insert","lines":["a"]},{"start":{"row":8,"column":12},"end":{"row":8,"column":13},"action":"insert","lines":["n"]},{"start":{"row":8,"column":13},"end":{"row":8,"column":14},"action":"insert","lines":["d"]}],[{"start":{"row":8,"column":14},"end":{"row":8,"column":15},"action":"insert","lines":["s"],"id":68}],[{"start":{"row":8,"column":15},"end":{"row":8,"column":16},"action":"insert","lines":[" "],"id":69},{"start":{"row":8,"column":16},"end":{"row":8,"column":17},"action":"insert","lines":["="]}],[{"start":{"row":8,"column":17},"end":{"row":8,"column":18},"action":"insert","lines":[" "],"id":70},{"start":{"row":8,"column":18},"end":{"row":8,"column":19},"action":"insert","lines":["p"]},{"start":{"row":8,"column":19},"end":{"row":8,"column":20},"action":"insert","lines":["d"]},{"start":{"row":8,"column":20},"end":{"row":8,"column":21},"action":"insert","lines":["."]},{"start":{"row":8,"column":21},"end":{"row":8,"column":22},"action":"insert","lines":["D"]},{"start":{"row":8,"column":22},"end":{"row":8,"column":23},"action":"insert","lines":["a"]},{"start":{"row":8,"column":23},"end":{"row":8,"column":24},"action":"insert","lines":["t"]}],[{"start":{"row":8,"column":24},"end":{"row":8,"column":25},"action":"insert","lines":["a"],"id":71},{"start":{"row":8,"column":25},"end":{"row":8,"column":26},"action":"insert","lines":["F"]},{"start":{"row":8,"column":26},"end":{"row":8,"column":27},"action":"insert","lines":["r"]},{"start":{"row":8,"column":27},"end":{"row":8,"column":28},"action":"insert","lines":["a"]},{"start":{"row":8,"column":28},"end":{"row":8,"column":29},"action":"insert","lines":["m"]},{"start":{"row":8,"column":29},"end":{"row":8,"column":30},"action":"insert","lines":["e"]}],[{"start":{"row":8,"column":30},"end":{"row":8,"column":32},"action":"insert","lines":["()"],"id":72}],[{"start":{"row":16,"column":8},"end":{"row":16,"column":9},"action":"insert","lines":["#"],"id":73}],[{"start":{"row":25,"column":12},"end":{"row":25,"column":13},"action":"insert","lines":["s"],"id":74},{"start":{"row":25,"column":13},"end":{"row":25,"column":14},"action":"insert","lines":["e"]},{"start":{"row":25,"column":14},"end":{"row":25,"column":15},"action":"insert","lines":["l"]},{"start":{"row":25,"column":15},"end":{"row":25,"column":16},"action":"insert","lines":["f"]},{"start":{"row":25,"column":16},"end":{"row":25,"column":17},"action":"insert","lines":["."]}],[{"start":{"row":32,"column":8},"end":{"row":32,"column":9},"action":"insert","lines":["s"],"id":75},{"start":{"row":32,"column":9},"end":{"row":32,"column":10},"action":"insert","lines":["e"]},{"start":{"row":32,"column":10},"end":{"row":32,"column":11},"action":"insert","lines":["l"]},{"start":{"row":32,"column":11},"end":{"row":32,"column":12},"action":"insert","lines":["f"]},{"start":{"row":32,"column":12},"end":{"ro
w":32,"column":13},"action":"insert","lines":["."]}],[{"start":{"row":25,"column":31},"end":{"row":25,"column":32},"action":"insert","lines":["s"],"id":76},{"start":{"row":25,"column":32},"end":{"row":25,"column":33},"action":"insert","lines":["e"]},{"start":{"row":25,"column":33},"end":{"row":25,"column":34},"action":"insert","lines":["l"]},{"start":{"row":25,"column":34},"end":{"row":25,"column":35},"action":"insert","lines":["f"]},{"start":{"row":25,"column":35},"end":{"row":25,"column":36},"action":"insert","lines":["."]}],[{"start":{"row":29,"column":17},"end":{"row":29,"column":18},"action":"insert","lines":["s"],"id":77},{"start":{"row":29,"column":18},"end":{"row":29,"column":19},"action":"insert","lines":["e"]},{"start":{"row":29,"column":19},"end":{"row":29,"column":20},"action":"insert","lines":["l"]},{"start":{"row":29,"column":20},"end":{"row":29,"column":21},"action":"insert","lines":["f"]},{"start":{"row":29,"column":21},"end":{"row":29,"column":22},"action":"insert","lines":["."]}],[{"start":{"row":31,"column":60},"end":{"row":31,"column":61},"action":"insert","lines":["s"],"id":78},{"start":{"row":31,"column":61},"end":{"row":31,"column":62},"action":"insert","lines":["e"]},{"start":{"row":31,"column":62},"end":{"row":31,"column":63},"action":"insert","lines":["l"]},{"start":{"row":31,"column":63},"end":{"row":31,"column":64},"action":"insert","lines":["f"]},{"start":{"row":31,"column":64},"end":{"row":31,"column":65},"action":"insert","lines":["."]}],[{"start":{"row":16,"column":8},"end":{"row":16,"column":9},"action":"remove","lines":["#"],"id":79}],[{"start":{"row":16,"column":8},"end":{"row":16,"column":9},"action":"insert","lines":["s"],"id":80},{"start":{"row":16,"column":9},"end":{"row":16,"column":10},"action":"insert","lines":["e"]},{"start":{"row":16,"column":10},"end":{"row":16,"column":11},"action":"insert","lines":["l"]},{"start":{"row":16,"column":11},"end":{"row":16,"column":12},"action":"insert","lines":["f"]},{"start":{"row":16,"column":12},"end":{"row":16,"column":13},"action":"insert","lines":["."]}],[{"start":{"row":8,"column":31},"end":{"row":8,"column":32},"action":"remove","lines":[")"],"id":81},{"start":{"row":8,"column":30},"end":{"row":8,"column":31},"action":"remove","lines":["("]},{"start":{"row":8,"column":29},"end":{"row":8,"column":30},"action":"remove","lines":["e"]},{"start":{"row":8,"column":28},"end":{"row":8,"column":29},"action":"remove","lines":["m"]},{"start":{"row":8,"column":27},"end":{"row":8,"column":28},"action":"remove","lines":["a"]},{"start":{"row":8,"column":26},"end":{"row":8,"column":27},"action":"remove","lines":["r"]},{"start":{"row":8,"column":25},"end":{"row":8,"column":26},"action":"remove","lines":["F"]},{"start":{"row":8,"column":24},"end":{"row":8,"column":25},"action":"remove","lines":["a"]},{"start":{"row":8,"column":23},"end":{"row":8,"column":24},"action":"remove","lines":["t"]},{"start":{"row":8,"column":22},"end":{"row":8,"column":23},"action":"remove","lines":["a"]},{"start":{"row":8,"column":21},"end":{"row":8,"column":22},"action":"remove","lines":["D"]},{"start":{"row":8,"column":20},"end":{"row":8,"column":21},"action":"remove","lines":["."]},{"start":{"row":8,"column":19},"end":{"row":8,"column":20},"action":"remove","lines":["d"]},{"start":{"row":8,"column":18},"end":{"row":8,"column":19},"action":"remove","lines":["p"]},{"start":{"row":8,"column":17},"end":{"row":8,"column":18},"action":"remove","lines":[" 
"]},{"start":{"row":8,"column":16},"end":{"row":8,"column":17},"action":"remove","lines":["="]},{"start":{"row":8,"column":15},"end":{"row":8,"column":16},"action":"remove","lines":[" "]}],[{"start":{"row":8,"column":15},"end":{"row":8,"column":16},"action":"insert","lines":[" "],"id":82},{"start":{"row":8,"column":16},"end":{"row":8,"column":17},"action":"insert","lines":["="]}],[{"start":{"row":8,"column":17},"end":{"row":8,"column":18},"action":"insert","lines":[" "],"id":83}],[{"start":{"row":8,"column":18},"end":{"row":8,"column":20},"action":"insert","lines":["\"\""],"id":84}],[{"start":{"row":33,"column":8},"end":{"row":33,"column":9},"action":"remove","lines":["#"],"id":85}],[{"start":{"row":33,"column":16},"end":{"row":33,"column":17},"action":"insert","lines":["s"],"id":86},{"start":{"row":33,"column":17},"end":{"row":33,"column":18},"action":"insert","lines":["e"]},{"start":{"row":33,"column":18},"end":{"row":33,"column":19},"action":"insert","lines":["l"]},{"start":{"row":33,"column":19},"end":{"row":33,"column":20},"action":"insert","lines":["f"]},{"start":{"row":33,"column":20},"end":{"row":33,"column":21},"action":"insert","lines":["."]}],[{"start":{"row":39,"column":0},"end":{"row":39,"column":1},"action":"insert","lines":["#"],"id":87}],[{"start":{"row":36,"column":8},"end":{"row":36,"column":9},"action":"insert","lines":["s"],"id":88},{"start":{"row":36,"column":9},"end":{"row":36,"column":10},"action":"insert","lines":["e"]},{"start":{"row":36,"column":10},"end":{"row":36,"column":11},"action":"insert","lines":["l"]},{"start":{"row":36,"column":11},"end":{"row":36,"column":12},"action":"insert","lines":["f"]},{"start":{"row":36,"column":12},"end":{"row":36,"column":13},"action":"insert","lines":["."]}],[{"start":{"row":36,"column":12},"end":{"row":36,"column":13},"action":"remove","lines":["."],"id":89},{"start":{"row":36,"column":11},"end":{"row":36,"column":12},"action":"remove","lines":["f"]},{"start":{"row":36,"column":10},"end":{"row":36,"column":11},"action":"remove","lines":["l"]},{"start":{"row":36,"column":9},"end":{"row":36,"column":10},"action":"remove","lines":["e"]},{"start":{"row":36,"column":8},"end":{"row":36,"column":9},"action":"remove","lines":["s"]}],[{"start":{"row":35,"column":15},"end":{"row":35,"column":16},"action":"insert","lines":["_"],"id":90},{"start":{"row":35,"column":16},"end":{"row":35,"column":17},"action":"insert","lines":["a"]},{"start":{"row":35,"column":17},"end":{"row":35,"column":18},"action":"insert","lines":["l"]},{"start":{"row":35,"column":18},"end":{"row":35,"column":19},"action":"insert","lines":["l"]}],[{"start":{"row":33,"column":8},"end":{"row":33,"column":9},"action":"insert","lines":["#"],"id":91}],[{"start":{"row":39,"column":0},"end":{"row":39,"column":1},"action":"remove","lines":["#"],"id":92}],[{"start":{"row":39,"column":0},"end":{"row":39,"column":1},"action":"insert","lines":["#"],"id":93}],[{"start":{"row":39,"column":0},"end":{"row":39,"column":1},"action":"remove","lines":["#"],"id":94}],[{"start":{"row":39,"column":12},"end":{"row":39,"column":13},"action":"insert","lines":["_"],"id":95},{"start":{"row":39,"column":13},"end":{"row":39,"column":14},"action":"insert","lines":["a"]},{"start":{"row":39,"column":14},"end":{"row":39,"column":15},"action":"insert","lines":["l"]},{"start":{"row":39,"column":15},"end":{"row":39,"column":16},"action":"insert","lines":["l"]}],[{"start":{"row":39,"column":16},"end":{"row":39,"column":18},"action":"insert","lines":["()"],"id":96}],[{"start":{"row":38,"column":0},"end":{"ro
w":39,"column":0},"action":"insert","lines":["",""],"id":97}],[{"start":{"row":38,"column":0},"end":{"row":38,"column":1},"action":"insert","lines":["#"],"id":98}],[{"start":{"row":38,"column":1},"end":{"row":38,"column":2},"action":"insert","lines":[" "],"id":99},{"start":{"row":38,"column":2},"end":{"row":38,"column":3},"action":"insert","lines":["F"]},{"start":{"row":38,"column":3},"end":{"row":38,"column":4},"action":"insert","lines":["o"]},{"start":{"row":38,"column":4},"end":{"row":38,"column":5},"action":"insert","lines":["r"]}],[{"start":{"row":38,"column":5},"end":{"row":38,"column":6},"action":"insert","lines":[" "],"id":100},{"start":{"row":38,"column":6},"end":{"row":38,"column":7},"action":"insert","lines":["D"]},{"start":{"row":38,"column":7},"end":{"row":38,"column":8},"action":"insert","lines":["e"]},{"start":{"row":38,"column":8},"end":{"row":38,"column":9},"action":"insert","lines":["b"]},{"start":{"row":38,"column":9},"end":{"row":38,"column":10},"action":"insert","lines":["u"]},{"start":{"row":38,"column":10},"end":{"row":38,"column":11},"action":"insert","lines":["g"]}],[{"start":{"row":39,"column":0},"end":{"row":39,"column":1},"action":"insert","lines":["#"],"id":101}],[{"start":{"row":40,"column":0},"end":{"row":40,"column":1},"action":"insert","lines":["#"],"id":102}]]},"ace":{"folds":[],"scrolltop":0,"scrollleft":0,"selection":{"start":{"row":40,"column":1},"end":{"row":40,"column":1},"isBackwards":false},"options":{"guessTabSize":true,"useWrapMode":false,"wrapToView":true},"firstLineState":0},"timestamp":1542509718542,"hash":"72aa41b34248e38e6225a8cdc232de27a235d1a5"} | 33,798 | 33,798 | 0.533967 | [
"MIT"
] | Grawor/auto_bot | .c9/metadata/environment/kabutan_scraping.py | 33,818 | Python |
ROOT_SCOPE_METHOD( MD( 'Not', 'NOT_CLAUSE_FACTORY_single()' ) )
TEST( """ if [ Not @ ( Integer ) consume [ 1 2 3 ] ] then [ . 1 ] else [ . 2 ] == 2 """ )
TEST( """ Not @ ( Word ) consume [ 1 2 3 ] next value == 1 """ )
OBJECT( 'NOT_CLAUSE_FACTORY',
methods = [
MS( ARG( CW( '@' ), CG( 'CLAUSE', 'clause' ) ), """
JUMP__return_ANY( CONTEXT, CONTEXT, $CA(NOT_CLAUSE_new( PARAM_clause )) ) ;
""" ),
]
)
OBJECT( 'NOT_CLAUSE',
inherit = [ 'CLAUSE' ],
attributes = [
A( 'ANY', 'clause' ),
],
methods = [
MS( ARG( CW( 'consume' ), CG( 'LIST', 'phrase' ) ), """
JUMP__consume_LIST( $CA(FRAME__NOT_CLAUSE_1_new( CONTEXT, PARAM_phrase )), ACTION->clause, PARAM_phrase ) ;
""" ),
],
dump = D( '%s', '$DUMP( object->clause )' )
)
FRAME( 'NOT_CLAUSE_1',
attributes = [
A( 'ANY', 'phrase' ),
],
methods = [
MS( ARG( CW( 'return' ), CG( 'ANY', 'value' ) ), """
nom_fail( ACTION->parent, "Input rejected by Not", $NONE ) ;
""" ),
MS( ARG( CW( 'fail' ), CG( 'ANY', 'error' ) ), """
JUMP__return_ANY( ACTION->parent, ACTION->parent, $CA(ELEMENT_new( $NONE, ACTION->phrase )) ) ;
""" ),
]
)
| 25.434783 | 113 | 0.524786 | [
"MIT"
] | thomasmf/nomenine | src/core/runtime/clause/not_clause.py | 1,170 | Python |
import dash_bootstrap_components as dbc
import dash_html_components as html
from utils import UrlIndex
def NavBar(pathname=None):
return dbc.Nav([
dbc.NavItem(
dbc.NavLink(
html.H4("Home", className='mt-1', style={'color': (
'white' if pathname == UrlIndex.HOME.value or pathname == UrlIndex.ROOT.value else 'black')}),
active=(pathname == UrlIndex.HOME.value or pathname == UrlIndex.ROOT.value),
href=UrlIndex.HOME.value
)
),
dbc.NavItem(
dbc.NavLink(
html.H4("Plot", className='mt-1',
style={'color': ('white' if pathname == UrlIndex.PLOT.value else 'black')}),
active=(pathname == UrlIndex.PLOT.value),
href=UrlIndex.PLOT.value
)
),
dbc.NavItem(
dbc.NavLink(
html.H4("Help", className='mt-1',
style={'color': ('white' if pathname == UrlIndex.HELP.value else 'black')}),
active=(pathname == UrlIndex.HELP.value),
href=UrlIndex.HELP.value
)
),
dbc.NavItem(
dbc.NavLink(
html.H4("Get in touch", className='mt-1',
style={'color': ('white' if pathname == UrlIndex.CONTACT.value else 'black')}),
active=(pathname == UrlIndex.CONTACT.value),
href=UrlIndex.CONTACT.value
)
),
dbc.NavItem(
dbc.NavLink(
html.H4("Rigden Lab", className='mt-1',
style={'color': ('white' if pathname == UrlIndex.RIGDEN.value else 'black')}),
active=(pathname == UrlIndex.RIGDEN.value),
href=UrlIndex.RIGDEN.value
)
)
], pills=True, fill=True, justified=True, style={'border-bottom': '2px solid', 'border-top': '2px solid'}
)
def Footer(fixed=False):
if fixed:
style = {'position': 'fixed', 'background': 'gray', 'width': '100%', 'bottom': '0px'}
else:
style = {}
return html.Div([
html.Br(),
html.Br(),
html.Br(),
dbc.Navbar([
dbc.Row(
dbc.Col([
html.Small('Hosted by', className='mr-3'),
html.A(html.Img(src='/assets/ccp4-online_logo.png', height='35hv'), href=UrlIndex.CCP4_ONLINE.value)
], width=12), className='text-center container-fluid'
)
], className='footer py-3', style=style)
])
| 36.690141 | 120 | 0.503263 | [
"BSD-3-Clause"
] | rigdenlab/conkit-web | components/navbar.py | 2,605 | Python |
#!/usr/bin/env python
"""
Functions for estimating quantities from nested sampling runs.
Each estimator function should have arguments:
.. code-block:: python
def estimator_func(self, ns_run, logw=None, simulate=False):
...
Any additional arguments required for the function should be keyword
arguments.
The ``logw`` argument allows the log weights for the points in the run to be
provided - this is useful if many estimators are being calculated from
the same run as it allows ``logw`` to only be calculated once. If it is not
specified, ``logw`` is calculated from the run when required.
The simulate argument is passed to ``ns_run_utils.get_logw``, and is only used
if the function needs to calculate ``logw``.
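For example, to reuse one set of log weights across several estimators
(``ns_run`` here stands for a hypothetical run dict loaded elsewhere):
.. code-block:: python
    import nestcheck.ns_run_utils
    logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=False)
    results = [logz(ns_run, logw=logw), param_mean(ns_run, logw=logw)]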
"""
import functools
import numpy as np
import scipy
import nestcheck.ns_run_utils
# Estimators
# ----------
def count_samples(ns_run, **kwargs):
r"""Number of samples in run.
Unlike most estimators this does not require log weights, but for
convenience will not throw an error if they are specified.
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
Returns
-------
int
"""
kwargs.pop('logw', None)
kwargs.pop('simulate', None)
if kwargs:
raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
return ns_run['logl'].shape[0]
def logz(ns_run, logw=None, simulate=False):
r"""Natural log of Bayesian evidence :math:`\log \mathcal{Z}`.
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
logw: None or 1d numpy array, optional
Log weights of samples.
simulate: bool, optional
Passed to ns_run_utils.get_logw if logw needs to be
calculated.
Returns
-------
float
"""
if logw is None:
logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
return scipy.special.logsumexp(logw)
def evidence(ns_run, logw=None, simulate=False):
r"""Bayesian evidence :math:`\log \mathcal{Z}`.
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
logw: None or 1d numpy array, optional
Log weights of samples.
simulate: bool, optional
Passed to ns_run_utils.get_logw if logw needs to be
calculated.
Returns
-------
float
"""
if logw is None:
logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
return np.exp(scipy.special.logsumexp(logw))
def param_mean(ns_run, logw=None, simulate=False, param_ind=0,
handle_indexerror=False):
"""Mean of a single parameter (single component of theta).
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
logw: None or 1d numpy array, optional
Log weights of samples.
simulate: bool, optional
Passed to ns_run_utils.get_logw if logw needs to be
calculated.
param_ind: int, optional
Index of parameter for which the mean should be calculated. This
corresponds to the column of ns_run['theta'] which contains the
parameter.
handle_indexerror: bool, optional
        Make the function return nan rather than raising an
IndexError if param_ind >= ndim. This is useful when applying
the same list of estimators to data sets of different dimensions.
Returns
-------
float
"""
if logw is None:
logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
w_relative = np.exp(logw - logw.max())
try:
return (np.sum(w_relative * ns_run['theta'][:, param_ind])
/ np.sum(w_relative))
except IndexError:
if handle_indexerror:
return np.nan
else:
raise
def param_cred(ns_run, logw=None, simulate=False, probability=0.5,
param_ind=0):
"""One-tailed credible interval on the value of a single parameter
(component of theta).
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
logw: None or 1d numpy array, optional
Log weights of samples.
simulate: bool, optional
Passed to ns_run_utils.get_logw if logw needs to be
calculated.
probability: float, optional
Quantile to estimate - must be in open interval (0, 1).
For example, use 0.5 for the median and 0.84 for the upper
84% quantile. Passed to weighted_quantile.
param_ind: int, optional
Index of parameter for which the credible interval should be
calculated. This corresponds to the column of ns_run['theta']
which contains the parameter.
Returns
-------
float
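    Examples
    --------
    For instance (sketch), ``param_cred(ns_run, probability=0.84)`` gives the
    upper 84% quantile of the first parameter's posterior.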
"""
if logw is None:
logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
w_relative = np.exp(logw - logw.max()) # protect against overflow
return weighted_quantile(probability, ns_run['theta'][:, param_ind],
w_relative)
def param_squared_mean(ns_run, logw=None, simulate=False, param_ind=0):
"""Mean of the square of single parameter (second moment of its
posterior distribution).
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
logw: None or 1d numpy array, optional
Log weights of samples.
simulate: bool, optional
Passed to ns_run_utils.get_logw if logw needs to be
calculated.
param_ind: int, optional
Index of parameter for which the second moment should be
calculated. This corresponds to the column of ns_run['theta']
which contains the parameter.
Returns
-------
float
"""
if logw is None:
logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
w_relative = np.exp(logw - logw.max()) # protect against overflow
w_relative /= np.sum(w_relative)
return np.sum(w_relative * (ns_run['theta'][:, param_ind] ** 2))
def r_mean(ns_run, logw=None, simulate=False):
"""Mean of the radial coordinate (magnitude of theta vector).
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
logw: None or 1d numpy array, optional
Log weights of samples.
simulate: bool, optional
Passed to ns_run_utils.get_logw if logw needs to be
calculated.
Returns
-------
float
"""
if logw is None:
logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
w_relative = np.exp(logw - logw.max())
r = np.sqrt(np.sum(ns_run['theta'] ** 2, axis=1))
return np.sum(w_relative * r) / np.sum(w_relative)
def r_cred(ns_run, logw=None, simulate=False, probability=0.5):
"""One-tailed credible interval on the value of the radial coordinate
(magnitude of theta vector).
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
logw: None or 1d numpy array, optional
Log weights of samples.
simulate: bool, optional
Passed to ns_run_utils.get_logw if logw needs to be
calculated.
probability: float, optional
Quantile to estimate - must be in open interval (0, 1).
For example, use 0.5 for the median and 0.84 for the upper
84% quantile. Passed to weighted_quantile.
Returns
-------
float
"""
if logw is None:
logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
w_relative = np.exp(logw - logw.max()) # protect against overflow
r = np.sqrt(np.sum(ns_run['theta'] ** 2, axis=1))
return weighted_quantile(probability, r, w_relative)
# Helper functions
# ----------------
def get_latex_name(func_in, **kwargs):
"""
Produce a latex formatted name for each function for use in labelling
results.
Parameters
----------
func_in: function
kwargs: dict, optional
Kwargs for function.
Returns
-------
latex_name: str
Latex formatted name for the function.
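    Examples
    --------
    An illustrative call: ``get_latex_name(param_mean, param_ind=0)`` returns
    the latex label for the posterior mean of the first parameter.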
"""
if isinstance(func_in, functools.partial):
func = func_in.func
assert not set(func_in.keywords) & set(kwargs), (
'kwargs={0} and func_in.keywords={1} contain repeated keys'
.format(kwargs, func_in.keywords))
kwargs.update(func_in.keywords)
else:
func = func_in
param_ind = kwargs.pop('param_ind', 0)
probability = kwargs.pop('probability', 0.5)
kwargs.pop('handle_indexerror', None)
if kwargs:
raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
ind_str = r'{\hat{' + str(param_ind + 1) + '}}'
latex_name_dict = {
'count_samples': r'samples',
'logz': r'$\mathrm{log} \mathcal{Z}$',
'evidence': r'$\mathcal{Z}$',
'r_mean': r'$\overline{|\theta|}$',
'param_mean': r'$\overline{\theta_' + ind_str + '}$',
'param_squared_mean': r'$\overline{\theta^2_' + ind_str + '}$'}
# Add credible interval names
if probability == 0.5:
cred_str = r'$\mathrm{median}('
else:
# format percent without trailing zeros
percent_str = ('%f' % (probability * 100)).rstrip('0').rstrip('.')
cred_str = r'$\mathrm{C.I.}_{' + percent_str + r'\%}('
latex_name_dict['param_cred'] = cred_str + r'\theta_' + ind_str + ')$'
latex_name_dict['r_cred'] = cred_str + r'|\theta|)$'
try:
return latex_name_dict[func.__name__]
except KeyError as err:
err.args = err.args + ('get_latex_name not yet set up for ' +
func.__name__,)
raise
def weighted_quantile(probability, values, weights):
"""
Get quantile estimate for input probability given weighted samples using
linear interpolation.
Parameters
----------
probability: float
Quantile to estimate - must be in open interval (0, 1).
For example, use 0.5 for the median and 0.84 for the upper
84% quantile.
values: 1d numpy array
Sample values.
weights: 1d numpy array
Corresponding sample weights (same shape as values).
Returns
-------
quantile: float
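    Examples
    --------
    A small hand-checkable sketch: three equally weighted values 0, 1 and 2
    give a median of 1.0:
        weighted_quantile(0.5, np.array([0., 1., 2.]), np.ones(3))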
"""
assert 1 > probability > 0, (
'credible interval prob= ' + str(probability) + ' not in (0, 1)')
assert values.shape == weights.shape
assert values.ndim == 1
assert weights.ndim == 1
sorted_inds = np.argsort(values)
quantiles = np.cumsum(weights[sorted_inds]) - (0.5 * weights[sorted_inds])
quantiles /= np.sum(weights)
return np.interp(probability, quantiles, values[sorted_inds])
| 31.695402 | 78 | 0.640163 | [
"MIT"
] | ThomasEdwardRiley/nestcheck | nestcheck/estimators.py | 11,030 | Python |
class ServiceMedia:
def __init__(self, org_id, service_id, service_row_id, url, order, file_type, asset_type, alt_text, ipfs_url):
self._service_row_id = service_row_id
self._org_id = org_id
self._service_id = service_id
self._url = url
self._order = order
self._file_type = file_type
self._asset_type = asset_type
self._alt_text = alt_text
self._ipfs_url = ipfs_url
def to_dict(self):
return {
"service_row_id": self._service_row_id,
"org_id": self._org_id,
"service_id": self._service_id,
"url": self._url,
"order": self._order,
"file_type": self._file_type,
"asset_type": self._asset_type,
"alt_text": self._alt_text
}
@property
def org_id(self):
return self._org_id
@org_id.setter
def org_id(self, org_id):
self._org_id = org_id
@property
def service_id(self):
return self._service_id
@service_id.setter
def service_id(self, service_id):
self._service_id = service_id
@property
def service_row_id(self):
return self._service_row_id
@service_row_id.setter
def service_row_id(self, service_row_id):
self._service_row_id = service_row_id
@property
def url(self):
return self._url
@url.setter
def url(self, url):
self._url = url
@property
def order(self):
return self._order
@order.setter
def order(self, order):
self._order = order
@property
def file_type(self):
return self._file_type
@file_type.setter
def file_type(self, file_type):
self._file_type = file_type
@property
def asset_type(self):
return self._asset_type
@asset_type.setter
def asset_type(self, asset_type):
self._asset_type = asset_type
@property
def alt_text(self):
return self._alt_text
@alt_text.setter
def alt_text(self, alt_text):
self._alt_text = alt_text
@property
def ipfs_url(self):
return self._ipfs_url
@ipfs_url.setter
def ipfs_url(self, ipfs_url):
        self._ipfs_url = ipfs_url
| 23.458333 | 114 | 0.618117 | [
"MIT"
] | Karthi96/snet-marketplace-service | contract_api/domain/models/service_media.py | 2,252 | Python |
"""Database schema functions and information for Toron node files.
Toron nodes are stored as individual files. The file format is
managed, internally, as a relational database. The schema for this
database is shown below as a simplified ERD (entity relationship
diagram). SQL foreign key relationships are represented with hyphen
and pipe characters ('-' and '|'). Other, more complex relationships
are represented with bullet points ('•') and these are enforced at
the application layer:
+------------------+
+---------------------+ | relation |
| edge | +------------------+
+---------------------+ | relation_id | •••• <Other Node>
| edge_id |------->| edge_id | •
| name | ••••••| other_element_id |<•••••
| type_info | • •••| element_id |<-+ +--------------+
| description | • • | proportion | | | quantity |
| user_properties | • • | mapping_level | | +--------------+
| other_uuid | • • +------------------+ | | quantity_id |
| other_filename_hint | • • | +->| _location_id |
| other_element_hash |<•• • | | | attributes |
| is_complete |<••••• +-----------------+ | | value |
+---------------------+ | | +--------------+
| |
+------------+ | +--------------+ | +---------------+
| element | | | location | | | structure |
+------------+ | +--------------+ | +---------------+
+------| element_id |--+ | _location_id |--+ | _structure_id |
| | label_a |••••>| label_a |<••••| label_a |
| | label_b |••••>| label_b |<••••| label_b |
| | label_c |••••>| label_c |<••••| label_c |
| | ... |••••>| ... |<••••| ... |
| +------------+ +--------------+ +---------------+
|
| +-------------------+ +----------+
| | element_weight | +-------------+ | property |
| +-------------------+ | weight | +----------+
| | element_weight_id | +-------------+ | key |
| | weight_id |<----| weight_id | | value |
+->| element_id |••• | name | +----------+
| value | • | type_info |
+-------------------+ • | description |
••>| is_complete |
+-------------+
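As a usage sketch (the file name 'example.toron' and the query are
illustrative only), a node file is opened with connect() or wrapped in a
transaction() block, which groups the work inside a SAVEPOINT:
    with transaction('example.toron', mode='rwc') as cur:
        cur.execute("SELECT value FROM property WHERE key='schema_version'")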
"""
import itertools
import os
import re
import sqlite3
from contextlib import contextmanager
from json import loads as _loads
from urllib.parse import quote as urllib_parse_quote
from ._exceptions import ToronError
sqlite3.register_converter('TEXT_JSON', _loads)
sqlite3.register_converter('TEXT_ATTRIBUTES', _loads)
def _is_sqlite_json1_enabled():
"""Check if SQLite implementation includes JSON1 extension."""
# The inclusion of JSON functions is optional when compiling SQLite.
# In versions 3.38.0 and newer, JSON functions are included by
# default but can be disabled (opt-out policy). For older versions
# of SQLite, JSON functions are available on an opt-in basis. It is
    # necessary to test for their presence rather than referencing the
# SQLite version number.
#
# For more information, see:
# https://www.sqlite.org/json1.html#compiling_in_json_support
con = sqlite3.connect(':memory:')
try:
con.execute("SELECT json_valid('123')")
except sqlite3.OperationalError:
return False
finally:
con.close()
return True
SQLITE_JSON1_ENABLED = _is_sqlite_json1_enabled()
_schema_script = """
PRAGMA foreign_keys = ON;
CREATE TABLE edge(
edge_id INTEGER PRIMARY KEY,
name TEXT NOT NULL,
type_info TEXT_ATTRIBUTES NOT NULL,
description TEXT,
user_properties TEXT_USERPROPERTIES,
other_uuid TEXT CHECK (other_uuid LIKE '________-____-____-____-____________') NOT NULL,
other_filename_hint TEXT NOT NULL,
other_element_hash TEXT,
is_complete INTEGER CHECK (is_complete IN (0, 1)),
UNIQUE (name, other_uuid)
);
CREATE TABLE relation(
relation_id INTEGER PRIMARY KEY,
edge_id INTEGER,
other_element_id INTEGER NOT NULL,
element_id INTEGER,
proportion REAL CHECK (0.0 < proportion AND proportion <= 1.0) NOT NULL,
mapping_level INTEGER NOT NULL,
FOREIGN KEY(edge_id) REFERENCES edge(edge_id) ON DELETE CASCADE,
FOREIGN KEY(element_id) REFERENCES element(element_id) DEFERRABLE INITIALLY DEFERRED,
UNIQUE (edge_id, other_element_id, element_id)
);
CREATE TABLE element(
element_id INTEGER PRIMARY KEY AUTOINCREMENT /* <- Must not reuse id values. */
/* label columns added programmatically */
);
CREATE TABLE location(
_location_id INTEGER PRIMARY KEY
/* label columns added programmatically */
);
CREATE TABLE structure(
_structure_id INTEGER PRIMARY KEY
/* label columns added programmatically */
);
CREATE TABLE quantity(
quantity_id INTEGER PRIMARY KEY,
_location_id INTEGER,
attributes TEXT_ATTRIBUTES NOT NULL,
value NUMERIC NOT NULL,
FOREIGN KEY(_location_id) REFERENCES location(_location_id)
);
CREATE TABLE weight(
weight_id INTEGER PRIMARY KEY,
name TEXT NOT NULL,
type_info TEXT_ATTRIBUTES NOT NULL,
description TEXT,
is_complete INTEGER CHECK (is_complete IN (0, 1)),
UNIQUE (name)
);
CREATE TABLE element_weight(
element_weight_id INTEGER PRIMARY KEY,
weight_id INTEGER,
element_id INTEGER,
value REAL NOT NULL,
FOREIGN KEY(weight_id) REFERENCES weight(weight_id) ON DELETE CASCADE,
FOREIGN KEY(element_id) REFERENCES element(element_id) DEFERRABLE INITIALLY DEFERRED,
UNIQUE (element_id, weight_id)
);
CREATE TABLE property(
key TEXT PRIMARY KEY NOT NULL,
value TEXT_JSON
);
INSERT INTO property VALUES ('schema_version', '1');
"""
def _is_wellformed_json(x):
"""Return 1 if *x* is well-formed JSON or return 0 if *x* is not
well-formed. This function should be registered with SQLite (via
the create_function() method) when the JSON1 extension is not
available.
This function mimics the JSON1 json_valid() function, see:
https://www.sqlite.org/json1.html#jvalid
"""
try:
_loads(x)
except (ValueError, TypeError):
return 0
return 1
def _make_trigger_for_json(insert_or_update, table, column):
"""Return a SQL statement for creating a temporary trigger. The
trigger is used to validate the contents of TEXT_JSON type columns.
The trigger will pass without error if the JSON is wellformed.
"""
if insert_or_update.upper() not in {'INSERT', 'UPDATE'}:
msg = f"expected 'INSERT' or 'UPDATE', got {insert_or_update!r}"
raise ValueError(msg)
if SQLITE_JSON1_ENABLED:
when_clause = f"""
NEW.{column} IS NOT NULL
AND json_valid(NEW.{column}) = 0
""".rstrip()
else:
when_clause = f"""
NEW.{column} IS NOT NULL
AND is_wellformed_json(NEW.{column}) = 0
""".rstrip()
return f'''
CREATE TEMPORARY TRIGGER IF NOT EXISTS trigger_check_{insert_or_update.lower()}_{table}_{column}
BEFORE {insert_or_update.upper()} ON main.{table} FOR EACH ROW
WHEN{when_clause}
BEGIN
SELECT RAISE(ABORT, '{table}.{column} must be wellformed JSON');
END;
'''
def _is_wellformed_user_properties(x):
"""Check if *x* is a wellformed TEXT_USERPROPERTIES value.
A wellformed TEXT_USERPROPERTIES value is a string containing
a JSON formatted object. Returns 1 if *x* is valid or 0 if
it's not.
This function should be registered as an application-defined
SQL function and used in queries when SQLite's JSON1 extension
is not enabled.
"""
try:
obj = _loads(x)
except (ValueError, TypeError):
return 0
if isinstance(obj, dict):
return 1
return 0
def _make_trigger_for_user_properties(insert_or_update, table, column):
"""Return a CREATE TRIGGER statement to check TEXT_USERPROPERTIES
values. This trigger is used to check values before they are saved
in the database.
A wellformed TEXT_USERPROPERTIES value is a string containing
a JSON formatted object.
The trigger will pass without error if the value is wellformed.
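    For example (illustrative), the value '{"note": "2020 vintage"}' passes,
    while a bare JSON array such as '[1, 2, 3]' is rejected because it is not
    a JSON object.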
"""
if SQLITE_JSON1_ENABLED:
user_properties_check = f"(json_valid(NEW.{column}) = 0 OR json_type(NEW.{column}) != 'object')"
else:
user_properties_check = f'is_wellformed_user_properties(NEW.{column}) = 0'
return f'''
CREATE TEMPORARY TRIGGER IF NOT EXISTS trigger_check_{insert_or_update.lower()}_{table}_{column}
BEFORE {insert_or_update.upper()} ON main.{table} FOR EACH ROW
WHEN
NEW.{column} IS NOT NULL
AND {user_properties_check}
BEGIN
SELECT RAISE(ABORT, '{table}.{column} must be wellformed JSON object');
END;
'''
def _is_wellformed_attributes(x):
"""Returns 1 if *x* is a wellformed TEXT_ATTRIBUTES column
value else returns 0. TEXT_ATTRIBUTES should be flat, JSON
object strings. This function should be registered with SQLite
(via the create_function() method) when the JSON1 extension
is not available.
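    For example (illustrative), '{"sex": "female", "age": "20 to 25"}' returns
    1, while '{"count": 12}' returns 0 because its value is not text.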
"""
try:
obj = _loads(x)
except (ValueError, TypeError):
return 0
if not isinstance(obj, dict):
return 0
for value in obj.values():
if not isinstance(value, str):
return 0
return 1
def _make_trigger_for_attributes(insert_or_update, table, column):
"""Return a SQL statement for creating a temporary trigger. The
trigger is used to validate the contents of TEXT_ATTRIBUTES
type columns.
The trigger will pass without error if the JSON is a wellformed
"object" containing "text" values.
The trigger will raise an error if the value is:
* not wellformed JSON
* not an "object" type
* an "object" type that contains one or more "integer", "real",
"true", "false", "null", "object" or "array" types
"""
if insert_or_update.upper() not in {'INSERT', 'UPDATE'}:
msg = f"expected 'INSERT' or 'UPDATE', got {insert_or_update!r}"
raise ValueError(msg)
if SQLITE_JSON1_ENABLED:
when_clause = f"""
NEW.{column} IS NOT NULL
AND (json_valid(NEW.{column}) = 0
OR json_type(NEW.{column}) != 'object'
OR (SELECT COUNT(*)
FROM json_each(NEW.{column})
WHERE json_each.type != 'text') != 0)
""".rstrip()
else:
when_clause = f"""
NEW.{column} IS NOT NULL
AND is_wellformed_attributes(NEW.{column}) = 0
""".rstrip()
return f'''
CREATE TEMPORARY TRIGGER IF NOT EXISTS trigger_check_{insert_or_update.lower()}_{table}_{column}
BEFORE {insert_or_update.upper()} ON main.{table} FOR EACH ROW
WHEN{when_clause}
BEGIN
SELECT RAISE(ABORT, '{table}.{column} must be a JSON object with text values');
END;
'''
def _add_functions_and_triggers(connection):
"""Create triggers and application-defined functions *connection*.
Note: This function must not be executed on an empty connection.
The table schema must exist before triggers can be created.
"""
if not SQLITE_JSON1_ENABLED:
try:
connection.create_function(
'is_wellformed_json', 1, _is_wellformed_json, deterministic=True)
connection.create_function(
'is_wellformed_user_properties', 1, _is_wellformed_user_properties, deterministic=True)
connection.create_function(
'is_wellformed_attributes', 1, _is_wellformed_attributes, deterministic=True)
except TypeError:
connection.create_function('is_wellformed_json', 1, _is_wellformed_json)
connection.create_function('is_wellformed_user_properties', 1, _is_wellformed_user_properties)
connection.create_function('is_wellformed_attributes', 1, _is_wellformed_attributes)
connection.execute(_make_trigger_for_json('INSERT', 'property', 'value'))
connection.execute(_make_trigger_for_json('UPDATE', 'property', 'value'))
connection.execute(_make_trigger_for_user_properties('INSERT', 'edge', 'user_properties'))
connection.execute(_make_trigger_for_user_properties('UPDATE', 'edge', 'user_properties'))
jsonflatobj_columns = [
('edge', 'type_info'),
('quantity', 'attributes'),
('weight', 'type_info'),
]
for table, column in jsonflatobj_columns:
connection.execute(_make_trigger_for_attributes('INSERT', table, column))
connection.execute(_make_trigger_for_attributes('UPDATE', table, column))
def _path_to_sqlite_uri(path):
"""Convert a path into a SQLite compatible URI.
Unlike pathlib's URI handling, SQLite accepts relative URI paths.
For details, see:
https://www.sqlite.org/uri.html#the_uri_path
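    For example, on a POSIX system a relative path containing a space is
    quoted rather than made absolute (illustrative):
        _path_to_sqlite_uri('my node.toron')  # -> 'file:my%20node.toron'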
"""
if os.name == 'nt': # Windows
if re.match(r'^[a-zA-Z]:', path):
path = os.path.abspath(path) # If drive-letter, must be absolute.
drive_prefix = f'/{path[:2]}' # Must not url-quote colon after drive-letter.
path = path[2:]
else:
drive_prefix = ''
path = path.replace('\\', '/')
path = urllib_parse_quote(path)
path = f'{drive_prefix}{path}'
else:
path = urllib_parse_quote(path)
path = re.sub('/+', '/', path)
return f'file:{path}'
def connect(path, mode='rwc'):
"""Returns a sqlite3 connection to a Toron node file."""
uri_path = _path_to_sqlite_uri(path)
uri_path = f'{uri_path}?mode={mode}'
try:
get_connection = lambda: sqlite3.connect(
database=uri_path,
detect_types=sqlite3.PARSE_DECLTYPES,
isolation_level=None,
uri=True,
)
if os.path.exists(path):
con = get_connection()
else:
con = get_connection()
con.executescript(_schema_script) # Create database schema.
except sqlite3.OperationalError as err:
msg = str(err).replace('database file', f'node file {path!r}')
raise ToronError(msg)
try:
_add_functions_and_triggers(con)
except (sqlite3.OperationalError, sqlite3.DatabaseError):
# Raises OperationalError when *path* is a database with an unknown
# schema and DatabaseError when *path* is a file but not a database.
con.close()
raise ToronError(f'Path is not a Toron node: {path!r}')
cur = con.execute("SELECT value FROM property WHERE key='schema_version'")
schema_version, *_ = cur.fetchone() or (None,)
cur.close()
if schema_version != 1: # When schema version is unsupported.
msg = f'Unsupported Toron node format: schema version {schema_version!r}'
raise ToronError(msg)
return con
_SAVEPOINT_NAME_GENERATOR = (f'svpnt{n}' for n in itertools.count())
class savepoint(object):
"""Context manager to wrap a block of code inside a SAVEPOINT.
    If the block exits without errors, the SAVEPOINT is released
and the changes are committed. If an error occurs, all of the
changes are rolled back:
cur = con.cursor()
with savepoint(cur):
cur.execute(...)
"""
def __init__(self, cursor):
if cursor.connection.isolation_level is not None:
isolation_level = cursor.connection.isolation_level
msg = (
f'isolation_level must be None, got: {isolation_level!r}\n'
'\n'
'For explicit transaction handling, the connection must '
'be operating in "autocommit" mode. Turn on autocommit '
'mode by setting "con.isolation_level = None".'
)
raise sqlite3.OperationalError(msg)
self.name = next(_SAVEPOINT_NAME_GENERATOR)
self.cursor = cursor
def __enter__(self):
self.cursor.execute(f'SAVEPOINT {self.name}')
def __exit__(self, exc_type, exc_val, exc_tb):
if exc_type is None:
self.cursor.execute(f'RELEASE {self.name}')
else:
self.cursor.execute(f'ROLLBACK TO {self.name}')
@contextmanager
def transaction(path_or_connection, mode=None):
"""A context manager that yields a cursor that runs in an
isolated transaction. If the context manager exits without
errors, the transaction is committed. If an exception is
raised, all changes are rolled-back.
"""
if isinstance(path_or_connection, sqlite3.Connection):
connection = path_or_connection
        connection_close = lambda: None  # Don't close an already-existing connection.
else:
connection = connect(path_or_connection, mode=mode)
connection_close = connection.close
cursor = connection.cursor()
try:
with savepoint(cursor):
yield cursor
finally:
cursor.close()
connection_close()
| 36.952675 | 106 | 0.595189 | [
"Apache-2.0"
] | shawnbrown/gpn | toron/_node_schema.py | 18,111 | Python |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 5 09:10:56 2018
@author: gtucker
"""
import numpy as np
import datetime
from grainhill import GrainFacetSimulator
from grainhill import SlopeMeasurer
import landlab
from landlab.io.native_landlab import save_grid
import os
def create_folder(directory):
try:
if not os.path.exists(directory):
os.makedirs(directory)
except OSError:
print('Error: Creating directory ' + directory)
params = {
'grid_size' : (111, 81),
'report_interval' : 5.0,
'output_interval' : 1.0e99,
'disturbance_rate' : 1.0e-4,
'weathering_rate' : 0.0,
'dissolution_rate': 0.0,
'friction_coef' : 1.0,
'fault_x' : -0.01,
'cell_width' : 0.5,
'grav_accel' : 9.8,
}
# Open a file to record output:
d = datetime.datetime.today()
today_str = str(d.year) + str(d.month).zfill(2) + str(d.day).zfill(2)
results_file = open('results_v_vs_w' + today_str + '.csv', 'w')
results_file.write('Landlab version,' + landlab.__version__ + ',\n')
# Print header in file
results_file.write('Uplift interval (yr),Weathering rate '
+ 'parameter (1/yr),Gradient (m/m),'
+ 'Slope angle (deg)\n')
# Sweep through a range of uplift intervals and weathering rate parameters
for uplift_interval_exp in np.arange(2, 5.2, 0.2):
for weath_exp in np.arange(-5, -1.8, 0.2):
weath_rate = 10.0**weath_exp
uplift_interval = 10.0**uplift_interval_exp
params['uplift_interval'] = uplift_interval
params['weathering_rate'] = weath_rate
# Set run duration long enough for uplift of 150 rows
params['run_duration'] = 100 * uplift_interval
params['plot_interval'] = 10 * uplift_interval
        print('Uplift interval: ' + str(params['uplift_interval']) + ' yr')
print('Weathering rate: ' + str(params['weathering_rate']) + ' 1/y')
opname = ('tau' + str(int(round(10 * uplift_interval_exp))) + 'w' + str(int(round(10 * weath_exp))))
create_folder(opname)
params['plot_file_name'] = opname + '/' + opname
gfs = GrainFacetSimulator(**params)
gfs.run()
sm = SlopeMeasurer(gfs)
sm.pick_rock_surface()
(m, b) = sm.fit_straight_line_to_surface()
angle = np.degrees(np.arctan(m))
results_file.write(str(uplift_interval) + ',' + str(weath_rate) + ','
+ str(m) + ',' + str(angle) + '\n')
results_file.flush()
save_grid(gfs.grid, opname + '/' + opname + '.grid', clobber=True)
results_file.close()
| 29.556818 | 108 | 0.619377 | [
"MIT"
] | gregtucker/facetmorphology | ModelRunScripts/SensitivityAnalysisDandV/run_v_w.py | 2,601 | Python |
from Jumpscale import j
import re
# ACTIONS
## R = Regex Replace
## RI = Regex Replace case insensitive
DO = """
RI| j.application.JSBase$ | j.baseclasses.object
RI| j.data.cache. | j.core.cache.
RI| j.data.text. | j.core.text.
RI| from jumpscale import j | from Jumpscale import j
RI| j.application.jsbase_get_class() | j.baseclasses.object
RI| .base_class_config | .JSBaseClassConfig
RI| .base_class_configs | .JSBaseClassConfigs
RI| j.logging. | j.logger.
RI | Jumpscale.logging. | Jumpscale.core.logging.
RI| self._location | self.__jslocation__
RI| j.data.serializer. | j.data.serializers.
RI| self.prefab.core.file_write | j.sal.fs.writeFile
RI| self.prefab.core.run | j.sal.process.execute
RI| self.prefab.core.createDir | j.sal.fs.createDir
RI| self.prefab.core.file_download | self.prefab.core.file_download
RI| self.prefab.system.package.install | j.builders.system.package.ensure
"""
ERRORS = """
configmanager._base_class_config
"""
JSBASE = j.baseclasses.object
class FixerReplacer(j.baseclasses.object):
def __init__(self):
JSBASE.__init__(self)
self.rules = []
for rule in DO.split("\n"):
if rule.strip() == "":
continue
if rule.strip().startswith("#"):
continue
cmd, from_, to_ = rule.split("|")
if cmd.lower().strip() == "ri":
self.rules.append(ReplaceIgnoreCase(from_, to_))
elif cmd.lower().strip() == "r":
self.rules.append(ReplaceNormal(from_, to_))
else:
raise j.exceptions.Base("unknown rule:%s" % rule)
def line_process(self, line):
changed = False
# if "\t" in line:
# line = line.replace("\t"," ")
# changed = True
for rule in self.rules:
line1 = rule.replace(line)
if line1 != line:
changed = True
line = line1
return changed, line
def file_process(self, path, write=False, root=""):
out = ""
nr = 0
for line in j.sal.fs.readFile(path).split("\n"):
nr += 1
changed, line2 = self.line_process(line)
if changed:
path2 = j.sal.fs.pathRemoveDirPart(path, root)
if path2 not in self.changes:
self.changes[path2] = {}
changes = self.changes[path2]
changes["line"] = nr
changes["from"] = line
changes["to.."] = line2
out += "%s\n" % line2
else:
out += "%s\n" % line
if len(self.changes) > 0 and write:
j.sal.fs.writeFile(path, out)
def dir_process(self, path, extensions=["py", "txt", "md"], recursive=True, write=False):
path = j.sal.fs.pathNormalize(path)
self.changes = {}
for ext in extensions:
for p in j.sal.fs.listFilesInDir(path, recursive=recursive, filter="*.%s" % ext, followSymlinks=False):
self._log_debug("process file:%s" % p)
self.file_process(root=path, path=p, write=write)
print(j.data.serializers.yaml.dumps(self.changes))
class ReplaceIgnoreCase:
def __init__(self, from_, to_, prepend="", append=""):
self.from_ = from_.strip()
self.to_ = to_.strip()
self.regex = re.compile(re.escape(prepend + self.from_ + append), re.IGNORECASE | re.VERBOSE)
def replace(self, txt):
m = self.regex.search(txt)
if m:
found = m.string[m.start() : m.end()]
txt2 = txt.replace(found, self.to_)
return txt2
else:
return txt
class ReplaceNormal(ReplaceIgnoreCase):
def __init__(self, from_, to_, prepend="", append=""):
        ReplaceIgnoreCase.__init__(self, from_, to_, prepend, append)
self.regex = re.compile(re.escape(prepend + self.from_ + append))
| 33.564103 | 115 | 0.580087 | [
"Apache-2.0"
] | threefoldtech/sandbox_threebot_linux64 | sandbox/lib/jumpscale/JumpscaleLibs/tools/fixer/FixerReplace.py | 3,927 | Python |
"""Client for Triton Inference Server using REST API.
References:
-
https://github.com/kubeflow/kfserving/blob/master/docs/predict-api/v2/required_api.md#httprest
-
https://github.com/triton-inference-server/client/tree/master/src/python/examples
-
https://github.com/triton-inference-server/client/blob/master/src/python/library/tritonclient/http/__init__.py
"""
import json
import time
import threading
import distribution
import clients.base_rest_client
import clients.utils
import tensorflow.compat.v1 as tf
import requests as r
import numpy as np
import tritonclient.http as triton_httpclient
import tritonclient.utils as triton_utils
from tensorflow.python.framework import dtypes
class TritonRest(clients.base_rest_client.BaseRestClient):
def generate_rest_request_from_dictionary(self, row_dict):
triton_request_inputs = []
for key, value in row_dict.items():
t = clients.utils.get_type(value, self._default_float_type,
self._default_int_type)
if t == np.object_:
value = clients.utils.map_multi_dimensional_list(
value, lambda s: s.encode("utf-8"))
numpy_value = np.array(value, dtype=t)
triton_request_input = triton_httpclient.InferInput(
key, list(numpy_value.shape), triton_utils.np_to_triton_dtype(t))
triton_request_input.set_data_from_numpy(
numpy_value, binary_data=True) # binary_data=True by default
triton_request_inputs.append(triton_request_input)
# https://github.com/triton-inference-server/client/blob/530bcac5f1574aa2222930076200544eb274245c/src/python/library/tritonclient/http/__init__.py#L81
# Returns tuple - request and request len to pass in Infer-Header-Content-Length header
(request, json_size) = triton_httpclient._get_inference_request(
inputs=triton_request_inputs,
request_id="",
outputs=None,
sequence_id=0,
sequence_start=0,
sequence_end=0,
priority=0,
timeout=None)
headers = {}
if json_size:
headers["Inference-Header-Content-Length"] = str(json_size)
return (request, headers)
def get_requests_from_dictionary(self, path):
rows = []
with tf.gfile.GFile(path, "r") as f:
for line in f:
row_dict = eval(line)
rows.append(self.generate_rest_request_from_dictionary(row_dict))
return rows
def get_requests_from_tfrecord(self, path, count, batch_size):
raise NotImplementedError()
def get_requests_from_file(self, path):
raise NotImplementedError()
def get_uri(self):
if self._host.startswith("http"):
return self._host
else:
# https://github.com/kubeflow/kfserving/blob/master/docs/predict-api/v2/required_api.md#httprest
if self._model_version:
return f"http://{self._host}:{self._port}/v2/models/{self._model_name}/versions/{self._model_version}/infer"
else:
return f"http://{self._host}:{self._port}/v2/models/{self._model_name}/infer"
| 36.597561 | 154 | 0.726425 | [
"MIT"
] | vlasenkoalexey/tensorflow_serving_benchmark | clients/triton_rest.py | 3,001 | Python |